Diffstat (limited to 'drivers/gpu/drm/i915')
31 files changed, 3218 insertions, 720 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 454e186f736..6ed45a98423 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -586,7 +586,53 @@ static int i915_interrupt_info(struct seq_file *m, void *data) if (ret) return ret; - if (IS_VALLEYVIEW(dev)) { + if (INTEL_INFO(dev)->gen >= 8) { + int i; + seq_printf(m, "Master Interrupt Control:\t%08x\n", + I915_READ(GEN8_MASTER_IRQ)); + + for (i = 0; i < 4; i++) { + seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", + i, I915_READ(GEN8_GT_IMR(i))); + seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", + i, I915_READ(GEN8_GT_IIR(i))); + seq_printf(m, "GT Interrupt IER %d:\t%08x\n", + i, I915_READ(GEN8_GT_IER(i))); + } + + for_each_pipe(i) { + seq_printf(m, "Pipe %c IMR:\t%08x\n", + pipe_name(i), + I915_READ(GEN8_DE_PIPE_IMR(i))); + seq_printf(m, "Pipe %c IIR:\t%08x\n", + pipe_name(i), + I915_READ(GEN8_DE_PIPE_IIR(i))); + seq_printf(m, "Pipe %c IER:\t%08x\n", + pipe_name(i), + I915_READ(GEN8_DE_PIPE_IER(i))); + } + + seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", + I915_READ(GEN8_DE_PORT_IMR)); + seq_printf(m, "Display Engine port interrupt identity:\t%08x\n", + I915_READ(GEN8_DE_PORT_IIR)); + seq_printf(m, "Display Engine port interrupt enable:\t%08x\n", + I915_READ(GEN8_DE_PORT_IER)); + + seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n", + I915_READ(GEN8_DE_MISC_IMR)); + seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n", + I915_READ(GEN8_DE_MISC_IIR)); + seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n", + I915_READ(GEN8_DE_MISC_IER)); + + seq_printf(m, "PCU interrupt mask:\t%08x\n", + I915_READ(GEN8_PCU_IMR)); + seq_printf(m, "PCU interrupt identity:\t%08x\n", + I915_READ(GEN8_PCU_IIR)); + seq_printf(m, "PCU interrupt enable:\t%08x\n", + I915_READ(GEN8_PCU_IER)); + } else if (IS_VALLEYVIEW(dev)) { seq_printf(m, "Display IER:\t%08x\n", I915_READ(VLV_IER)); seq_printf(m, "Display IIR:\t%08x\n", @@ -658,7 +704,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) seq_printf(m, "Interrupts received: %d\n", atomic_read(&dev_priv->irq_received)); for_each_ring(ring, dev_priv, i) { - if (IS_GEN6(dev) || IS_GEN7(dev)) { + if (INTEL_INFO(dev)->gen >= 6) { seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", ring->name, I915_READ_IMR(ring)); @@ -1577,7 +1623,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data) I915_READ16(C0DRB3)); seq_printf(m, "C1DRB3 = 0x%04x\n", I915_READ16(C1DRB3)); - } else if (IS_GEN6(dev) || IS_GEN7(dev)) { + } else if (INTEL_INFO(dev)->gen >= 6) { seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", I915_READ(MAD_DIMM_C0)); seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", @@ -1586,8 +1632,12 @@ static int i915_swizzle_info(struct seq_file *m, void *data) I915_READ(MAD_DIMM_C2)); seq_printf(m, "TILECTL = 0x%08x\n", I915_READ(TILECTL)); - seq_printf(m, "ARB_MODE = 0x%08x\n", - I915_READ(ARB_MODE)); + if (IS_GEN8(dev)) + seq_printf(m, "GAMTARBMODE = 0x%08x\n", + I915_READ(GAMTARBMODE)); + else + seq_printf(m, "ARB_MODE = 0x%08x\n", + I915_READ(ARB_MODE)); seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", I915_READ(DISP_ARB_CTL)); } @@ -1596,18 +1646,37 @@ static int i915_swizzle_info(struct seq_file *m, void *data) return 0; } -static int i915_ppgtt_info(struct seq_file *m, void *data) +static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) { - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = 
dev->dev_private; struct intel_ring_buffer *ring; - int i, ret; + struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; + int unused, i; + if (!ppgtt) + return; + + seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages); + seq_printf(m, "Page tables: %d\n", ppgtt->num_pt_pages); + for_each_ring(ring, dev_priv, unused) { + seq_printf(m, "%s\n", ring->name); + for (i = 0; i < 4; i++) { + u32 offset = 0x270 + i * 8; + u64 pdp = I915_READ(ring->mmio_base + offset + 4); + pdp <<= 32; + pdp |= I915_READ(ring->mmio_base + offset); + seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); + } + } +} + +static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + int i; - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; if (INTEL_INFO(dev)->gen == 6) seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); @@ -1626,6 +1695,22 @@ static int i915_ppgtt_info(struct seq_file *m, void *data) seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); } seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); +} + +static int i915_ppgtt_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + + int ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + if (INTEL_INFO(dev)->gen >= 8) + gen8_ppgtt_info(m, dev); + else if (INTEL_INFO(dev)->gen >= 6) + gen6_ppgtt_info(m, dev); + mutex_unlock(&dev->struct_mutex); return 0; @@ -1772,13 +1857,18 @@ static int i915_pipe_crc_open(struct inode *inode, struct file *filep) struct drm_i915_private *dev_priv = info->dev->dev_private; struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; - if (!atomic_dec_and_test(&pipe_crc->available)) { - atomic_inc(&pipe_crc->available); + spin_lock_irq(&pipe_crc->lock); + + if (pipe_crc->opened) { + spin_unlock_irq(&pipe_crc->lock); return -EBUSY; /* already open */ } + pipe_crc->opened = true; filep->private_data = inode->i_private; + spin_unlock_irq(&pipe_crc->lock); + return 0; } @@ -1788,7 +1878,9 @@ static int i915_pipe_crc_release(struct inode *inode, struct file *filep) struct drm_i915_private *dev_priv = info->dev->dev_private; struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; - atomic_inc(&pipe_crc->available); /* release the device */ + spin_lock_irq(&pipe_crc->lock); + pipe_crc->opened = false; + spin_unlock_irq(&pipe_crc->lock); return 0; } @@ -1800,12 +1892,9 @@ static int i915_pipe_crc_release(struct inode *inode, struct file *filep) static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc) { - int head, tail; - - head = atomic_read(&pipe_crc->head); - tail = atomic_read(&pipe_crc->tail); - - return CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR); + assert_spin_locked(&pipe_crc->lock); + return CIRC_CNT(pipe_crc->head, pipe_crc->tail, + INTEL_PIPE_CRC_ENTRIES_NR); } static ssize_t @@ -1831,20 +1920,30 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, return 0; /* nothing to read */ + spin_lock_irq(&pipe_crc->lock); while (pipe_crc_data_count(pipe_crc) == 0) { - if (filep->f_flags & O_NONBLOCK) + int ret; + + if (filep->f_flags & O_NONBLOCK) { + spin_unlock_irq(&pipe_crc->lock); return -EAGAIN; + } - if (wait_event_interruptible(pipe_crc->wq, - pipe_crc_data_count(pipe_crc))) - return -ERESTARTSYS; + ret = wait_event_interruptible_lock_irq(pipe_crc->wq, +
pipe_crc_data_count(pipe_crc), pipe_crc->lock); + if (ret) { + spin_unlock_irq(&pipe_crc->lock); + return ret; + } } /* We now have one or more entries to read */ - head = atomic_read(&pipe_crc->head); - tail = atomic_read(&pipe_crc->tail); + head = pipe_crc->head; + tail = pipe_crc->tail; n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR), count / PIPE_CRC_LINE_LEN); + spin_unlock_irq(&pipe_crc->lock); + bytes_read = 0; n = 0; do { @@ -1864,10 +1963,13 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR); tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); - atomic_set(&pipe_crc->tail, tail); n++; } while (--n_entries); + spin_lock_irq(&pipe_crc->lock); + pipe_crc->tail = tail; + spin_unlock_irq(&pipe_crc->lock); + return bytes_read; } @@ -1915,6 +2017,11 @@ static const char * const pipe_crc_sources[] = { "plane2", "pf", "pipe", + "TV", + "DP-B", + "DP-C", + "DP-D", + "auto", }; static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) @@ -1943,33 +2050,279 @@ static int display_crc_ctl_open(struct inode *inode, struct file *file) return single_open(file, display_crc_ctl_show, dev); } -static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source source, +static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, + uint32_t *val) +{ + if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) + *source = INTEL_PIPE_CRC_SOURCE_PIPE; + + switch (*source) { + case INTEL_PIPE_CRC_SOURCE_PIPE: + *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX; + break; + case INTEL_PIPE_CRC_SOURCE_NONE: + *val = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe, + enum intel_pipe_crc_source *source) +{ + struct intel_encoder *encoder; + struct intel_crtc *crtc; + struct intel_digital_port *dig_port; + int ret = 0; + + *source = INTEL_PIPE_CRC_SOURCE_PIPE; + + mutex_lock(&dev->mode_config.mutex); + list_for_each_entry(encoder, &dev->mode_config.encoder_list, + base.head) { + if (!encoder->base.crtc) + continue; + + crtc = to_intel_crtc(encoder->base.crtc); + + if (crtc->pipe != pipe) + continue; + + switch (encoder->type) { + case INTEL_OUTPUT_TVOUT: + *source = INTEL_PIPE_CRC_SOURCE_TV; + break; + case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_EDP: + dig_port = enc_to_dig_port(&encoder->base); + switch (dig_port->port) { + case PORT_B: + *source = INTEL_PIPE_CRC_SOURCE_DP_B; + break; + case PORT_C: + *source = INTEL_PIPE_CRC_SOURCE_DP_C; + break; + case PORT_D: + *source = INTEL_PIPE_CRC_SOURCE_DP_D; + break; + default: + WARN(1, "nonexisting DP port %c\n", + port_name(dig_port->port)); + break; + } + break; + } + } + mutex_unlock(&dev->mode_config.mutex); + + return ret; +} + +static int vlv_pipe_crc_ctl_reg(struct drm_device *dev, + enum pipe pipe, + enum intel_pipe_crc_source *source, + uint32_t *val) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + bool need_stable_symbols = false; + + if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { + int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); + if (ret) + return ret; + } + + switch (*source) { + case INTEL_PIPE_CRC_SOURCE_PIPE: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV; + break; + case INTEL_PIPE_CRC_SOURCE_DP_B: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV; + need_stable_symbols = true; + break; + case INTEL_PIPE_CRC_SOURCE_DP_C: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV; + need_stable_symbols = true; + break; + 
case INTEL_PIPE_CRC_SOURCE_NONE: + *val = 0; + break; + default: + return -EINVAL; + } + + /* + * When the pipe CRC tap point is after the transcoders we need + * to tweak symbol-level features to produce a deterministic series of + * symbols for a given frame. We need to reset those features only once + * a frame (instead of every nth symbol): + * - DC-balance: used to ensure a better clock recovery from the data + * link (SDVO) + * - DisplayPort scrambling: used for EMI reduction + */ + if (need_stable_symbols) { + uint32_t tmp = I915_READ(PORT_DFT2_G4X); + + tmp |= DC_BALANCE_RESET_VLV; + if (pipe == PIPE_A) + tmp |= PIPE_A_SCRAMBLE_RESET; + else + tmp |= PIPE_B_SCRAMBLE_RESET; + + I915_WRITE(PORT_DFT2_G4X, tmp); + } + + return 0; +} + +static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev, + enum pipe pipe, + enum intel_pipe_crc_source *source, + uint32_t *val) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + bool need_stable_symbols = false; + + if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { + int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); + if (ret) + return ret; + } + + switch (*source) { + case INTEL_PIPE_CRC_SOURCE_PIPE: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX; + break; + case INTEL_PIPE_CRC_SOURCE_TV: + if (!SUPPORTS_TV(dev)) + return -EINVAL; + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE; + break; + case INTEL_PIPE_CRC_SOURCE_DP_B: + if (!IS_G4X(dev)) + return -EINVAL; + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X; + need_stable_symbols = true; + break; + case INTEL_PIPE_CRC_SOURCE_DP_C: + if (!IS_G4X(dev)) + return -EINVAL; + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X; + need_stable_symbols = true; + break; + case INTEL_PIPE_CRC_SOURCE_DP_D: + if (!IS_G4X(dev)) + return -EINVAL; + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X; + need_stable_symbols = true; + break; + case INTEL_PIPE_CRC_SOURCE_NONE: + *val = 0; + break; + default: + return -EINVAL; + } + + /* + * When the pipe CRC tap point is after the transcoders we need + * to tweak symbol-level features to produce a deterministic series of + * symbols for a given frame.
We need to reset those features only once + * a frame (instead of every nth symbol): + * - DC-balance: used to ensure a better clock recovery from the data + * link (SDVO) + * - DisplayPort scrambling: used for EMI reduction + */ + if (need_stable_symbols) { + uint32_t tmp = I915_READ(PORT_DFT2_G4X); + + WARN_ON(!IS_G4X(dev)); + + I915_WRITE(PORT_DFT_I9XX, + I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET); + + if (pipe == PIPE_A) + tmp |= PIPE_A_SCRAMBLE_RESET; + else + tmp |= PIPE_B_SCRAMBLE_RESET; + + I915_WRITE(PORT_DFT2_G4X, tmp); + } + + return 0; +} + +static void vlv_undo_pipe_scramble_reset(struct drm_device *dev, + enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t tmp = I915_READ(PORT_DFT2_G4X); + + if (pipe == PIPE_A) + tmp &= ~PIPE_A_SCRAMBLE_RESET; + else + tmp &= ~PIPE_B_SCRAMBLE_RESET; + if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) + tmp &= ~DC_BALANCE_RESET_VLV; + I915_WRITE(PORT_DFT2_G4X, tmp); + +} + +static void g4x_undo_pipe_scramble_reset(struct drm_device *dev, + enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t tmp = I915_READ(PORT_DFT2_G4X); + + if (pipe == PIPE_A) + tmp &= ~PIPE_A_SCRAMBLE_RESET; + else + tmp &= ~PIPE_B_SCRAMBLE_RESET; + I915_WRITE(PORT_DFT2_G4X, tmp); + + if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) { + I915_WRITE(PORT_DFT_I9XX, + I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET); + } +} + +static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, uint32_t *val) { - switch (source) { + if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) + *source = INTEL_PIPE_CRC_SOURCE_PIPE; + + switch (*source) { case INTEL_PIPE_CRC_SOURCE_PLANE1: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK; break; case INTEL_PIPE_CRC_SOURCE_PLANE2: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK; break; - case INTEL_PIPE_CRC_SOURCE_PF: - return -EINVAL; case INTEL_PIPE_CRC_SOURCE_PIPE: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK; break; - default: + case INTEL_PIPE_CRC_SOURCE_NONE: *val = 0; break; + default: + return -EINVAL; } return 0; } -static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source source, +static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, uint32_t *val) { - switch (source) { + if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) + *source = INTEL_PIPE_CRC_SOURCE_PF; + + switch (*source) { case INTEL_PIPE_CRC_SOURCE_PLANE1: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB; break; @@ -1979,11 +2332,11 @@ static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source source, case INTEL_PIPE_CRC_SOURCE_PF: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; break; - case INTEL_PIPE_CRC_SOURCE_PIPE: - return -EINVAL; - default: + case INTEL_PIPE_CRC_SOURCE_NONE: *val = 0; break; + default: + return -EINVAL; } return 0; @@ -1997,9 +2350,6 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, u32 val; int ret; - if (!(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev))) - return -ENODEV; - if (pipe_crc->source == source) return 0; @@ -2007,10 +2357,16 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, if (pipe_crc->source && source) return -EINVAL; - if (IS_GEN5(dev) || IS_GEN6(dev)) - ret = ilk_pipe_crc_ctl_reg(source, &val); + if (IS_GEN2(dev)) + ret = i8xx_pipe_crc_ctl_reg(&source, &val); + else if (INTEL_INFO(dev)->gen < 5) + ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val); + else if (IS_VALLEYVIEW(dev)) + ret = vlv_pipe_crc_ctl_reg(dev,pipe, &source, &val); + else if (IS_GEN5(dev) || IS_GEN6(dev)) + ret = ilk_pipe_crc_ctl_reg(&source, 
&val); else - ret = ivb_pipe_crc_ctl_reg(source, &val); + ret = ivb_pipe_crc_ctl_reg(&source, &val); if (ret != 0) return ret; @@ -2026,8 +2382,10 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, if (!pipe_crc->entries) return -ENOMEM; - atomic_set(&pipe_crc->head, 0); - atomic_set(&pipe_crc->tail, 0); + spin_lock_irq(&pipe_crc->lock); + pipe_crc->head = 0; + pipe_crc->tail = 0; + spin_unlock_irq(&pipe_crc->lock); } pipe_crc->source = source; @@ -2037,13 +2395,24 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, /* real source -> none transition */ if (source == INTEL_PIPE_CRC_SOURCE_NONE) { + struct intel_pipe_crc_entry *entries; + DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", pipe_name(pipe)); intel_wait_for_vblank(dev, pipe); - kfree(pipe_crc->entries); + spin_lock_irq(&pipe_crc->lock); + entries = pipe_crc->entries; pipe_crc->entries = NULL; + spin_unlock_irq(&pipe_crc->lock); + + kfree(entries); + + if (IS_G4X(dev)) + g4x_undo_pipe_scramble_reset(dev, pipe); + else if (IS_VALLEYVIEW(dev)) + vlv_undo_pipe_scramble_reset(dev, pipe); } return 0; @@ -2738,7 +3107,8 @@ void intel_display_crc_init(struct drm_device *dev) for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) { struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i]; - atomic_set(&pipe_crc->available, 1); + pipe_crc->opened = false; + spin_lock_init(&pipe_crc->lock); init_waitqueue_head(&pipe_crc->wq); } } diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 437886641d9..0cab2d04513 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1290,12 +1290,9 @@ static int i915_load_modeset_init(struct drm_device *dev) * then we do not take part in VGA arbitration and the * vga_client_register() fails with -ENODEV. */ - if (!HAS_PCH_SPLIT(dev)) { - ret = vga_client_register(dev->pdev, dev, NULL, - i915_vga_set_decode); - if (ret && ret != -ENODEV) - goto out; - } + ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); + if (ret && ret != -ENODEV) + goto out; intel_register_dsm_handler(); @@ -1314,10 +1311,7 @@ static int i915_load_modeset_init(struct drm_device *dev) if (ret) goto cleanup_gem_stolen; - intel_init_power_well(dev); - - /* Keep VGA alive until i915_disable_vga_mem() */ - intel_display_power_get(dev, POWER_DOMAIN_VGA); + intel_power_domains_init_hw(dev); /* Important: The output setup functions called by modeset_init need * working irqs for e.g. gmbus and dp aux transfers. */ @@ -1358,13 +1352,6 @@ static int i915_load_modeset_init(struct drm_device *dev) */ intel_fbdev_initial_config(dev); - /* - * Must do this after fbcon init so that - * vgacon_save_screen() works during the handover. - */ - i915_disable_vga_mem(dev); - intel_display_power_put(dev, POWER_DOMAIN_VGA); - /* Only enable hotplug handling once the fbdev is fully set up. 
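The VGA and power-well calls above fold into a reference-counted power-domains framework (intel_power_domains_init_hw(), intel_display_power_get()/put()). A minimal sketch of that counting scheme, with invented names and a single well, assuming the mutex-protected count the patch introduces:

    #include <linux/mutex.h>
    #include <linux/bug.h>

    struct example_power_well { int count; };

    static struct example_power_well well;
    static DEFINE_MUTEX(well_lock);

    static void example_power_get(void)
    {
        mutex_lock(&well_lock);
        if (well.count++ == 0) {
            /* first user: enable the hardware well here */
        }
        mutex_unlock(&well_lock);
    }

    static void example_power_put(void)
    {
        mutex_lock(&well_lock);
        WARN_ON(well.count == 0);
        if (--well.count == 0) {
            /* last user gone: disable the hardware well */
        }
        mutex_unlock(&well_lock);
    }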
*/ dev_priv->enable_hotplug_processing = true; @@ -1653,7 +1640,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) } if (HAS_POWER_WELL(dev)) - i915_init_power_well(dev); + intel_power_domains_init(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) { ret = i915_load_modeset_init(dev); @@ -1681,7 +1668,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) out_power_well: if (HAS_POWER_WELL(dev)) - i915_remove_power_well(dev); + intel_power_domains_remove(dev); drm_vblank_cleanup(dev); out_gem_unload: if (dev_priv->mm.inactive_shrinker.scan_objects) @@ -1723,8 +1710,8 @@ int i915_driver_unload(struct drm_device *dev) /* The i915.ko module is still not prepared to be loaded when * the power well is not enabled, so just enable it in case * we're going to unload/reload. */ - intel_set_power_well(dev, true); - i915_remove_power_well(dev); + intel_display_set_init_power(dev, true); + intel_power_domains_remove(dev); } i915_teardown_sysfs(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 1060a96d218..989be12cdd6 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -336,6 +336,24 @@ static const struct intel_device_info intel_haswell_m_info = { .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, }; +static const struct intel_device_info intel_broadwell_d_info = { + .is_preliminary = 1, + .gen = 8, .num_pipes = 3, + .need_gfx_hws = 1, .has_hotplug = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, + .has_llc = 1, + .has_ddi = 1, +}; + +static const struct intel_device_info intel_broadwell_m_info = { + .is_preliminary = 1, + .gen = 8, .is_mobile = 1, .num_pipes = 3, + .need_gfx_hws = 1, .has_hotplug = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, + .has_llc = 1, + .has_ddi = 1, +}; + /* * Make sure any device matches here are from most specific to most * general. For example, since the Quanta match is based on the subsystem @@ -367,7 +385,9 @@ static const struct intel_device_info intel_haswell_m_info = { INTEL_HSW_D_IDS(&intel_haswell_d_info), \ INTEL_HSW_M_IDS(&intel_haswell_m_info), \ INTEL_VLV_M_IDS(&intel_valleyview_m_info), \ - INTEL_VLV_D_IDS(&intel_valleyview_d_info) + INTEL_VLV_D_IDS(&intel_valleyview_d_info), \ + INTEL_BDW_M_IDS(&intel_broadwell_m_info), \ + INTEL_BDW_D_IDS(&intel_broadwell_d_info) static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_PCI_IDS, @@ -428,6 +448,12 @@ void intel_detect_pch(struct drm_device *dev) DRM_DEBUG_KMS("Found LynxPoint PCH\n"); WARN_ON(!IS_HASWELL(dev)); WARN_ON(IS_ULT(dev)); + } else if (IS_BROADWELL(dev)) { + dev_priv->pch_type = PCH_LPT; + dev_priv->pch_id = + INTEL_PCH_LPT_LP_DEVICE_ID_TYPE; + DRM_DEBUG_KMS("This is Broadwell, assuming " + "LynxPoint LP PCH\n"); } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_LPT; DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); @@ -452,6 +478,12 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) if (INTEL_INFO(dev)->gen < 6) return 0; + /* Until we get further testing... */ + if (IS_GEN8(dev)) { + WARN_ON(!i915_preliminary_hw_support); + return 0; + } + if (i915_semaphores >= 0) return i915_semaphores; @@ -477,7 +509,7 @@ static int i915_drm_freeze(struct drm_device *dev) /* We do a lot of poking in a lot of registers, make sure they work * properly. 
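The new Broadwell entries above follow the driver's established pattern: one static intel_device_info per variant, referenced from a PCI ID table that is matched most-specific-first. A reduced sketch of that table-driven lookup (struct fields simplified, device IDs illustrative only):

    #include <linux/kernel.h>
    #include <linux/types.h>

    struct example_info { int gen; bool is_mobile; bool has_llc; };

    static const struct example_info bdw_d = { .gen = 8, .has_llc = true };
    static const struct example_info bdw_m = { .gen = 8, .is_mobile = true, .has_llc = true };

    static const struct { u16 device; const struct example_info *info; } example_ids[] = {
        { 0x1602, &bdw_d }, /* device IDs illustrative only */
        { 0x1606, &bdw_m },
    };

    static const struct example_info *example_lookup(u16 device)
    {
        size_t i;

        for (i = 0; i < ARRAY_SIZE(example_ids); i++)
            if (example_ids[i].device == device)
                return example_ids[i].info;
        return NULL;
    }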
*/ hsw_disable_package_c8(dev_priv); - intel_set_power_well(dev, true); + intel_display_set_init_power(dev, true); drm_kms_helper_poll_disable(dev); @@ -508,6 +540,8 @@ static int i915_drm_freeze(struct drm_device *dev) intel_modeset_suspend_hw(dev); } + i915_gem_suspend_gtt_mappings(dev); + i915_save_state(dev); intel_opregion_fini(dev); @@ -595,7 +629,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) mutex_unlock(&dev->struct_mutex); } - intel_init_power_well(dev); + intel_power_domains_init_hw(dev); i915_restore_state(dev); intel_opregion_setup(dev); @@ -656,6 +690,9 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) static int i915_drm_thaw(struct drm_device *dev) { + if (drm_core_check_feature(dev, DRIVER_MODESET)) + i915_check_and_clear_faults(dev); + return __i915_drm_thaw(dev, true); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2ea33eebf01..ccdbecca070 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -54,6 +54,7 @@ #define DRIVER_DATE "20080730" enum pipe { + INVALID_PIPE = -1, PIPE_A = 0, PIPE_B, PIPE_C, @@ -98,14 +99,29 @@ enum intel_display_power_domain { POWER_DOMAIN_TRANSCODER_A, POWER_DOMAIN_TRANSCODER_B, POWER_DOMAIN_TRANSCODER_C, - POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF, + POWER_DOMAIN_TRANSCODER_EDP, POWER_DOMAIN_VGA, + POWER_DOMAIN_INIT, + + POWER_DOMAIN_NUM, }; +#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1) + #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \ ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER) -#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A) +#define POWER_DOMAIN_TRANSCODER(tran) \ + ((tran) == TRANSCODER_EDP ? 
POWER_DOMAIN_TRANSCODER_EDP : \ + (tran) + POWER_DOMAIN_TRANSCODER_A) + +#define HSW_ALWAYS_ON_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PIPE_A) | \ + BIT(POWER_DOMAIN_TRANSCODER_EDP)) +#define BDW_ALWAYS_ON_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PIPE_A) | \ + BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ + BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER)) enum hpd_pin { HPD_NONE = 0, @@ -231,6 +247,7 @@ struct intel_opregion { struct opregion_asle __iomem *asle; void __iomem *vbt; u32 __iomem *lid_state; + struct work_struct asle_work; }; #define OPREGION_SIZE (8*1024) @@ -288,6 +305,7 @@ struct drm_i915_error_state { u32 cpu_ring_tail[I915_NUM_RINGS]; u32 error; /* gen6+ */ u32 err_int; /* gen7 */ + u32 bbstate[I915_NUM_RINGS]; u32 instpm[I915_NUM_RINGS]; u32 instps[I915_NUM_RINGS]; u32 extra_instdone[I915_NUM_INSTDONE_REG]; @@ -516,10 +534,12 @@ struct i915_address_space { /* FIXME: Need a more generic return type */ gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, - enum i915_cache_level level); + enum i915_cache_level level, + bool valid); /* Create a valid PTE */ void (*clear_range)(struct i915_address_space *vm, unsigned int first_entry, - unsigned int num_entries); + unsigned int num_entries, + bool use_scratch); void (*insert_entries)(struct i915_address_space *vm, struct sg_table *st, unsigned int first_entry, @@ -559,10 +579,21 @@ struct i915_gtt { struct i915_hw_ppgtt { struct i915_address_space base; unsigned num_pd_entries; - struct page **pt_pages; - uint32_t pd_offset; - dma_addr_t *pt_dma_addr; - + union { + struct page **pt_pages; + struct page *gen8_pt_pages; + }; + struct page *pd_pages; + int num_pd_pages; + int num_pt_pages; + union { + uint32_t pd_offset; + dma_addr_t pd_dma_addr[4]; + }; + union { + dma_addr_t *pt_dma_addr; + dma_addr_t *gen8_pt_dma_addr[4]; + }; int (*enable)(struct drm_device *dev); }; @@ -729,6 +760,9 @@ struct i915_suspend_saved_registers { u32 saveBLC_HIST_CTL; u32 saveBLC_PWM_CTL; u32 saveBLC_PWM_CTL2; + u32 saveBLC_HIST_CTL_B; + u32 saveBLC_PWM_CTL_B; + u32 saveBLC_PWM_CTL2_B; u32 saveBLC_CPU_PWM_CTL; u32 saveBLC_CPU_PWM_CTL2; u32 saveFPB0; @@ -898,11 +932,21 @@ struct intel_ilk_power_mgmt { /* Power well structure for haswell */ struct i915_power_well { - struct drm_device *device; - spinlock_t lock; /* power well enable/disable usage count */ int count; - int i915_request; +}; + +#define I915_MAX_POWER_WELLS 1 + +struct i915_power_domains { + /* + * Power wells needed for initialization at driver init and suspend + * time are on. They are kept on until after the first modeset. + */ + bool init_power_on; + + struct mutex lock; + struct i915_power_well power_wells[I915_MAX_POWER_WELLS]; }; struct i915_dri1_state { @@ -1224,6 +1268,12 @@ enum intel_pipe_crc_source { INTEL_PIPE_CRC_SOURCE_PLANE2, INTEL_PIPE_CRC_SOURCE_PF, INTEL_PIPE_CRC_SOURCE_PIPE, + /* TV/DP on pre-gen5/vlv can't use the pipe source. 
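The intel_pipe_crc changes just below replace the atomics with a spinlock-protected head/tail pair; because INTEL_PIPE_CRC_ENTRIES_NR is a power of two, CIRC_CNT() plus a masked increment is all the ring needs. A self-contained sketch of the locked consumer step that i915_pipe_crc_read() performs:

    #include <linux/circ_buf.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define EXAMPLE_NR 128 /* power of two, like INTEL_PIPE_CRC_ENTRIES_NR */

    struct example_ring {
        spinlock_t lock;
        int head, tail;
        u32 slot[EXAMPLE_NR];
    };

    /* Pop one entry if available: the locked CIRC_CNT() check and the
     * masked tail advance mirror the patch. */
    static bool example_pop(struct example_ring *r, u32 *out)
    {
        bool ok = false;

        spin_lock_irq(&r->lock);
        if (CIRC_CNT(r->head, r->tail, EXAMPLE_NR)) {
            *out = r->slot[r->tail];
            r->tail = (r->tail + 1) & (EXAMPLE_NR - 1);
            ok = true;
        }
        spin_unlock_irq(&r->lock);
        return ok;
    }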
*/ + INTEL_PIPE_CRC_SOURCE_TV, + INTEL_PIPE_CRC_SOURCE_DP_B, + INTEL_PIPE_CRC_SOURCE_DP_C, + INTEL_PIPE_CRC_SOURCE_DP_D, + INTEL_PIPE_CRC_SOURCE_AUTO, INTEL_PIPE_CRC_SOURCE_MAX, }; @@ -1234,10 +1284,11 @@ struct intel_pipe_crc_entry { #define INTEL_PIPE_CRC_ENTRIES_NR 128 struct intel_pipe_crc { - atomic_t available; /* exclusive access to the device */ + spinlock_t lock; + bool opened; /* exclusive access to the result file */ struct intel_pipe_crc_entry *entries; enum intel_pipe_crc_source source; - atomic_t head, tail; + int head, tail; wait_queue_head_t wq; }; @@ -1286,7 +1337,10 @@ typedef struct drm_i915_private { struct mutex dpio_lock; /** Cached value of IMR to avoid reads in updating the bitfield */ - u32 irq_mask; + union { + u32 irq_mask; + u32 de_irq_mask[I915_MAX_PIPES]; + }; u32 gt_irq_mask; u32 pm_irq_mask; @@ -1365,6 +1419,10 @@ typedef struct drm_i915_private { struct drm_crtc *pipe_to_crtc_mapping[3]; wait_queue_head_t pending_flip_queue; +#ifdef CONFIG_DEBUG_FS + struct intel_pipe_crc pipe_crc[I915_MAX_PIPES]; +#endif + int num_shared_dpll; struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; struct intel_ddi_plls ddi_plls; @@ -1390,8 +1448,7 @@ typedef struct drm_i915_private { * mchdev_lock in intel_pm.c */ struct intel_ilk_power_mgmt ips; - /* Haswell power well */ - struct i915_power_well power_well; + struct i915_power_domains power_domains; struct i915_psr psr; @@ -1445,10 +1502,6 @@ typedef struct drm_i915_private { struct i915_dri1_state dri1; /* Old ums support infrastructure, same warning applies. */ struct i915_ums_state ums; - -#ifdef CONFIG_DEBUG_FS - struct intel_pipe_crc pipe_crc[I915_MAX_PIPES]; -#endif } drm_i915_private_t; static inline struct drm_i915_private *to_i915(const struct drm_device *dev) @@ -1698,6 +1751,7 @@ struct drm_i915_file_private { (dev)->pdev->device == 0x010A) #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) +#define IS_BROADWELL(dev) (INTEL_INFO(dev)->gen == 8) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ ((dev)->pdev->device & 0xFF00) == 0x0C00) @@ -1719,6 +1773,7 @@ struct drm_i915_file_private { #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) +#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8) #define RENDER_RING (1<<RCS) #define BSD_RING (1<<VCS) @@ -1755,12 +1810,13 @@ struct drm_i915_file_private { #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) -#define HAS_IPS(dev) (IS_ULT(dev)) +#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev)) #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) -#define HAS_POWER_WELL(dev) (IS_HASWELL(dev)) +#define HAS_POWER_WELL(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev)) #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) -#define HAS_PSR(dev) (IS_HASWELL(dev)) +#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev)) +#define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */ #define INTEL_PCH_DEVICE_ID_MASK 0xff00 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 @@ -1784,27 +1840,6 @@ struct drm_i915_file_private { #include "i915_trace.h" -/** - * RC6 is a special power stage which allows the GPU to enter an very - * low-voltage mode when idle, using down to 0V while at this stage. 
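Earlier in this header hunk the cached IMR value becomes an anonymous union, so gen8 code can keep one mask per display pipe while older paths keep using the single combined mask; both views alias the same storage. In isolation:

    #include <linux/types.h>

    #define EXAMPLE_MAX_PIPES 3 /* stand-in for I915_MAX_PIPES */

    union example_irq_mask_cache {
        u32 irq_mask;                       /* pre-gen8: one combined mask */
        u32 de_irq_mask[EXAMPLE_MAX_PIPES]; /* gen8: per-pipe; [0] aliases irq_mask */
    };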
This - * stage is entered automatically when the GPU is idle when RC6 support is - * enabled, and as soon as new workload arises GPU wakes up automatically as well. - * - * There are different RC6 modes available in Intel GPU, which differentiate - * among each other with the latency required to enter and leave RC6 and - * voltage consumed by the GPU in different states. - * - * The combination of the following flags define which states GPU is allowed - * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and - * RC6pp is deepest RC6. Their support by hardware varies according to the - * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one - * which brings the most power savings; deeper states save more power, but - * require higher latency to switch to and wake up. - */ -#define INTEL_RC6_ENABLE (1<<0) -#define INTEL_RC6p_ENABLE (1<<1) -#define INTEL_RC6pp_ENABLE (1<<2) - extern const struct drm_ioctl_desc i915_ioctls[]; extern int i915_max_ioctl; extern unsigned int i915_fbpercrtc __always_unused; @@ -1878,10 +1913,10 @@ extern void intel_uncore_check_errors(struct drm_device *dev); extern void intel_uncore_fini(struct drm_device *dev); void -i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); +i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask); void -i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); +i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask); /* i915_gem.c */ int i915_gem_init_ioctl(struct drm_device *dev, void *data, @@ -2175,6 +2210,8 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_object *obj); +void i915_check_and_clear_faults(struct drm_device *dev); +void i915_gem_suspend_gtt_mappings(struct drm_device *dev); void i915_gem_restore_gtt_mappings(struct drm_device *dev); int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 34df59b660f..12bbd5eac70 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -261,7 +261,7 @@ i915_gem_dumb_create(struct drm_file *file, struct drm_mode_create_dumb *args) { /* have to work out size/pitch and return them */ - args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64); + args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64); args->size = args->pitch * args->height; return i915_gem_create(file, dev, args->size, &args->handle); @@ -2954,6 +2954,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg, obj->stride, obj->tiling_mode); switch (INTEL_INFO(dev)->gen) { + case 8: case 7: case 6: case 5: @@ -4361,6 +4362,8 @@ void i915_gem_init_swizzling(struct drm_device *dev) I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); else if (IS_GEN7(dev)) I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); + else if (IS_GEN8(dev)) + I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW)); else BUG(); } diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index cc619c13877..72a3df32292 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -117,6 +117,9 @@ static int get_context_size(struct drm_device *dev) else ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; break; + case 8: + ret = 
GEN8_CXT_TOTAL_SIZE; + break; default: BUG(); } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 0ce0d47e4b0..885d595e0e0 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -212,6 +212,7 @@ static int relocate_entry_cpu(struct drm_i915_gem_object *obj, struct drm_i915_gem_relocation_entry *reloc) { + struct drm_device *dev = obj->base.dev; uint32_t page_offset = offset_in_page(reloc->offset); char *vaddr; int ret = -EINVAL; @@ -223,6 +224,19 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj, vaddr = kmap_atomic(i915_gem_object_get_page(obj, reloc->offset >> PAGE_SHIFT)); *(uint32_t *)(vaddr + page_offset) = reloc->delta; + + if (INTEL_INFO(dev)->gen >= 8) { + page_offset = offset_in_page(page_offset + sizeof(uint32_t)); + + if (page_offset == 0) { + kunmap_atomic(vaddr); + vaddr = kmap_atomic(i915_gem_object_get_page(obj, + (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT)); + } + + *(uint32_t *)(vaddr + page_offset) = 0; + } + kunmap_atomic(vaddr); return 0; @@ -253,6 +267,21 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj, reloc_entry = (uint32_t __iomem *) (reloc_page + offset_in_page(reloc->offset)); iowrite32(reloc->delta, reloc_entry); + + if (INTEL_INFO(dev)->gen >= 8) { + reloc_entry += 1; + + if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) { + io_mapping_unmap_atomic(reloc_page); + reloc_page = io_mapping_map_atomic_wc( + dev_priv->gtt.mappable, + reloc->offset + sizeof(uint32_t)); + reloc_entry = reloc_page; + } + + iowrite32(0, reloc_entry); + } + io_mapping_unmap_atomic(reloc_page); return 0; @@ -323,7 +352,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, return 0; /* Check that the relocation address is valid... */ - if (unlikely(reloc->offset > obj->base.size - 4)) { + if (unlikely(reloc->offset > + obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) { DRM_DEBUG("Relocation beyond object bounds: " "obj %p target %d offset %d size %d.\n", obj, reloc->target_handle, @@ -1116,8 +1146,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure * batch" bit. Hence we need to pin secure batches into the global gtt. - * hsw should have this fixed, but let's be paranoid and do it - * unconditionally for now. */ + * hsw should have this fixed, but bdw mucks it up again. 
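relocate_entry_cpu() above grows a gen8 branch because relocations become 8 bytes wide and the upper dword can begin on the following page. A standalone sketch of that boundary handling, using a hypothetical helper over an array of backing pages:

    #include <linux/highmem.h>
    #include <linux/kernel.h>

    /* Write a 64-bit relocation at 'offset' into a page array, remapping
     * when the upper 32 bits land on the next page, as the patch does. */
    static void example_write_reloc64(struct page **pages,
                                      unsigned long offset, u64 val)
    {
        void *vaddr = kmap_atomic(pages[offset >> PAGE_SHIFT]);
        unsigned long off = offset_in_page(offset);

        *(u32 *)(vaddr + off) = lower_32_bits(val);

        off = offset_in_page(off + sizeof(u32));
        if (off == 0) { /* crossed into the next page */
            kunmap_atomic(vaddr);
            vaddr = kmap_atomic(pages[(offset + sizeof(u32)) >> PAGE_SHIFT]);
        }
        *(u32 *)(vaddr + off) = upper_32_bits(val);
        kunmap_atomic(vaddr);
    }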
*/ if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index e999496532c..3620a1b0a73 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -30,6 +30,8 @@ #define GEN6_PPGTT_PD_ENTRIES 512 #define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t)) +typedef uint64_t gen8_gtt_pte_t; +typedef gen8_gtt_pte_t gen8_ppgtt_pde_t; /* PPGTT stuff */ #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) @@ -57,10 +59,46 @@ #define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) #define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) +#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t)) +#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t)) +#define GEN8_LEGACY_PDPS 4 + +#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD) +#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */ +#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */ +#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */ + +static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + bool valid) +{ + gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0; + pte |= addr; + if (level != I915_CACHE_NONE) + pte |= PPAT_CACHED_INDEX; + else + pte |= PPAT_UNCACHED_INDEX; + return pte; +} + +static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev, + dma_addr_t addr, + enum i915_cache_level level) +{ + gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW; + pde |= addr; + if (level != I915_CACHE_NONE) + pde |= PPAT_CACHED_PDE_INDEX; + else + pde |= PPAT_UNCACHED_INDEX; + return pde; +} + static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, - enum i915_cache_level level) + enum i915_cache_level level, + bool valid) { - gen6_gtt_pte_t pte = GEN6_PTE_VALID; + gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= GEN6_PTE_ADDR_ENCODE(addr); switch (level) { @@ -79,9 +117,10 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, } static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, - enum i915_cache_level level) + enum i915_cache_level level, + bool valid) { - gen6_gtt_pte_t pte = GEN6_PTE_VALID; + gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= GEN6_PTE_ADDR_ENCODE(addr); switch (level) { @@ -105,9 +144,10 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, #define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2) static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, - enum i915_cache_level level) + enum i915_cache_level level, + bool valid) { - gen6_gtt_pte_t pte = GEN6_PTE_VALID; + gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= GEN6_PTE_ADDR_ENCODE(addr); /* Mark the page as writeable. Other platforms don't have a @@ -122,9 +162,10 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, } static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, - enum i915_cache_level level) + enum i915_cache_level level, + bool valid) { - gen6_gtt_pte_t pte = GEN6_PTE_VALID; + gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= HSW_PTE_ADDR_ENCODE(addr); if (level != I915_CACHE_NONE) @@ -134,9 +175,10 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, } static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, - enum i915_cache_level level) + enum i915_cache_level level, + bool valid) { - gen6_gtt_pte_t pte = GEN6_PTE_VALID; + gen6_gtt_pte_t pte = valid ? 
GEN6_PTE_VALID : 0; pte |= HSW_PTE_ADDR_ENCODE(addr); switch (level) { @@ -153,6 +195,257 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, return pte; } +/* Broadwell Page Directory Pointer Descriptors */ +static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry, + uint64_t val) +{ + int ret; + + BUG_ON(entry >= 4); + + ret = intel_ring_begin(ring, 6); + if (ret) + return ret; + + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry)); + intel_ring_emit(ring, (u32)(val >> 32)); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry)); + intel_ring_emit(ring, (u32)(val)); + intel_ring_advance(ring); + + return 0; +} + +static int gen8_ppgtt_enable(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; + int i, j, ret; + + /* bit of a hack to find the actual last used pd */ + int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE; + + for_each_ring(ring, dev_priv, j) { + I915_WRITE(RING_MODE_GEN7(ring), + _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); + } + + for (i = used_pd - 1; i >= 0; i--) { + dma_addr_t addr = ppgtt->pd_dma_addr[i]; + for_each_ring(ring, dev_priv, j) { + ret = gen8_write_pdp(ring, i, addr); + if (ret) + return ret; + } + } + return 0; +} + +static void gen8_ppgtt_clear_range(struct i915_address_space *vm, + unsigned first_entry, + unsigned num_entries, + bool use_scratch) +{ + struct i915_hw_ppgtt *ppgtt = + container_of(vm, struct i915_hw_ppgtt, base); + gen8_gtt_pte_t *pt_vaddr, scratch_pte; + unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE; + unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE; + unsigned last_pte, i; + + scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr, + I915_CACHE_LLC, use_scratch); + + while (num_entries) { + struct page *page_table = &ppgtt->gen8_pt_pages[act_pt]; + + last_pte = first_pte + num_entries; + if (last_pte > GEN8_PTES_PER_PAGE) + last_pte = GEN8_PTES_PER_PAGE; + + pt_vaddr = kmap_atomic(page_table); + + for (i = first_pte; i < last_pte; i++) + pt_vaddr[i] = scratch_pte; + + kunmap_atomic(pt_vaddr); + + num_entries -= last_pte - first_pte; + first_pte = 0; + act_pt++; + } +} + +static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, + struct sg_table *pages, + unsigned first_entry, + enum i915_cache_level cache_level) +{ + struct i915_hw_ppgtt *ppgtt = + container_of(vm, struct i915_hw_ppgtt, base); + gen8_gtt_pte_t *pt_vaddr; + unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE; + unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE; + struct sg_page_iter sg_iter; + + pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]); + for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { + dma_addr_t page_addr; + + page_addr = sg_dma_address(sg_iter.sg) + + (sg_iter.sg_pgoffset << PAGE_SHIFT); + pt_vaddr[act_pte] = gen8_pte_encode(page_addr, cache_level, + true); + if (++act_pte == GEN8_PTES_PER_PAGE) { + kunmap_atomic(pt_vaddr); + act_pt++; + pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]); + act_pte = 0; + + } + } + kunmap_atomic(pt_vaddr); +} + +static void gen8_ppgtt_cleanup(struct i915_address_space *vm) +{ + struct i915_hw_ppgtt *ppgtt = + container_of(vm, struct i915_hw_ppgtt, base); + int i, j; + + for (i = 0; i < ppgtt->num_pd_pages ; i++) { + if (ppgtt->pd_dma_addr[i]) { + pci_unmap_page(ppgtt->base.dev->pdev, + ppgtt->pd_dma_addr[i], + PAGE_SIZE, 
PCI_DMA_BIDIRECTIONAL); + + for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { + dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j]; + if (addr) + pci_unmap_page(ppgtt->base.dev->pdev, + addr, + PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + + } + } + kfree(ppgtt->gen8_pt_dma_addr[i]); + } + + __free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT); + __free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT); +} + +/** + * GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a + * net effect resembling a 2-level page table in normal x86 terms. Each PDP + * represents 1GB of memory + * 4 * 512 * 512 * 4096 = 4GB legacy 32b address space. + * + * TODO: Do something with the size parameter + **/ +static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) +{ + struct page *pt_pages; + int i, j, ret = -ENOMEM; + const int max_pdp = DIV_ROUND_UP(size, 1 << 30); + const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp; + + if (size % (1<<30)) + DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size); + + /* FIXME: split allocation into smaller pieces. For now we only ever do + * this once, but with full PPGTT, the multiple contiguous allocations + * will be bad. + */ + ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT)); + if (!ppgtt->pd_pages) + return -ENOMEM; + + pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT)); + if (!pt_pages) { + __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT)); + return -ENOMEM; + } + + ppgtt->gen8_pt_pages = pt_pages; + ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT); + ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT); + ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE; + ppgtt->enable = gen8_ppgtt_enable; + ppgtt->base.clear_range = gen8_ppgtt_clear_range; + ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; + ppgtt->base.cleanup = gen8_ppgtt_cleanup; + + BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS); + + /* + * - Create a mapping for the page directories. + * - For each page directory: + * allocate space for page table mappings. + * map each page table + */ + for (i = 0; i < max_pdp; i++) { + dma_addr_t temp; + temp = pci_map_page(ppgtt->base.dev->pdev, + &ppgtt->pd_pages[i], 0, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp)) + goto err_out; + + ppgtt->pd_dma_addr[i] = temp; + + ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL); + if (!ppgtt->gen8_pt_dma_addr[i]) + goto err_out; + + for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { + struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j]; + temp = pci_map_page(ppgtt->base.dev->pdev, + p, 0, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + + if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp)) + goto err_out; + + ppgtt->gen8_pt_dma_addr[i][j] = temp; + } + } + + /* For now, the PPGTT helper functions all require that the PDEs are + * plugged in correctly. So we do that now/here. 
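The function comment above fixes the gen8 legacy geometry: up to 4 PDPs, each naming one page of 512 PDEs, each PDE one page of 512 eight-byte PTEs, each PTE one 4KB page. The same arithmetic, spelled out under those assumptions:

    #include <linux/types.h>

    /* 4 PDPs x 512 PDEs x 512 PTEs x 4096 bytes = 2^32 bytes = 4GB. */
    static inline u64 example_gen8_legacy_va_size(void)
    {
        const u64 entries_per_page = 4096 / sizeof(u64); /* 512 */

        return 4 * entries_per_page * entries_per_page * 4096;
    }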
For aliasing PPGTT, we + * will never need to touch the PDEs again */ for (i = 0; i < max_pdp; i++) { + gen8_ppgtt_pde_t *pd_vaddr; + pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]); + for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { + dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j]; + pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr, + I915_CACHE_LLC); + } + kunmap_atomic(pd_vaddr); + } + + ppgtt->base.clear_range(&ppgtt->base, 0, + ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE, + true); + + DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n", + ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp); + DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n", + ppgtt->num_pt_pages, + (ppgtt->num_pt_pages - num_pt_pages) + + size % (1<<30)); + return 0; + +err_out: + ppgtt->base.cleanup(&ppgtt->base); + return ret; +} + static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt) { struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private; @@ -236,7 +529,8 @@ static int gen6_ppgtt_enable(struct drm_device *dev) /* PPGTT support for Sandybridge/Gen6 and later */ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, unsigned first_entry, - unsigned num_entries) + unsigned num_entries, + bool use_scratch) { struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); @@ -245,7 +539,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; unsigned last_pte, i; - scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); + scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true); while (num_entries) { last_pte = first_pte + num_entries; @@ -282,7 +576,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, dma_addr_t page_addr; page_addr = sg_page_iter_dma_address(&sg_iter); - pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level); + pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true); if (++act_pte == I915_PPGTT_PT_ENTRIES) { kunmap_atomic(pt_vaddr); act_pt++; @@ -367,7 +661,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) } ppgtt->base.clear_range(&ppgtt->base, 0, - ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES); + ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true); ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); @@ -404,6 +698,8 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) if (INTEL_INFO(dev)->gen < 8) ret = gen6_ppgtt_init(ppgtt); + else if (IS_GEN8(dev)) + ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total); else BUG(); @@ -444,7 +740,8 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, { ppgtt->base.clear_range(&ppgtt->base, i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, - obj->base.size >> PAGE_SHIFT); + obj->base.size >> PAGE_SHIFT, + true); } extern int intel_iommu_gfx_mapped; @@ -485,15 +782,65 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) dev_priv->mm.interruptible = interruptible; } +void i915_check_and_clear_faults(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + int i; + + if (INTEL_INFO(dev)->gen < 6) + return; + + for_each_ring(ring, dev_priv, i) { + u32 fault_reg; + fault_reg = I915_READ(RING_FAULT_REG(ring)); + if (fault_reg & RING_FAULT_VALID) { + DRM_DEBUG_DRIVER("Unexpected fault\n" + "\tAddr: 0x%08lx\n" + "\tAddress space: %s\n" + "\tSource ID: %d\n" + "\tType: %d\n", + fault_reg & PAGE_MASK, + fault_reg &
RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT", + RING_FAULT_SRCID(fault_reg), + RING_FAULT_FAULT_TYPE(fault_reg)); + I915_WRITE(RING_FAULT_REG(ring), + fault_reg & ~RING_FAULT_VALID); + } + } + POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS])); +} + +void i915_gem_suspend_gtt_mappings(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* Don't bother messing with faults pre GEN6 as we have little + * documentation supporting that it's a good idea. + */ + if (INTEL_INFO(dev)->gen < 6) + return; + + i915_check_and_clear_faults(dev); + + dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, + dev_priv->gtt.base.start / PAGE_SIZE, + dev_priv->gtt.base.total / PAGE_SIZE, + false); +} + void i915_gem_restore_gtt_mappings(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; + i915_check_and_clear_faults(dev); + /* First fill our portion of the GTT with scratch pages */ dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, dev_priv->gtt.base.start / PAGE_SIZE, - dev_priv->gtt.base.total / PAGE_SIZE); + dev_priv->gtt.base.total / PAGE_SIZE, + true); list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { i915_gem_clflush_object(obj, obj->pin_display); @@ -516,6 +863,57 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) return 0; } +static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte) +{ +#ifdef writeq + writeq(pte, addr); +#else + iowrite32((u32)pte, addr); + iowrite32(pte >> 32, addr + 4); +#endif +} + +static void gen8_ggtt_insert_entries(struct i915_address_space *vm, + struct sg_table *st, + unsigned int first_entry, + enum i915_cache_level level) +{ + struct drm_i915_private *dev_priv = vm->dev->dev_private; + gen8_gtt_pte_t __iomem *gtt_entries = + (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; + int i = 0; + struct sg_page_iter sg_iter; + dma_addr_t addr; + + for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { + addr = sg_dma_address(sg_iter.sg) + + (sg_iter.sg_pgoffset << PAGE_SHIFT); + gen8_set_pte(&gtt_entries[i], + gen8_pte_encode(addr, level, true)); + i++; + } + + /* + * XXX: This serves as a posting read to make sure that the PTE has + * actually been updated. There is some concern that even though + * registers and PTEs are within the same BAR that they are potentially + * of NUMA access patterns. Therefore, even with the way we assume + * hardware should work, we must keep this posting read for paranoia. + */ + if (i != 0) + WARN_ON(readq(&gtt_entries[i-1]) + != gen8_pte_encode(addr, level, true)); + +#if 0 /* TODO: Still needed on GEN8? */ + /* This next bit makes the above posting read even more important. We + * want to flush the TLBs only after we're certain all the PTE updates + * have finished. + */ + I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + POSTING_READ(GFX_FLSH_CNTL_GEN6); +#endif +} + /* * Binds an object into the global gtt with the specified cache level.
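The use_scratch flag threaded through clear_range() above selects between two kinds of "empty" entry: at runtime stale PTEs keep pointing at the scratch page with the valid bit set, while i915_gem_suspend_gtt_mappings() writes them without it, so any stray access faults and i915_check_and_clear_faults() can report it on resume. A schematic version (bit layout invented for illustration):

    #include <linux/types.h>

    /* Same scratch address either way; the flag only controls the
     * (illustrative) valid bit in bit 0. */
    static void example_clear_range(u64 *gtt, unsigned int first,
                                    unsigned int count, u64 scratch_addr,
                                    bool use_scratch)
    {
        u64 pte = scratch_addr | (use_scratch ? 1 : 0);
        unsigned int i;

        for (i = 0; i < count; i++)
            gtt[first + i] = pte;
    }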
The object + * will be accessible to the GPU via commands whose operands reference offsets @@ -536,7 +934,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { addr = sg_page_iter_dma_address(&sg_iter); - iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]); + iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]); i++; } @@ -548,7 +946,7 @@ */ if (i != 0) WARN_ON(readl(&gtt_entries[i-1]) != - vm->pte_encode(addr, level)); + vm->pte_encode(addr, level, true)); /* This next bit makes the above posting read even more important. We * want to flush the TLBs only after we're certain all the PTE updates @@ -558,9 +956,34 @@ POSTING_READ(GFX_FLSH_CNTL_GEN6); } +static void gen8_ggtt_clear_range(struct i915_address_space *vm, + unsigned int first_entry, + unsigned int num_entries, + bool use_scratch) +{ + struct drm_i915_private *dev_priv = vm->dev->dev_private; + gen8_gtt_pte_t scratch_pte, __iomem *gtt_base = + (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; + const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; + int i; + + if (WARN(num_entries > max_entries, + "First entry = %d; Num entries = %d (max=%d)\n", + first_entry, num_entries, max_entries)) + num_entries = max_entries; + + scratch_pte = gen8_pte_encode(vm->scratch.addr, + I915_CACHE_LLC, + use_scratch); + for (i = 0; i < num_entries; i++) + gen8_set_pte(&gtt_base[i], scratch_pte); + readl(gtt_base); +} + static void gen6_ggtt_clear_range(struct i915_address_space *vm, unsigned int first_entry, - unsigned int num_entries) + unsigned int num_entries, + bool use_scratch) { struct drm_i915_private *dev_priv = vm->dev->dev_private; gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = @@ -573,13 +996,13 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, first_entry, num_entries, max_entries)) num_entries = max_entries; - scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); + scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch); + for (i = 0; i < num_entries; i++) iowrite32(scratch_pte, &gtt_base[i]); readl(gtt_base); } - static void i915_ggtt_insert_entries(struct i915_address_space *vm, struct sg_table *st, unsigned int pg_start, @@ -594,7 +1017,8 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm, static void i915_ggtt_clear_range(struct i915_address_space *vm, unsigned int first_entry, - unsigned int num_entries) + unsigned int num_entries, + bool unused) { intel_gtt_clear_range(first_entry, num_entries); } @@ -622,7 +1046,8 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, entry, - obj->base.size >> PAGE_SHIFT); + obj->base.size >> PAGE_SHIFT, + true); obj->has_global_gtt_mapping = 0; } @@ -659,6 +1084,7 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node, *end -= 4096; } } + void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start, unsigned long mappable_end, @@ -709,11 +1135,11 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, const unsigned long count = (hole_end - hole_start) / PAGE_SIZE; DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", hole_start, hole_end); - ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count); + ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true); } /* And finally clear the reserved
guard page */ - ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1); + ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true); } static bool @@ -756,7 +1182,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev) DRM_ERROR("Aliased PPGTT setup failed %d\n", ret); drm_mm_takedown(&dev_priv->gtt.base.mm); - gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE; + if (INTEL_INFO(dev)->gen < 8) + gtt_size += GEN6_PPGTT_PD_ENTRIES*PAGE_SIZE; } i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); } @@ -806,6 +1233,15 @@ static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) return snb_gmch_ctl << 20; } +static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) +{ + bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT; + bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; + if (bdw_gmch_ctl) + bdw_gmch_ctl = 1 << bdw_gmch_ctl; + return bdw_gmch_ctl << 20; +} + static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl) { snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; @@ -813,6 +1249,108 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl) return snb_gmch_ctl << 25; /* 32 MB units */ } +static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl) +{ + bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT; + bdw_gmch_ctl &= BDW_GMCH_GMS_MASK; + return bdw_gmch_ctl << 25; /* 32 MB units */ +} + +static int ggtt_probe_common(struct drm_device *dev, + size_t gtt_size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + phys_addr_t gtt_bus_addr; + int ret; + + /* For Modern GENs the PTEs and register space are split in the BAR */ + gtt_bus_addr = pci_resource_start(dev->pdev, 0) + + (pci_resource_len(dev->pdev, 0) / 2); + + dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size); + if (!dev_priv->gtt.gsm) { + DRM_ERROR("Failed to map the gtt page table\n"); + return -ENOMEM; + } + + ret = setup_scratch_page(dev); + if (ret) { + DRM_ERROR("Scratch setup failed\n"); + /* iounmap will also get called at remove, but meh */ + iounmap(dev_priv->gtt.gsm); + } + + return ret; +} + +/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability + * bits. When using advanced contexts each context stores its own PAT, but + * writing this data shouldn't be harmful even in those cases. */ +static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv) +{ +#define GEN8_PPAT_UC (0<<0) +#define GEN8_PPAT_WC (1<<0) +#define GEN8_PPAT_WT (2<<0) +#define GEN8_PPAT_WB (3<<0) +#define GEN8_PPAT_ELLC_OVERRIDE (0<<2) +/* FIXME(BDW): Bspec is completely confused about cache control bits. */ +#define GEN8_PPAT_LLC (1<<2) +#define GEN8_PPAT_LLCELLC (2<<2) +#define GEN8_PPAT_LLCeLLC (3<<2) +#define GEN8_PPAT_AGE(x) (x<<4) +#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8)) + uint64_t pat; + + pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */ + GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */ + GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */ + GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */ + GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) | + GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) | + GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | + GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); + + /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b + * write would work. 
*/ + I915_WRITE(GEN8_PRIVATE_PAT, pat); + I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32); +} + +static int gen8_gmch_probe(struct drm_device *dev, + size_t *gtt_total, + size_t *stolen, + phys_addr_t *mappable_base, + unsigned long *mappable_end) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned int gtt_size; + u16 snb_gmch_ctl; + int ret; + + /* TODO: We're not aware of mappable constraints on gen8 yet */ + *mappable_base = pci_resource_start(dev->pdev, 2); + *mappable_end = pci_resource_len(dev->pdev, 2); + + if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39))) + pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39)); + + pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); + + *stolen = gen8_get_stolen_size(snb_gmch_ctl); + + gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl); + *gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT; + + gen8_setup_private_ppat(dev_priv); + + ret = ggtt_probe_common(dev, gtt_size); + + dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range; + dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries; + + return ret; +} + static int gen6_gmch_probe(struct drm_device *dev, size_t *gtt_total, size_t *stolen, @@ -820,7 +1358,6 @@ static int gen6_gmch_probe(struct drm_device *dev, unsigned long *mappable_end) { struct drm_i915_private *dev_priv = dev->dev_private; - phys_addr_t gtt_bus_addr; unsigned int gtt_size; u16 snb_gmch_ctl; int ret; @@ -840,24 +1377,13 @@ static int gen6_gmch_probe(struct drm_device *dev, if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40))) pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40)); pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); - gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); *stolen = gen6_get_stolen_size(snb_gmch_ctl); - *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT; - /* For Modern GENs the PTEs and register space are split in the BAR */ - gtt_bus_addr = pci_resource_start(dev->pdev, 0) + - (pci_resource_len(dev->pdev, 0) / 2); - - dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size); - if (!dev_priv->gtt.gsm) { - DRM_ERROR("Failed to map the gtt page table\n"); - return -ENOMEM; - } + gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); + *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT; - ret = setup_scratch_page(dev); - if (ret) - DRM_ERROR("Scratch setup failed\n"); + ret = ggtt_probe_common(dev, gtt_size); dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range; dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries; @@ -911,7 +1437,7 @@ int i915_gem_gtt_init(struct drm_device *dev) if (INTEL_INFO(dev)->gen <= 5) { gtt->gtt_probe = i915_gmch_probe; gtt->base.cleanup = i915_gmch_remove; - } else { + } else if (INTEL_INFO(dev)->gen < 8) { gtt->gtt_probe = gen6_gmch_probe; gtt->base.cleanup = gen6_gmch_remove; if (IS_HASWELL(dev) && dev_priv->ellc_size) @@ -924,6 +1450,9 @@ int i915_gem_gtt_init(struct drm_device *dev) gtt->base.pte_encode = ivb_pte_encode; else gtt->base.pte_encode = snb_pte_encode; + } else { + dev_priv->gtt.gtt_probe = gen8_gmch_probe; + dev_priv->gtt.base.cleanup = gen6_gmch_remove; } ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size, diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 5dde8102647..79dcb8f896c 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -249,7 +249,8 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m, err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); 
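
The GGTT sizing in gen8_gmch_probe() above is worth a quick worked example: gen8_get_total_gtt_size() decodes the GGMS field of SNB_GMCH_CTRL into a power-of-two number of megabytes of PTE space, and the probe then converts that into addressable GGTT space using the 8-byte gen8 PTE size. The following standalone sketch repeats the same arithmetic; the BDW_GMCH_GGMS_SHIFT/MASK values are assumed here (they live in i915_drv.h, outside this hunk), and this is illustration, not driver code:

#include <stdint.h>
#include <stdio.h>

#define BDW_GMCH_GGMS_SHIFT 6   /* assumed; defined in i915_drv.h, not shown in this diff */
#define BDW_GMCH_GGMS_MASK  0x3 /* assumed */
#define PAGE_SHIFT 12

static unsigned int gen8_get_total_gtt_size(uint16_t bdw_gmch_ctl)
{
	/* Same decode as the patch: GGMS != 0 means 1 << GGMS MiB of PTEs. */
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
	return (unsigned int)bdw_gmch_ctl << 20;
}

int main(void)
{
	uint16_t ggms;

	for (ggms = 0; ggms <= BDW_GMCH_GGMS_MASK; ggms++) {
		unsigned int gtt_size = gen8_get_total_gtt_size(ggms << BDW_GMCH_GGMS_SHIFT);
		/* gen8 PTEs are 8 bytes; each one maps a 4 KiB page. */
		uint64_t gtt_total = ((uint64_t)gtt_size / 8) << PAGE_SHIFT;

		printf("GGMS=%u -> %u MiB of PTEs -> %llu MiB of GGTT\n",
		       ggms, gtt_size >> 20,
		       (unsigned long long)(gtt_total >> 20));
	}
	return 0;
}

With GGMS=3 this yields 8 MiB of PTEs, i.e. a 4 GiB global GTT, which is why the doubled PTE width matters for the (gtt_size / sizeof(gen8_gtt_pte_t)) conversion above.
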
if (ring == RCS && INTEL_INFO(dev)->gen >= 4) err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); - + if (INTEL_INFO(dev)->gen >= 4) + err_printf(m, " BB_STATE: 0x%08x\n", error->bbstate[ring]); if (INTEL_INFO(dev)->gen >= 4) err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); @@ -623,6 +624,7 @@ static void i915_gem_record_fences(struct drm_device *dev, /* Fences */ switch (INTEL_INFO(dev)->gen) { + case 8: case 7: case 6: for (i = 0; i < dev_priv->num_fence_regs; i++) @@ -725,6 +727,7 @@ static void i915_record_ring_state(struct drm_device *dev, error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); if (ring->id == RCS) error->bbaddr = I915_READ64(BB_ADDR); + error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base)); } else { error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); error->ipeir[ring->id] = I915_READ(IPEIR); @@ -1042,6 +1045,7 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone) default: WARN_ONCE(1, "Unsupported platform\n"); case 7: + case 8: instdone[0] = I915_READ(GEN7_INSTDONE_1); instdone[1] = I915_READ(GEN7_SC_INSTDONE); instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index a2274c71327..5d1dedc02f1 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -270,6 +270,21 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, } } +static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, + enum pipe pipe, bool enable) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + assert_spin_locked(&dev_priv->irq_lock); + + if (enable) + dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN; + else + dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN; + I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); + POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); +} + /** * ibx_display_interrupt_update - update SDEIMR * @dev_priv: driver private @@ -382,6 +397,8 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, ironlake_set_fifo_underrun_reporting(dev, pipe, enable); else if (IS_GEN7(dev)) ivybridge_set_fifo_underrun_reporting(dev, pipe, enable); + else if (IS_GEN8(dev)) + broadwell_set_fifo_underrun_reporting(dev, pipe, enable); done: spin_unlock_irqrestore(&dev_priv->irq_lock, flags); @@ -442,7 +459,7 @@ done: void -i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) +i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask) { u32 reg = PIPESTAT(pipe); u32 pipestat = I915_READ(reg) & 0x7fff0000; @@ -459,7 +476,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) } void -i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) +i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask) { u32 reg = PIPESTAT(pipe); u32 pipestat = I915_READ(reg) & 0x7fff0000; @@ -487,9 +504,10 @@ static void i915_enable_asle_pipestat(struct drm_device *dev) spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE); + i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE); if (INTEL_INFO(dev)->gen >= 4) - i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE); + i915_enable_pipestat(dev_priv, PIPE_A, + PIPE_LEGACY_BLC_EVENT_ENABLE); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } @@ -1150,6 +1168,56 @@ static void snb_gt_irq_handler(struct 
drm_device *dev, ivybridge_parity_error_irq_handler(dev, gt_iir); } +static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, + struct drm_i915_private *dev_priv, + u32 master_ctl) +{ + u32 rcs, bcs, vcs; + uint32_t tmp = 0; + irqreturn_t ret = IRQ_NONE; + + if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { + tmp = I915_READ(GEN8_GT_IIR(0)); + if (tmp) { + ret = IRQ_HANDLED; + rcs = tmp >> GEN8_RCS_IRQ_SHIFT; + bcs = tmp >> GEN8_BCS_IRQ_SHIFT; + if (rcs & GT_RENDER_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[RCS]); + if (bcs & GT_RENDER_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[BCS]); + I915_WRITE(GEN8_GT_IIR(0), tmp); + } else + DRM_ERROR("The master control interrupt lied (GT0)!\n"); + } + + if (master_ctl & GEN8_GT_VCS1_IRQ) { + tmp = I915_READ(GEN8_GT_IIR(1)); + if (tmp) { + ret = IRQ_HANDLED; + vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; + if (vcs & GT_RENDER_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[VCS]); + I915_WRITE(GEN8_GT_IIR(1), tmp); + } else + DRM_ERROR("The master control interrupt lied (GT1)!\n"); + } + + if (master_ctl & GEN8_GT_VECS_IRQ) { + tmp = I915_READ(GEN8_GT_IIR(3)); + if (tmp) { + ret = IRQ_HANDLED; + vcs = tmp >> GEN8_VECS_IRQ_SHIFT; + if (vcs & GT_RENDER_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[VECS]); + I915_WRITE(GEN8_GT_IIR(3), tmp); + } else + DRM_ERROR("The master control interrupt lied (GT3)!\n"); + } + + return ret; +} + #define HPD_STORM_DETECT_PERIOD 1000 #define HPD_STORM_THRESHOLD 5 @@ -1222,25 +1290,29 @@ static void dp_aux_irq_handler(struct drm_device *dev) } #if defined(CONFIG_DEBUG_FS) -static void display_pipe_crc_update(struct drm_device *dev, enum pipe pipe, - uint32_t crc0, uint32_t crc1, - uint32_t crc2, uint32_t crc3, - uint32_t crc4) +static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, + uint32_t crc0, uint32_t crc1, + uint32_t crc2, uint32_t crc3, + uint32_t crc4) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; struct intel_pipe_crc_entry *entry; int head, tail; + spin_lock(&pipe_crc->lock); + if (!pipe_crc->entries) { + spin_unlock(&pipe_crc->lock); DRM_ERROR("spurious interrupt\n"); return; } - head = atomic_read(&pipe_crc->head); - tail = atomic_read(&pipe_crc->tail); + head = pipe_crc->head; + tail = pipe_crc->tail; if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { + spin_unlock(&pipe_crc->lock); DRM_ERROR("CRC buffer overflowing\n"); return; } @@ -1255,48 +1327,63 @@ static void display_pipe_crc_update(struct drm_device *dev, enum pipe pipe, entry->crc[4] = crc4; head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); - atomic_set(&pipe_crc->head, head); + pipe_crc->head = head; + + spin_unlock(&pipe_crc->lock); wake_up_interruptible(&pipe_crc->wq); } +#else +static inline void +display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, + uint32_t crc0, uint32_t crc1, + uint32_t crc2, uint32_t crc3, + uint32_t crc4) {} +#endif + -static void hsw_pipe_crc_update(struct drm_device *dev, enum pipe pipe) +static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; - display_pipe_crc_update(dev, pipe, - I915_READ(PIPE_CRC_RES_1_IVB(pipe)), - 0, 0, 0, 0); + display_pipe_crc_irq_handler(dev, pipe, + I915_READ(PIPE_CRC_RES_1_IVB(pipe)), + 0, 0, 0, 0); } -static void ivb_pipe_crc_update(struct drm_device *dev, enum pipe pipe) +static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) { struct 
drm_i915_private *dev_priv = dev->dev_private; - display_pipe_crc_update(dev, pipe, - I915_READ(PIPE_CRC_RES_1_IVB(pipe)), - I915_READ(PIPE_CRC_RES_2_IVB(pipe)), - I915_READ(PIPE_CRC_RES_3_IVB(pipe)), - I915_READ(PIPE_CRC_RES_4_IVB(pipe)), - I915_READ(PIPE_CRC_RES_5_IVB(pipe))); + display_pipe_crc_irq_handler(dev, pipe, + I915_READ(PIPE_CRC_RES_1_IVB(pipe)), + I915_READ(PIPE_CRC_RES_2_IVB(pipe)), + I915_READ(PIPE_CRC_RES_3_IVB(pipe)), + I915_READ(PIPE_CRC_RES_4_IVB(pipe)), + I915_READ(PIPE_CRC_RES_5_IVB(pipe))); } -static void ilk_pipe_crc_update(struct drm_device *dev, enum pipe pipe) +static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t res1, res2; + + if (INTEL_INFO(dev)->gen >= 3) + res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); + else + res1 = 0; - display_pipe_crc_update(dev, pipe, - I915_READ(PIPE_CRC_RES_RED_ILK(pipe)), - I915_READ(PIPE_CRC_RES_GREEN_ILK(pipe)), - I915_READ(PIPE_CRC_RES_BLUE_ILK(pipe)), - I915_READ(PIPE_CRC_RES_RES1_ILK(pipe)), - I915_READ(PIPE_CRC_RES_RES2_ILK(pipe))); + if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) + res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); + else + res2 = 0; + + display_pipe_crc_irq_handler(dev, pipe, + I915_READ(PIPE_CRC_RES_RED(pipe)), + I915_READ(PIPE_CRC_RES_GREEN(pipe)), + I915_READ(PIPE_CRC_RES_BLUE(pipe)), + res1, res2); } -#else -static inline void hsw_pipe_crc_update(struct drm_device *dev, int pipe) {} -static inline void ivb_pipe_crc_update(struct drm_device *dev, int pipe) {} -static inline void ilk_pipe_crc_update(struct drm_device *dev, int pipe) {} -#endif /* The RPS events need forcewake, so we add them to a work queue and mask their * IMR bits until the work is done. Other interrupts can be processed without @@ -1365,13 +1452,16 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); for_each_pipe(pipe) { - if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) + if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) drm_handle_vblank(dev, pipe); if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { intel_prepare_page_flip(dev, pipe); intel_finish_page_flip(dev, pipe); } + + if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) + i9xx_pipe_crc_irq_handler(dev, pipe); } /* Consume port. 
Then clear IIR or we'll miss events */ @@ -1475,9 +1565,9 @@ static void ivb_err_int_handler(struct drm_device *dev) if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { if (IS_IVYBRIDGE(dev)) - ivb_pipe_crc_update(dev, pipe); + ivb_pipe_crc_irq_handler(dev, pipe); else - hsw_pipe_crc_update(dev, pipe); + hsw_pipe_crc_irq_handler(dev, pipe); } } @@ -1550,6 +1640,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) { struct drm_i915_private *dev_priv = dev->dev_private; + enum pipe pipe; if (de_iir & DE_AUX_CHANNEL_A) dp_aux_irq_handler(dev); @@ -1557,37 +1648,26 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) if (de_iir & DE_GSE) intel_opregion_asle_intr(dev); - if (de_iir & DE_PIPEA_VBLANK) - drm_handle_vblank(dev, 0); - - if (de_iir & DE_PIPEB_VBLANK) - drm_handle_vblank(dev, 1); - if (de_iir & DE_POISON) DRM_ERROR("Poison interrupt\n"); - if (de_iir & DE_PIPEA_FIFO_UNDERRUN) - if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) - DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); - - if (de_iir & DE_PIPEB_FIFO_UNDERRUN) - if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) - DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); - - if (de_iir & DE_PIPEA_CRC_DONE) - ilk_pipe_crc_update(dev, PIPE_A); + for_each_pipe(pipe) { + if (de_iir & DE_PIPE_VBLANK(pipe)) + drm_handle_vblank(dev, pipe); - if (de_iir & DE_PIPEB_CRC_DONE) - ilk_pipe_crc_update(dev, PIPE_B); + if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) + if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) + DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", + pipe_name(pipe)); - if (de_iir & DE_PLANEA_FLIP_DONE) { - intel_prepare_page_flip(dev, 0); - intel_finish_page_flip_plane(dev, 0); - } + if (de_iir & DE_PIPE_CRC_DONE(pipe)) + i9xx_pipe_crc_irq_handler(dev, pipe); - if (de_iir & DE_PLANEB_FLIP_DONE) { - intel_prepare_page_flip(dev, 1); - intel_finish_page_flip_plane(dev, 1); + /* plane/pipes map 1:1 on ilk+ */ + if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { + intel_prepare_page_flip(dev, pipe); + intel_finish_page_flip_plane(dev, pipe); + } } /* check event from PCH */ @@ -1610,7 +1690,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) { struct drm_i915_private *dev_priv = dev->dev_private; - int i; + enum pipe i; if (de_iir & DE_ERR_INT_IVB) ivb_err_int_handler(dev); @@ -1621,10 +1701,12 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) if (de_iir & DE_GSE_IVB) intel_opregion_asle_intr(dev); - for (i = 0; i < 3; i++) { - if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) + for_each_pipe(i) { + if (de_iir & (DE_PIPE_VBLANK_IVB(i))) drm_handle_vblank(dev, i); - if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { + + /* plane/pipes map 1:1 on ilk+ */ + if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) { intel_prepare_page_flip(dev, i); intel_finish_page_flip_plane(dev, i); } @@ -1709,6 +1791,117 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) return ret; } +static irqreturn_t gen8_irq_handler(int irq, void *arg) +{ + struct drm_device *dev = arg; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 master_ctl; + irqreturn_t ret = IRQ_NONE; + uint32_t tmp = 0; + enum pipe pipe; + + atomic_inc(&dev_priv->irq_received); + + master_ctl = I915_READ(GEN8_MASTER_IRQ); + master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; + if (!master_ctl) + return IRQ_NONE; + + I915_WRITE(GEN8_MASTER_IRQ, 0); + 
POSTING_READ(GEN8_MASTER_IRQ); + + ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl); + + if (master_ctl & GEN8_DE_MISC_IRQ) { + tmp = I915_READ(GEN8_DE_MISC_IIR); + if (tmp & GEN8_DE_MISC_GSE) + intel_opregion_asle_intr(dev); + else if (tmp) + DRM_ERROR("Unexpected DE Misc interrupt\n"); + else + DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); + + if (tmp) { + I915_WRITE(GEN8_DE_MISC_IIR, tmp); + ret = IRQ_HANDLED; + } + } + + if (master_ctl & GEN8_DE_PORT_IRQ) { + tmp = I915_READ(GEN8_DE_PORT_IIR); + if (tmp & GEN8_AUX_CHANNEL_A) + dp_aux_irq_handler(dev); + else if (tmp) + DRM_ERROR("Unexpected DE Port interrupt\n"); + else + DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); + + if (tmp) { + I915_WRITE(GEN8_DE_PORT_IIR, tmp); + ret = IRQ_HANDLED; + } + } + + for_each_pipe(pipe) { + uint32_t pipe_iir; + + if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) + continue; + + pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); + if (pipe_iir & GEN8_PIPE_VBLANK) + drm_handle_vblank(dev, pipe); + + if (pipe_iir & GEN8_PIPE_FLIP_DONE) { + intel_prepare_page_flip(dev, pipe); + intel_finish_page_flip_plane(dev, pipe); + } + + if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) + hsw_pipe_crc_irq_handler(dev, pipe); + + if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { + if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, + false)) + DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", + pipe_name(pipe)); + } + + if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { + DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", + pipe_name(pipe), + pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); + } + + if (pipe_iir) { + ret = IRQ_HANDLED; + I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); + } else + DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); + } + + if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) { + /* + * FIXME(BDW): Assume for now that the new interrupt handling + * scheme also closed the SDE interrupt handling race we've seen + * on older pch-split platforms. But this needs testing. + */ + u32 pch_iir = I915_READ(SDEIIR); + + cpt_irq_handler(dev, pch_iir); + + if (pch_iir) { + I915_WRITE(SDEIIR, pch_iir); + ret = IRQ_HANDLED; + } + } + + I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); + POSTING_READ(GEN8_MASTER_IRQ); + + return ret; +} + static void i915_error_wake_up(struct drm_i915_private *dev_priv, bool reset_completed) { @@ -2027,7 +2220,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : - DE_PIPE_VBLANK_ILK(pipe); + DE_PIPE_VBLANK(pipe); if (!i915_pipe_enabled(dev, pipe)) return -EINVAL; @@ -2050,7 +2243,7 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe) spin_lock_irqsave(&dev_priv->irq_lock, irqflags); imr = I915_READ(VLV_IMR); - if (pipe == 0) + if (pipe == PIPE_A) imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; else imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; @@ -2062,6 +2255,22 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe) return 0; } +static int gen8_enable_vblank(struct drm_device *dev, int pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long irqflags; + + if (!i915_pipe_enabled(dev, pipe)) + return -EINVAL; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; + I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); + POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + return 0; +} + /* Called from drm generic code, passed 'crtc' which * we use as a pipe index */ @@ -2085,7 +2294,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : - DE_PIPE_VBLANK_ILK(pipe); + DE_PIPE_VBLANK(pipe); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); ironlake_disable_display_irq(dev_priv, bit); @@ -2102,7 +2311,7 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe) i915_disable_pipestat(dev_priv, pipe, PIPE_START_VBLANK_INTERRUPT_ENABLE); imr = I915_READ(VLV_IMR); - if (pipe == 0) + if (pipe == PIPE_A) imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; else imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; @@ -2110,6 +2319,21 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } +static void gen8_disable_vblank(struct drm_device *dev, int pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long irqflags; + + if (!i915_pipe_enabled(dev, pipe)) + return; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; + I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); + POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + static u32 ring_last_seqno(struct intel_ring_buffer *ring) { @@ -2268,8 +2492,12 @@ static void i915_hangcheck_elapsed(unsigned long data) if (waitqueue_active(&ring->irq_queue)) { /* Issue a wake-up to catch stuck h/w. */ if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { - DRM_ERROR("Hangcheck timer elapsed... %s idle\n", - ring->name); + if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) + DRM_ERROR("Hangcheck timer elapsed... %s idle\n", + ring->name); + else + DRM_INFO("Fake missed irq on %s\n", + ring->name); wake_up_all(&ring->irq_queue); } /* Safeguard against driver failure */ @@ -2440,6 +2668,53 @@ static void valleyview_irq_preinstall(struct drm_device *dev) POSTING_READ(VLV_IER); } +static void gen8_irq_preinstall(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + + atomic_set(&dev_priv->irq_received, 0); + + I915_WRITE(GEN8_MASTER_IRQ, 0); + POSTING_READ(GEN8_MASTER_IRQ); + + /* IIR can theoretically queue up two events. 
Be paranoid */ +#define GEN8_IRQ_INIT_NDX(type, which) do { \ + I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ + POSTING_READ(GEN8_##type##_IMR(which)); \ + I915_WRITE(GEN8_##type##_IER(which), 0); \ + I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ + POSTING_READ(GEN8_##type##_IIR(which)); \ + I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ + } while (0) + +#define GEN8_IRQ_INIT(type) do { \ + I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ + POSTING_READ(GEN8_##type##_IMR); \ + I915_WRITE(GEN8_##type##_IER, 0); \ + I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ + POSTING_READ(GEN8_##type##_IIR); \ + I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ + } while (0) + + GEN8_IRQ_INIT_NDX(GT, 0); + GEN8_IRQ_INIT_NDX(GT, 1); + GEN8_IRQ_INIT_NDX(GT, 2); + GEN8_IRQ_INIT_NDX(GT, 3); + + for_each_pipe(pipe) { + GEN8_IRQ_INIT_NDX(DE_PIPE, pipe); + } + + GEN8_IRQ_INIT(DE_PORT); + GEN8_IRQ_INIT(DE_MISC); + GEN8_IRQ_INIT(PCU); +#undef GEN8_IRQ_INIT +#undef GEN8_IRQ_INIT_NDX + + POSTING_READ(GEN8_PCU_IIR); +} + static void ibx_hpd_irq_setup(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -2593,7 +2868,8 @@ static int valleyview_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 enable_mask; - u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; + u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV | + PIPE_CRC_DONE_ENABLE; unsigned long irqflags; enable_mask = I915_DISPLAY_PORT_INTERRUPT; @@ -2623,9 +2899,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev) /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. */ spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_enable_pipestat(dev_priv, 0, pipestat_enable); - i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); - i915_enable_pipestat(dev_priv, 1, pipestat_enable); + i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable); + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); + i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); I915_WRITE(VLV_IIR, 0xffffffff); @@ -2644,6 +2920,117 @@ static int valleyview_irq_postinstall(struct drm_device *dev) return 0; } +static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) +{ + int i; + + /* These are interrupts we'll toggle with the ring mask register */ + uint32_t gt_interrupts[] = { + GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | + GT_RENDER_L3_PARITY_ERROR_INTERRUPT | + GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT, + GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | + GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, + 0, + GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT + }; + + for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) { + u32 tmp = I915_READ(GEN8_GT_IIR(i)); + if (tmp) + DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n", + i, tmp); + I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]); + I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]); + } + POSTING_READ(GEN8_GT_IER(0)); +} + +static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE | + GEN8_PIPE_CDCLK_CRC_DONE | + GEN8_PIPE_FIFO_UNDERRUN | + GEN8_DE_PIPE_IRQ_FAULT_ERRORS; + uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK; + int pipe; + dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; + dev_priv->de_irq_mask[PIPE_B] 
= ~de_pipe_masked; + dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; + + for_each_pipe(pipe) { + u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe)); + if (tmp) + DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n", + pipe, tmp); + I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); + I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables); + } + POSTING_READ(GEN8_DE_PIPE_ISR(0)); + + I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A); + I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A); + POSTING_READ(GEN8_DE_PORT_IER); +} + +static int gen8_irq_postinstall(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + gen8_gt_irq_postinstall(dev_priv); + gen8_de_irq_postinstall(dev_priv); + + ibx_irq_postinstall(dev); + + I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); + POSTING_READ(GEN8_MASTER_IRQ); + + return 0; +} + +static void gen8_irq_uninstall(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + + if (!dev_priv) + return; + + atomic_set(&dev_priv->irq_received, 0); + + I915_WRITE(GEN8_MASTER_IRQ, 0); + +#define GEN8_IRQ_FINI_NDX(type, which) do { \ + I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ + I915_WRITE(GEN8_##type##_IER(which), 0); \ + I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ + } while (0) + +#define GEN8_IRQ_FINI(type) do { \ + I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ + I915_WRITE(GEN8_##type##_IER, 0); \ + I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ + } while (0) + + GEN8_IRQ_FINI_NDX(GT, 0); + GEN8_IRQ_FINI_NDX(GT, 1); + GEN8_IRQ_FINI_NDX(GT, 2); + GEN8_IRQ_FINI_NDX(GT, 3); + + for_each_pipe(pipe) { + GEN8_IRQ_FINI_NDX(DE_PIPE, pipe); + } + + GEN8_IRQ_FINI(DE_PORT); + GEN8_IRQ_FINI(DE_MISC); + GEN8_IRQ_FINI(PCU); +#undef GEN8_IRQ_FINI +#undef GEN8_IRQ_FINI_NDX + + POSTING_READ(GEN8_PCU_IIR); +} + static void valleyview_irq_uninstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -2716,6 +3103,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev) static int i8xx_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + unsigned long irqflags; I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); @@ -2736,6 +3124,13 @@ static int i8xx_irq_postinstall(struct drm_device *dev) I915_USER_INTERRUPT); POSTING_READ16(IER); + /* Interrupt setup is already guaranteed to be single-threaded, this is + * just to make the assert_spin_locked check happy. 
*/ + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); + i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + return 0; } @@ -2822,13 +3217,14 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) if (iir & I915_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[RCS]); - if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && - i8xx_handle_vblank(dev, 0, iir)) - flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); + for_each_pipe(pipe) { + if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && + i8xx_handle_vblank(dev, pipe, iir)) + flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); - if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && - i8xx_handle_vblank(dev, 1, iir)) - flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); + if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) + i9xx_pipe_crc_irq_handler(dev, pipe); + } iir = new_iir; } @@ -2875,6 +3271,7 @@ static int i915_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 enable_mask; + unsigned long irqflags; I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); @@ -2910,6 +3307,13 @@ static int i915_irq_postinstall(struct drm_device *dev) i915_enable_asle_pipestat(dev); + /* Interrupt setup is already guaranteed to be single-threaded, this is + * just to make the assert_spin_locked check happy. */ + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); + i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + return 0; } @@ -3021,6 +3425,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; + + if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) + i9xx_pipe_crc_irq_handler(dev, pipe); } if (blc_event || (iir & I915_ASLE_INTERRUPT)) @@ -3119,7 +3526,9 @@ static int i965_irq_postinstall(struct drm_device *dev) /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. 
*/ spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); + i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); /* @@ -3265,6 +3674,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; + + if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) + i9xx_pipe_crc_irq_handler(dev, pipe); } @@ -3398,6 +3810,14 @@ void intel_irq_init(struct drm_device *dev) dev->driver->enable_vblank = valleyview_enable_vblank; dev->driver->disable_vblank = valleyview_disable_vblank; dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; + } else if (IS_GEN8(dev)) { + dev->driver->irq_handler = gen8_irq_handler; + dev->driver->irq_preinstall = gen8_irq_preinstall; + dev->driver->irq_postinstall = gen8_irq_postinstall; + dev->driver->irq_uninstall = gen8_irq_uninstall; + dev->driver->enable_vblank = gen8_enable_vblank; + dev->driver->disable_vblank = gen8_disable_vblank; + dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; } else if (HAS_PCH_SPLIT(dev)) { dev->driver->irq_handler = ironlake_irq_handler; dev->driver->irq_preinstall = ironlake_irq_preinstall; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0e7488b6496..f9eafb6ed52 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -110,6 +110,9 @@ #define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220) #define PP_DIR_DCLV_2G 0xffffffff +#define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4)) +#define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8) + #define GAM_ECOCHK 0x4090 #define ECOCHK_SNB_BIT (1<<10) #define HSW_ECOCHK_ARB_PRIO_SOL (1<<6) @@ -247,6 +250,7 @@ #define MI_BATCH_NON_SECURE_HSW (1<<13) #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) #define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ +#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1) #define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ #define MI_SEMAPHORE_GLOBAL_GTT (1<<22) #define MI_SEMAPHORE_UPDATE (1<<21) @@ -444,7 +448,7 @@ #define _DPIO_TX3_SWING_CTL4_A 0x690 #define _DPIO_TX3_SWING_CTL4_B 0x2a90 -#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX_SWING_CTL4_A, \ +#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX3_SWING_CTL4_A, \ _DPIO_TX3_SWING_CTL4_B) /* @@ -655,9 +659,17 @@ #define ARB_MODE 0x04030 #define ARB_MODE_SWIZZLE_SNB (1<<4) #define ARB_MODE_SWIZZLE_IVB (1<<5) +#define GAMTARBMODE 0x04a08 +#define ARB_MODE_BWGTLB_DISABLE (1<<9) +#define ARB_MODE_SWIZZLE_BDW (1<<1) #define RENDER_HWS_PGA_GEN7 (0x04080) #define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) +#define RING_FAULT_GTTSEL_MASK (1<<11) +#define RING_FAULT_SRCID(x) ((x >> 3) & 0xff) +#define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3) +#define RING_FAULT_VALID (1<<0) #define DONE_REG 0x40b0 +#define GEN8_PRIVATE_PAT 0x40e0 #define BSD_HWS_PGA_GEN7 (0x04180) #define BLT_HWS_PGA_GEN7 (0x04280) #define VEBOX_HWS_PGA_GEN7 (0x04380) @@ -718,6 +730,7 @@ #define NOPID 0x02094 #define HWSTAM 0x02098 #define DMA_FADD_I8XX 0x020d0 +#define RING_BBSTATE(base) ((base)+0x110) #define ERROR_GEN6 0x040a0 #define GEN7_ERR_INT 0x44040 @@ -736,6 +749,7 @@ #define FPGA_DBG_RM_NOCLAIM (1<<31) #define DERRMR 0x44050 +/* Note that HBLANK events are reserved on bdw+ */ #define 
DERRMR_PIPEA_SCANLINE (1<<0) #define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1) #define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2) @@ -769,6 +783,7 @@ #define _3D_CHICKEN3 0x02090 #define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) +#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) #define MI_MODE 0x0209c # define VS_TIMER_DISPATCH (1 << 6) @@ -1106,9 +1121,6 @@ _HSW_PIPE_SLICE_CHICKEN_1_A, + \ _HSW_PIPE_SLICE_CHICKEN_1_B) -#define HSW_CLKGATE_DISABLE_PART_1 0x46500 -#define HSW_DPFC_GATING_DISABLE (1<<23) - /* * GPIO regs */ @@ -1476,7 +1488,7 @@ #define MCHBAR_MIRROR_BASE_SNB 0x140000 /* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */ -#define DCLK 0x5e04 +#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04) /** 915-945 and GM965 MCH register controlling DRAM channel access */ #define DCC 0x10200 @@ -1771,9 +1783,9 @@ #define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7 #define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16)) -#define GEN6_GT_PERF_STATUS 0x145948 -#define GEN6_RP_STATE_LIMITS 0x145994 -#define GEN6_RP_STATE_CAP 0x145998 +#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948) +#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994) +#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998) /* * Logical Context regs @@ -1818,6 +1830,9 @@ * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages. */ #define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE) +/* Same as Haswell, but 72064 bytes now. */ +#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE) + #define VLV_CLK_CTL2 0x101104 #define CLK_CTL2_CZCOUNT_30NS_SHIFT 28 @@ -1843,36 +1858,58 @@ /* Pipe A CRC regs */ #define _PIPE_CRC_CTL_A (dev_priv->info->display_mmio_offset + 0x60050) #define PIPE_CRC_ENABLE (1 << 31) +/* ivb+ source selection */ #define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29) #define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29) #define PIPE_CRC_SOURCE_PF_IVB (2 << 29) +/* ilk+ source selection */ #define PIPE_CRC_SOURCE_PRIMARY_ILK (0 << 28) #define PIPE_CRC_SOURCE_SPRITE_ILK (1 << 28) #define PIPE_CRC_SOURCE_PIPE_ILK (2 << 28) /* embedded DP port on the north display block, reserved on ivb */ #define PIPE_CRC_SOURCE_PORT_A_ILK (4 << 28) #define PIPE_CRC_SOURCE_FDI_ILK (5 << 28) /* reserved on ivb */ +/* vlv source selection */ +#define PIPE_CRC_SOURCE_PIPE_VLV (0 << 27) +#define PIPE_CRC_SOURCE_HDMIB_VLV (1 << 27) +#define PIPE_CRC_SOURCE_HDMIC_VLV (2 << 27) +/* with DP port the pipe source is invalid */ +#define PIPE_CRC_SOURCE_DP_D_VLV (3 << 27) +#define PIPE_CRC_SOURCE_DP_B_VLV (6 << 27) +#define PIPE_CRC_SOURCE_DP_C_VLV (7 << 27) +/* gen3+ source selection */ +#define PIPE_CRC_SOURCE_PIPE_I9XX (0 << 28) +#define PIPE_CRC_SOURCE_SDVOB_I9XX (1 << 28) +#define PIPE_CRC_SOURCE_SDVOC_I9XX (2 << 28) +/* with DP/TV port the pipe source is invalid */ +#define PIPE_CRC_SOURCE_DP_D_G4X (3 << 28) +#define PIPE_CRC_SOURCE_TV_PRE (4 << 28) +#define PIPE_CRC_SOURCE_TV_POST (5 << 28) +#define PIPE_CRC_SOURCE_DP_B_G4X (6 << 28) +#define PIPE_CRC_SOURCE_DP_C_G4X (7 << 28) +/* gen2 doesn't have source selection bits */ +#define PIPE_CRC_INCLUDE_BORDER_I8XX (1 << 30) + #define _PIPE_CRC_RES_1_A_IVB 0x60064 #define _PIPE_CRC_RES_2_A_IVB 0x60068 #define _PIPE_CRC_RES_3_A_IVB 0x6006c #define _PIPE_CRC_RES_4_A_IVB 0x60070 #define _PIPE_CRC_RES_5_A_IVB 0x60074 -#define _PIPE_CRC_RES_RED_A_ILK 0x60060 -#define _PIPE_CRC_RES_GREEN_A_ILK 0x60064 -#define _PIPE_CRC_RES_BLUE_A_ILK 0x60068 -#define _PIPE_CRC_RES_RES1_A_ILK 0x6006c -#define _PIPE_CRC_RES_RES2_A_ILK 
0x60080 +#define _PIPE_CRC_RES_RED_A (dev_priv->info->display_mmio_offset + 0x60060) +#define _PIPE_CRC_RES_GREEN_A (dev_priv->info->display_mmio_offset + 0x60064) +#define _PIPE_CRC_RES_BLUE_A (dev_priv->info->display_mmio_offset + 0x60068) +#define _PIPE_CRC_RES_RES1_A_I915 (dev_priv->info->display_mmio_offset + 0x6006c) +#define _PIPE_CRC_RES_RES2_A_G4X (dev_priv->info->display_mmio_offset + 0x60080) /* Pipe B CRC regs */ -#define _PIPE_CRC_CTL_B 0x61050 #define _PIPE_CRC_RES_1_B_IVB 0x61064 #define _PIPE_CRC_RES_2_B_IVB 0x61068 #define _PIPE_CRC_RES_3_B_IVB 0x6106c #define _PIPE_CRC_RES_4_B_IVB 0x61070 #define _PIPE_CRC_RES_5_B_IVB 0x61074 -#define PIPE_CRC_CTL(pipe) _PIPE(pipe, _PIPE_CRC_CTL_A, _PIPE_CRC_CTL_B) +#define PIPE_CRC_CTL(pipe) _PIPE_INC(pipe, _PIPE_CRC_CTL_A, 0x01000) #define PIPE_CRC_RES_1_IVB(pipe) \ _PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB) #define PIPE_CRC_RES_2_IVB(pipe) \ @@ -1884,16 +1921,16 @@ #define PIPE_CRC_RES_5_IVB(pipe) \ _PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB) -#define PIPE_CRC_RES_RED_ILK(pipe) \ - _PIPE_INC(pipe, _PIPE_CRC_RES_RED_A_ILK, 0x01000) -#define PIPE_CRC_RES_GREEN_ILK(pipe) \ - _PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A_ILK, 0x01000) -#define PIPE_CRC_RES_BLUE_ILK(pipe) \ - _PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A_ILK, 0x01000) -#define PIPE_CRC_RES_RES1_ILK(pipe) \ - _PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_ILK, 0x01000) -#define PIPE_CRC_RES_RES2_ILK(pipe) \ - _PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_ILK, 0x01000) +#define PIPE_CRC_RES_RED(pipe) \ + _PIPE_INC(pipe, _PIPE_CRC_RES_RED_A, 0x01000) +#define PIPE_CRC_RES_GREEN(pipe) \ + _PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A, 0x01000) +#define PIPE_CRC_RES_BLUE(pipe) \ + _PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A, 0x01000) +#define PIPE_CRC_RES_RES1_I915(pipe) \ + _PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_I915, 0x01000) +#define PIPE_CRC_RES_RES2_G4X(pipe) \ + _PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_G4X, 0x01000) /* Pipe A timing regs */ #define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000) @@ -1926,8 +1963,8 @@ #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) #define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B) -/* HSW eDP PSR registers */ -#define EDP_PSR_BASE(dev) 0x64800 +/* HSW+ eDP PSR registers */ +#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 
0x64800 : 0x6f800) #define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0) #define EDP_PSR_ENABLE (1<<31) #define EDP_PSR_LINK_DISABLE (0<<27) @@ -2130,6 +2167,14 @@ #define PCH_HDMIC 0xe1150 #define PCH_HDMID 0xe1160 +#define PORT_DFT_I9XX 0x61150 +#define DC_BALANCE_RESET (1 << 25) +#define PORT_DFT2_G4X 0x61154 +#define DC_BALANCE_RESET_VLV (1 << 31) +#define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0) +#define PIPE_B_SCRAMBLE_RESET (1 << 1) +#define PIPE_A_SCRAMBLE_RESET (1 << 0) + /* Gen 3 SDVO bits: */ #define SDVO_ENABLE (1 << 31) #define SDVO_PIPE_SEL(pipe) ((pipe) << 30) @@ -2363,6 +2408,21 @@ #define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238) +#define _VLV_BLC_PWM_CTL2_A (dev_priv->info->display_mmio_offset + 0x61250) +#define _VLV_BLC_PWM_CTL2_B (dev_priv->info->display_mmio_offset + 0x61350) +#define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \ + _VLV_BLC_PWM_CTL2_B) + +#define _VLV_BLC_PWM_CTL_A (dev_priv->info->display_mmio_offset + 0x61254) +#define _VLV_BLC_PWM_CTL_B (dev_priv->info->display_mmio_offset + 0x61354) +#define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \ + _VLV_BLC_PWM_CTL_B) + +#define _VLV_BLC_HIST_CTL_A (dev_priv->info->display_mmio_offset + 0x61260) +#define _VLV_BLC_HIST_CTL_B (dev_priv->info->display_mmio_offset + 0x61360) +#define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \ + _VLV_BLC_HIST_CTL_B) + /* Backlight control */ #define BLC_PWM_CTL2 (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */ #define BLM_PWM_ENABLE (1 << 31) @@ -3194,6 +3254,18 @@ #define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) #define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) +#define _PIPE_MISC_A 0x70030 +#define _PIPE_MISC_B 0x71030 +#define PIPEMISC_DITHER_BPC_MASK (7<<5) +#define PIPEMISC_DITHER_8_BPC (0<<5) +#define PIPEMISC_DITHER_10_BPC (1<<5) +#define PIPEMISC_DITHER_6_BPC (2<<5) +#define PIPEMISC_DITHER_12_BPC (3<<5) +#define PIPEMISC_DITHER_ENABLE (1<<4) +#define PIPEMISC_DITHER_TYPE_MASK (3<<2) +#define PIPEMISC_DITHER_TYPE_SP (0<<2) +#define PIPEMISC(pipe) _PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B) + #define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028) #define PIPEB_LINE_COMPARE_INT_EN (1<<29) #define PIPEB_HLINE_INT_EN (1<<28) @@ -3324,6 +3396,7 @@ #define WM1_LP_LATENCY_MASK (0x7f<<24) #define WM1_LP_FBC_MASK (0xf<<20) #define WM1_LP_FBC_SHIFT 20 +#define WM1_LP_FBC_SHIFT_BDW 19 #define WM1_LP_SR_MASK (0x7ff<<8) #define WM1_LP_SR_SHIFT 8 #define WM1_LP_CURSOR_MASK (0xff) @@ -3906,6 +3979,7 @@ #define DE_SPRITEA_FLIP_DONE (1 << 28) #define DE_PLANEB_FLIP_DONE (1 << 27) #define DE_PLANEA_FLIP_DONE (1 << 26) +#define DE_PLANE_FLIP_DONE(plane) (1 << (26 + (plane))) #define DE_PCU_EVENT (1 << 25) #define DE_GTT_FAULT (1 << 24) #define DE_POISON (1 << 23) @@ -3922,12 +3996,15 @@ #define DE_PIPEB_CRC_DONE (1 << 10) #define DE_PIPEB_FIFO_UNDERRUN (1 << 8) #define DE_PIPEA_VBLANK (1 << 7) +#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8*(pipe))) #define DE_PIPEA_EVEN_FIELD (1 << 6) #define DE_PIPEA_ODD_FIELD (1 << 5) #define DE_PIPEA_LINE_COMPARE (1 << 4) #define DE_PIPEA_VSYNC (1 << 3) #define DE_PIPEA_CRC_DONE (1 << 2) +#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8*(pipe))) #define DE_PIPEA_FIFO_UNDERRUN (1 << 0) +#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8*(pipe))) /* More Ivybridge lolz */ #define DE_ERR_INT_IVB (1<<30) @@ -3943,9 +4020,8 @@ #define DE_PIPEB_VBLANK_IVB (1<<5) #define DE_SPRITEA_FLIP_DONE_IVB (1<<4) #define DE_PLANEA_FLIP_DONE_IVB (1<<3) +#define DE_PLANE_FLIP_DONE_IVB(plane) 
(1<< (3 + 5*(plane))) #define DE_PIPEA_VBLANK_IVB (1<<0) - -#define DE_PIPE_VBLANK_ILK(pipe) (1 << ((pipe * 8) + 7)) #define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5)) #define VLV_MASTER_IER 0x4400c /* Gunit master IER */ @@ -3961,6 +4037,71 @@ #define GTIIR 0x44018 #define GTIER 0x4401c +#define GEN8_MASTER_IRQ 0x44200 +#define GEN8_MASTER_IRQ_CONTROL (1<<31) +#define GEN8_PCU_IRQ (1<<30) +#define GEN8_DE_PCH_IRQ (1<<23) +#define GEN8_DE_MISC_IRQ (1<<22) +#define GEN8_DE_PORT_IRQ (1<<20) +#define GEN8_DE_PIPE_C_IRQ (1<<18) +#define GEN8_DE_PIPE_B_IRQ (1<<17) +#define GEN8_DE_PIPE_A_IRQ (1<<16) +#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+pipe)) +#define GEN8_GT_VECS_IRQ (1<<6) +#define GEN8_GT_VCS2_IRQ (1<<3) +#define GEN8_GT_VCS1_IRQ (1<<2) +#define GEN8_GT_BCS_IRQ (1<<1) +#define GEN8_GT_RCS_IRQ (1<<0) + +#define GEN8_GT_ISR(which) (0x44300 + (0x10 * (which))) +#define GEN8_GT_IMR(which) (0x44304 + (0x10 * (which))) +#define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which))) +#define GEN8_GT_IER(which) (0x4430c + (0x10 * (which))) + +#define GEN8_BCS_IRQ_SHIFT 16 +#define GEN8_RCS_IRQ_SHIFT 0 +#define GEN8_VCS2_IRQ_SHIFT 16 +#define GEN8_VCS1_IRQ_SHIFT 0 +#define GEN8_VECS_IRQ_SHIFT 0 + +#define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe))) +#define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe))) +#define GEN8_DE_PIPE_IIR(pipe) (0x44408 + (0x10 * (pipe))) +#define GEN8_DE_PIPE_IER(pipe) (0x4440c + (0x10 * (pipe))) +#define GEN8_PIPE_FIFO_UNDERRUN (1 << 31) +#define GEN8_PIPE_CDCLK_CRC_ERROR (1 << 29) +#define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28) +#define GEN8_PIPE_CURSOR_FAULT (1 << 10) +#define GEN8_PIPE_SPRITE_FAULT (1 << 9) +#define GEN8_PIPE_PRIMARY_FAULT (1 << 8) +#define GEN8_PIPE_SPRITE_FLIP_DONE (1 << 5) +#define GEN8_PIPE_FLIP_DONE (1 << 4) +#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2) +#define GEN8_PIPE_VSYNC (1 << 1) +#define GEN8_PIPE_VBLANK (1 << 0) +#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \ + (GEN8_PIPE_CURSOR_FAULT | \ + GEN8_PIPE_SPRITE_FAULT | \ + GEN8_PIPE_PRIMARY_FAULT) + +#define GEN8_DE_PORT_ISR 0x44440 +#define GEN8_DE_PORT_IMR 0x44444 +#define GEN8_DE_PORT_IIR 0x44448 +#define GEN8_DE_PORT_IER 0x4444c +#define GEN8_PORT_DP_A_HOTPLUG (1 << 3) +#define GEN8_AUX_CHANNEL_A (1 << 0) + +#define GEN8_DE_MISC_ISR 0x44460 +#define GEN8_DE_MISC_IMR 0x44464 +#define GEN8_DE_MISC_IIR 0x44468 +#define GEN8_DE_MISC_IER 0x4446c +#define GEN8_DE_MISC_GSE (1 << 27) + +#define GEN8_PCU_ISR 0x444e0 +#define GEN8_PCU_IMR 0x444e4 +#define GEN8_PCU_IIR 0x444e8 +#define GEN8_PCU_IER 0x444ec + #define ILK_DISPLAY_CHICKEN2 0x42004 /* Required on all Ironlake and Sandybridge according to the B-Spec. 
*/ #define ILK_ELPIN_409_SELECT (1 << 25) @@ -3986,8 +4127,14 @@ # define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2) #define CHICKEN_PAR1_1 0x42080 +#define DPA_MASK_VBLANK_SRD (1 << 15) #define FORCE_ARB_IDLE_PLANES (1 << 14) +#define _CHICKEN_PIPESL_1_A 0x420b0 +#define _CHICKEN_PIPESL_1_B 0x420b4 +#define DPRS_MASK_VBLANK_SRD (1 << 0) +#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) + #define DISP_ARB_CTL 0x45000 #define DISP_TILE_SURFACE_SWIZZLING (1<<13) #define DISP_FBC_WM_DIS (1<<15) @@ -3998,6 +4145,8 @@ /* GEN7 chicken */ #define GEN7_COMMON_SLICE_CHICKEN1 0x7010 # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) +#define COMMON_SLICE_CHICKEN2 0x7014 +# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) #define GEN7_L3CNTLREG1 0xB01C #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C @@ -4013,6 +4162,9 @@ #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) +#define HSW_SCRATCH1 0xb038 +#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27) + #define HSW_FUSE_STRAP 0x42014 #define HSW_CDCLK_LIMIT (1 << 24) @@ -4408,7 +4560,9 @@ #define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN) #define SOUTH_DSPCLK_GATE_D 0xc2020 +#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30) #define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) +#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14) #define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12) /* CPU: FDI_TX */ @@ -4821,6 +4975,7 @@ #define GEN6_PCODE_WRITE_D_COMP 0x11 #define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) #define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) +#define DISPLAY_IPS_CONTROL 0x19 #define GEN6_PCODE_DATA 0x138128 #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 #define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 @@ -4858,12 +5013,20 @@ #define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */ #define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100 #define GEN7_MAX_PS_THREAD_DEP (8<<12) +#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10) #define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3) #define GEN7_ROW_CHICKEN2 0xe4f4 #define GEN7_ROW_CHICKEN2_GT2 0xf4f4 #define DOP_CLOCK_GATING_DISABLE (1<<0) +#define HSW_ROW_CHICKEN3 0xe49c +#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) + +#define HALF_SLICE_CHICKEN3 0xe184 +#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8) +#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1) + #define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020) #define INTEL_AUDIO_DEVCL 0x808629FB #define INTEL_AUDIO_DEVBLC 0x80862801 @@ -4905,6 +5068,18 @@ CPT_AUD_CNTL_ST_B) #define CPT_AUD_CNTRL_ST2 0xE50C0 +#define VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050) +#define VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150) +#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ + VLV_HDMIW_HDMIEDID_A, \ + VLV_HDMIW_HDMIEDID_B) +#define VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4) +#define VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4) +#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \ + VLV_AUD_CNTL_ST_A, \ + VLV_AUD_CNTL_ST_B) +#define VLV_AUD_CNTL_ST2 (VLV_DISPLAY_BASE + 0x620C0) + /* These are the 4 32-bit write offset registers for each stream * output buffer. It determines the offset from the * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to. 
@@ -4921,6 +5096,12 @@ #define CPT_AUD_CFG(pipe) _PIPE(pipe, \ CPT_AUD_CONFIG_A, \ CPT_AUD_CONFIG_B) +#define VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000) +#define VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100) +#define VLV_AUD_CFG(pipe) _PIPE(pipe, \ + VLV_AUD_CONFIG_A, \ + VLV_AUD_CONFIG_B) + #define AUD_CONFIG_N_VALUE_INDEX (1 << 29) #define AUD_CONFIG_N_PROG_ENABLE (1 << 28) #define AUD_CONFIG_UPPER_N_SHIFT 20 @@ -5063,6 +5244,7 @@ #define DDI_BUF_CTL_B 0x64100 #define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B) #define DDI_BUF_CTL_ENABLE (1<<31) +/* Haswell */ #define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ #define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */ #define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */ @@ -5072,6 +5254,16 @@ #define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */ #define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */ #define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ +/* Broadwell */ +#define DDI_BUF_EMP_400MV_0DB_BDW (0<<24) /* Sel0 */ +#define DDI_BUF_EMP_400MV_3_5DB_BDW (1<<24) /* Sel1 */ +#define DDI_BUF_EMP_400MV_6DB_BDW (2<<24) /* Sel2 */ +#define DDI_BUF_EMP_600MV_0DB_BDW (3<<24) /* Sel3 */ +#define DDI_BUF_EMP_600MV_3_5DB_BDW (4<<24) /* Sel4 */ +#define DDI_BUF_EMP_600MV_6DB_BDW (5<<24) /* Sel5 */ +#define DDI_BUF_EMP_800MV_0DB_BDW (6<<24) /* Sel6 */ +#define DDI_BUF_EMP_800MV_3_5DB_BDW (7<<24) /* Sel7 */ +#define DDI_BUF_EMP_1200MV_0DB_BDW (8<<24) /* Sel8 */ #define DDI_BUF_EMP_MASK (0xf<<24) #define DDI_BUF_PORT_REVERSAL (1<<16) #define DDI_BUF_IS_IDLE (1<<7) @@ -5181,6 +5373,9 @@ #define LCPLL_PLL_LOCK (1<<30) #define LCPLL_CLK_FREQ_MASK (3<<26) #define LCPLL_CLK_FREQ_450 (0<<26) +#define LCPLL_CLK_FREQ_54O_BDW (1<<26) +#define LCPLL_CLK_FREQ_337_5_BDW (2<<26) +#define LCPLL_CLK_FREQ_675_BDW (3<<26) #define LCPLL_CD_CLOCK_DISABLE (1<<25) #define LCPLL_CD2X_CLOCK_DISABLE (1<<23) #define LCPLL_POWER_DOWN_ALLOW (1<<22) diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index a088f1f46bd..98790c7cccb 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -214,6 +214,22 @@ static void i915_save_display(struct drm_device *dev) dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); + } else if (IS_VALLEYVIEW(dev)) { + dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); + dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); + + dev_priv->regfile.saveBLC_PWM_CTL = + I915_READ(VLV_BLC_PWM_CTL(PIPE_A)); + dev_priv->regfile.saveBLC_HIST_CTL = + I915_READ(VLV_BLC_HIST_CTL(PIPE_A)); + dev_priv->regfile.saveBLC_PWM_CTL2 = + I915_READ(VLV_BLC_PWM_CTL2(PIPE_A)); + dev_priv->regfile.saveBLC_PWM_CTL_B = + I915_READ(VLV_BLC_PWM_CTL(PIPE_B)); + dev_priv->regfile.saveBLC_HIST_CTL_B = + I915_READ(VLV_BLC_HIST_CTL(PIPE_B)); + dev_priv->regfile.saveBLC_PWM_CTL2_B = + I915_READ(VLV_BLC_PWM_CTL2(PIPE_B)); } else { dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); @@ -302,6 +318,19 @@ static void i915_restore_display(struct drm_device *dev) I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL); I915_WRITE(RSTDBYCTL, dev_priv->regfile.saveMCHBAR_RENDER_STANDBY); + } else if (IS_VALLEYVIEW(dev)) { + I915_WRITE(VLV_BLC_PWM_CTL(PIPE_A), + dev_priv->regfile.saveBLC_PWM_CTL); + I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A), + dev_priv->regfile.saveBLC_HIST_CTL); + 
I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_A), + dev_priv->regfile.saveBLC_PWM_CTL2); + I915_WRITE(VLV_BLC_PWM_CTL(PIPE_B), + dev_priv->regfile.saveBLC_PWM_CTL); + I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B), + dev_priv->regfile.saveBLC_HIST_CTL); + I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_B), + dev_priv->regfile.saveBLC_PWM_CTL2); } else { I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS); I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL); diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c index 57fe1ae32a0..dfff0907f70 100644 --- a/drivers/gpu/drm/i915/intel_acpi.c +++ b/drivers/gpu/drm/i915/intel_acpi.c @@ -193,16 +193,14 @@ out: static bool intel_dsm_pci_probe(struct pci_dev *pdev) { - acpi_handle dhandle, intel_handle; - acpi_status status; + acpi_handle dhandle; int ret; - dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; - status = acpi_get_handle(dhandle, "_DSM", &intel_handle); - if (ACPI_FAILURE(status)) { + if (!acpi_has_method(dhandle, "_DSM")) { DRM_DEBUG_KMS("no _DSM method for intel device\n"); return false; } diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index e29bcae1ef8..e4fba39631a 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -624,11 +624,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, aux_channel = child->raw[25]; - is_dvi = child->common.device_type & (1 << 4); - is_dp = child->common.device_type & (1 << 2); - is_crt = child->common.device_type & (1 << 0); - is_hdmi = is_dvi && (child->common.device_type & (1 << 11)) == 0; - is_edp = is_dp && (child->common.device_type & (1 << 12)); + is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; + is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; + is_crt = child->common.device_type & DEVICE_TYPE_ANALOG_OUTPUT; + is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0; + is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR); info->supports_dvi = is_dvi; info->supports_hdmi = is_hdmi; @@ -790,7 +790,12 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) /* Default to using SSC */ dev_priv->vbt.lvds_use_ssc = 1; - dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); + /* + * Core/SandyBridge/IvyBridge use alternative (120MHz) reference + * clock for LVDS. 
+ */ + dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, + !HAS_PCH_SPLIT(dev)); DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq); for (port = PORT_A; port < I915_MAX_PORTS; port++) { diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 287cc5a21c2..f580a2b0ddd 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -638,6 +638,40 @@ int intel_parse_bios(struct drm_device *dev); #define DEVICE_TYPE_DP 0x68C6 #define DEVICE_TYPE_eDP 0x78C6 +#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15) +#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14) +#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13) +#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12) +#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11) +#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10) +#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9) +#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8) +#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6) +#define DEVICE_TYPE_LVDS_SINGALING (1 << 5) +#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4) +#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3) +#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2) +#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1) +#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0) + +/* + * Bits we care about when checking for DEVICE_TYPE_eDP + * Depending on the system, the other bits may or may not + * be set for eDP outputs. + */ +#define DEVICE_TYPE_eDP_BITS \ + (DEVICE_TYPE_INTERNAL_CONNECTOR | \ + DEVICE_TYPE_NOT_HDMI_OUTPUT | \ + DEVICE_TYPE_MIPI_OUTPUT | \ + DEVICE_TYPE_COMPOSITE_OUTPUT | \ + DEVICE_TYPE_DUAL_CHANNEL | \ + DEVICE_TYPE_LVDS_SINGALING | \ + DEVICE_TYPE_TMDS_DVI_SIGNALING | \ + DEVICE_TYPE_VIDEO_SIGNALING | \ + DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ + DEVICE_TYPE_DIGITAL_OUTPUT | \ + DEVICE_TYPE_ANALOG_OUTPUT) + /* define the DVO port for HDMI output type */ #define DVO_B 1 #define DVO_C 2 diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 2e01bd3a5d8..b5b1b9b23ad 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -822,16 +822,15 @@ void intel_crt_init(struct drm_device *dev) crt->base.mode_set = intel_crt_mode_set; crt->base.disable = intel_disable_crt; crt->base.enable = intel_enable_crt; - if (IS_HASWELL(dev)) - crt->base.get_config = hsw_crt_get_config; - else - crt->base.get_config = intel_crt_get_config; if (I915_HAS_HOTPLUG(dev)) crt->base.hpd_pin = HPD_CRT; - if (HAS_DDI(dev)) + if (HAS_DDI(dev)) { + crt->base.get_config = hsw_crt_get_config; crt->base.get_hw_state = intel_ddi_get_hw_state; - else + } else { + crt->base.get_config = intel_crt_get_config; crt->base.get_hw_state = intel_crt_get_hw_state; + } intel_connector->get_hw_state = intel_connector_get_hw_state; drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 31f4fe27138..330077bcd0b 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -72,6 +72,45 @@ static const u32 hsw_ddi_translations_hdmi[] = { 0x80FFFFFF, 0x00030002, /* 11: 1000 1000 0 */ }; +static const u32 bdw_ddi_translations_edp[] = { + 0x00FFFFFF, 0x00000012, /* DP parameters */ + 0x00EBAFFF, 0x00020011, + 0x00C71FFF, 0x0006000F, + 0x00FFFFFF, 0x00020011, + 0x00DB6FFF, 0x0005000F, + 0x00BEEFFF, 0x000A000C, + 0x00FFFFFF, 0x0005000F, + 0x00DB6FFF, 0x000A000C, + 0x00FFFFFF, 0x000A000C, + 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/ +}; + +static const u32 
bdw_ddi_translations_dp[] = { + 0x00FFFFFF, 0x0007000E, /* DP parameters */ + 0x00D75FFF, 0x000E000A, + 0x00BEFFFF, 0x00140006, + 0x00FFFFFF, 0x000E000A, + 0x00D75FFF, 0x00180004, + 0x80CB2FFF, 0x001B0002, + 0x00F7DFFF, 0x00180004, + 0x80D75FFF, 0x001B0002, + 0x80FFFFFF, 0x001B0002, + 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/ +}; + +static const u32 bdw_ddi_translations_fdi[] = { + 0x00FFFFFF, 0x0001000E, /* FDI parameters */ + 0x00D75FFF, 0x0004000A, + 0x00C30FFF, 0x00070006, + 0x00AAAFFF, 0x000C0000, + 0x00FFFFFF, 0x0004000A, + 0x00D75FFF, 0x00090004, + 0x00C30FFF, 0x000C0000, + 0x00FFFFFF, 0x00070006, + 0x00D75FFF, 0x000C0000, + 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/ +}; + enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) { struct drm_encoder *encoder = &intel_encoder->base; @@ -92,8 +131,9 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) } } -/* On Haswell, DDI port buffers must be programmed with correct values - * in advance. The buffer values are different for FDI and DP modes, +/* + * Starting with Haswell, DDI port buffers must be programmed with correct + * values in advance. The buffer values are different for FDI and DP modes, * but the HDMI/DVI fields are shared among those. So we program the DDI * in either FDI or DP modes only, as HDMI connections will work with both * of those @@ -103,10 +143,47 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) struct drm_i915_private *dev_priv = dev->dev_private; u32 reg; int i; - const u32 *ddi_translations = (port == PORT_E) ? - hsw_ddi_translations_fdi : - hsw_ddi_translations_dp; int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; + const u32 *ddi_translations_fdi; + const u32 *ddi_translations_dp; + const u32 *ddi_translations_edp; + const u32 *ddi_translations; + + if (IS_BROADWELL(dev)) { + ddi_translations_fdi = bdw_ddi_translations_fdi; + ddi_translations_dp = bdw_ddi_translations_dp; + ddi_translations_edp = bdw_ddi_translations_edp; + } else if (IS_HASWELL(dev)) { + ddi_translations_fdi = hsw_ddi_translations_fdi; + ddi_translations_dp = hsw_ddi_translations_dp; + ddi_translations_edp = hsw_ddi_translations_dp; + } else { + WARN(1, "ddi translation table missing\n"); + ddi_translations_edp = bdw_ddi_translations_dp; + ddi_translations_fdi = bdw_ddi_translations_fdi; + ddi_translations_dp = bdw_ddi_translations_dp; + } + + switch (port) { + case PORT_A: + ddi_translations = ddi_translations_edp; + break; + case PORT_B: + case PORT_C: + ddi_translations = ddi_translations_dp; + break; + case PORT_D: + if (intel_dpd_is_edp(dev)) + ddi_translations = ddi_translations_edp; + else + ddi_translations = ddi_translations_dp; + break; + case PORT_E: + ddi_translations = ddi_translations_fdi; + break; + default: + BUG(); + } for (i = 0, reg = DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) { @@ -756,7 +833,8 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); struct drm_encoder *encoder = &intel_encoder->base; - struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; enum pipe pipe = intel_crtc->pipe; enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; enum port port = intel_ddi_get_encoder_port(intel_encoder); @@ -792,10 +870,11 @@ void 
intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) if (cpu_transcoder == TRANSCODER_EDP) { switch (pipe) { case PIPE_A: - /* Can only use the always-on power well for eDP when - * not using the panel fitter, and when not using motion - * blur mitigation (which we don't support). */ - if (intel_crtc->config.pch_pfit.enabled) + /* On Haswell, can only use the always-on power well for + * eDP when not using the panel fitter, and when not + * using motion blur mitigation (which we don't + * support). */ + if (IS_HASWELL(dev) && intel_crtc->config.pch_pfit.enabled) temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; else temp |= TRANS_DDI_EDP_INPUT_A_ON; @@ -1156,18 +1235,29 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder) int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) { + struct drm_device *dev = dev_priv->dev; uint32_t lcpll = I915_READ(LCPLL_CTL); + uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; - if (lcpll & LCPLL_CD_SOURCE_FCLK) + if (lcpll & LCPLL_CD_SOURCE_FCLK) { return 800000; - else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) + } else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) { return 450000; - else if ((lcpll & LCPLL_CLK_FREQ_MASK) == LCPLL_CLK_FREQ_450) + } else if (freq == LCPLL_CLK_FREQ_450) { return 450000; - else if (IS_ULT(dev_priv->dev)) - return 337500; - else - return 540000; + } else if (IS_HASWELL(dev)) { + if (IS_ULT(dev)) + return 337500; + else + return 540000; + } else { + if (freq == LCPLL_CLK_FREQ_54O_BDW) + return 540000; + else if (freq == LCPLL_CLK_FREQ_337_5_BDW) + return 337500; + else + return 675000; + } } void intel_ddi_pll_init(struct drm_device *dev) @@ -1316,6 +1406,26 @@ void intel_ddi_get_config(struct intel_encoder *encoder, default: break; } + + if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp && + pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) { + /* + * This is a big fat ugly hack. + * + * Some machines in UEFI boot mode provide us a VBT that has 18 + * bpp and 1.62 GHz link bandwidth for eDP, which for reasons + * unknown we fail to light up. Yet the same BIOS boots up with + * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as + * max, not what it tells us to use. + * + * Note: This will still be broken if the eDP panel is not lit + * up by the BIOS, and thus we can't get the mode at module + * load. 
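(The override that follows amounts to widening the VBT maximum toward what the BIOS actually programmed; a hedged standalone sketch, names illustrative rather than driver API:)

static int edp_max_bpp(int vbt_bpp, int pipe_bpp)
{
	/* A VBT claiming less than the BIOS itself used is treated as
	 * wrong; keep the larger of the two as the new maximum. */
	return pipe_bpp > vbt_bpp ? pipe_bpp : vbt_bpp;
}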
+ */ + DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", + pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp); + dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp; + } } static void intel_ddi_destroy(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3cf284fa850..7ec8b488bb1 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2156,7 +2156,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc, else dspcntr &= ~DISPPLANE_TILED; - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE; else dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; @@ -2176,7 +2176,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc, I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); I915_MODIFY_DISPBASE(DSPSURF(plane), i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); - if (IS_HASWELL(dev)) { + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { I915_WRITE(DSPOFFSET(plane), (y << 16) | x); } else { I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); @@ -2421,9 +2421,10 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc) FDI_FE_ERRC_ENABLE); } -static bool pipe_has_enabled_pch(struct intel_crtc *intel_crtc) +static bool pipe_has_enabled_pch(struct intel_crtc *crtc) { - return intel_crtc->base.enabled && intel_crtc->config.has_pch_encoder; + return crtc->base.enabled && crtc->active && + crtc->config.has_pch_encoder; } static void ivb_modeset_global_resources(struct drm_device *dev) @@ -3074,6 +3075,48 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, I915_READ(VSYNCSHIFT(cpu_transcoder))); } +static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t temp; + + temp = I915_READ(SOUTH_CHICKEN1); + if (temp & FDI_BC_BIFURCATION_SELECT) + return; + + WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); + WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); + + temp |= FDI_BC_BIFURCATION_SELECT; + DRM_DEBUG_KMS("enabling fdi C rx\n"); + I915_WRITE(SOUTH_CHICKEN1, temp); + POSTING_READ(SOUTH_CHICKEN1); +} + +static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) +{ + struct drm_device *dev = intel_crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + switch (intel_crtc->pipe) { + case PIPE_A: + break; + case PIPE_B: + if (intel_crtc->config.fdi_lanes > 2) + WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT); + else + cpt_enable_fdi_bc_bifurcation(dev); + + break; + case PIPE_C: + cpt_enable_fdi_bc_bifurcation(dev); + + break; + default: + BUG(); + } +} + /* * Enable PCH resources required for PCH ports: * - PCH PLLs @@ -3092,6 +3135,9 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) assert_pch_transcoder_disabled(dev_priv, pipe); + if (IS_IVYBRIDGE(dev)) + ivybridge_update_fdi_bc_bifurcation(intel_crtc); + /* Write the TU size bits before fdi link training, so that error * detection works. */ I915_WRITE(FDI_RX_TUSIZE1(pipe), @@ -3347,15 +3393,26 @@ void hsw_enable_ips(struct intel_crtc *crtc) * only after intel_enable_plane. And intel_enable_plane already waits * for a vblank, so all we need to do here is to enable the IPS bit. */ assert_plane_enabled(dev_priv, crtc->plane); - I915_WRITE(IPS_CTL, IPS_ENABLE); - - /* The bit only becomes 1 in the next vblank, so this wait here is - * essentially intel_wait_for_vblank. 
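(wait_for() is the driver's poll-until-true-or-timeout helper; a rough userspace approximation of that pattern, simplified from the kernel macro:)

#include <stdbool.h>
#include <time.h>

/* check() stands in for "I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE". */
static bool wait_for_cond(bool (*check)(void), int timeout_ms)
{
	struct timespec one_ms = { .tv_sec = 0, .tv_nsec = 1000000 };
	int elapsed;

	for (elapsed = 0; elapsed < timeout_ms; elapsed++) {
		if (check())
			return true;
		nanosleep(&one_ms, NULL);
	}
	return false; /* timed out, as in the DRM_ERROR path above */
}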
If we don't have this and don't - * wait for vblanks until the end of crtc_enable, then the HW state - * readout code will complain that the expected IPS_CTL value is not the - * one we read. */ - if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50)) - DRM_ERROR("Timed out waiting for IPS enable\n"); + if (IS_BROADWELL(crtc->base.dev)) { + mutex_lock(&dev_priv->rps.hw_lock); + WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000)); + mutex_unlock(&dev_priv->rps.hw_lock); + /* Quoting Art Runyan: "its not safe to expect any particular + * value in IPS_CTL bit 31 after enabling IPS through the + * mailbox." Therefore we need to defer waiting on the state + * change. + * TODO: need to fix this for state checker + */ + } else { + I915_WRITE(IPS_CTL, IPS_ENABLE); + /* The bit only becomes 1 in the next vblank, so this wait here + * is essentially intel_wait_for_vblank. If we don't have this + * and don't wait for vblanks until the end of crtc_enable, then + * the HW state readout code will complain that the expected + * IPS_CTL value is not the one we read. */ + if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50)) + DRM_ERROR("Timed out waiting for IPS enable\n"); + } } void hsw_disable_ips(struct intel_crtc *crtc) @@ -3367,7 +3424,12 @@ void hsw_disable_ips(struct intel_crtc *crtc) return; assert_plane_enabled(dev_priv, crtc->plane); - I915_WRITE(IPS_CTL, 0); + if (IS_BROADWELL(crtc->base.dev)) { + mutex_lock(&dev_priv->rps.hw_lock); + WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); + mutex_unlock(&dev_priv->rps.hw_lock); + } else + I915_WRITE(IPS_CTL, 0); POSTING_READ(IPS_CTL); /* We need to wait for a vblank before we can disable the plane. */ @@ -4156,8 +4218,6 @@ static void intel_connector_check_state(struct intel_connector *connector) * consider. */ void intel_connector_dpms(struct drm_connector *connector, int mode) { - struct intel_encoder *encoder = intel_attached_encoder(connector); - /* All the simple cases only support two dpms states. 
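(In miniature, the two-state collapse performed next is just:)

/* Illustrative only: every legacy DPMS value other than ON collapses
 * to OFF, matching the check below. */
enum dpms { DPMS_ON, DPMS_STANDBY, DPMS_SUSPEND, DPMS_OFF };

static enum dpms normalize_dpms(enum dpms mode)
{
	return mode == DPMS_ON ? DPMS_ON : DPMS_OFF;
}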
*/ if (mode != DRM_MODE_DPMS_ON) mode = DRM_MODE_DPMS_OFF; @@ -4168,10 +4228,8 @@ void intel_connector_dpms(struct drm_connector *connector, int mode) connector->dpms = mode; /* Only need to change hw state when actually enabled */ - if (encoder->base.crtc) - intel_encoder_dpms(encoder, mode); - else - WARN_ON(encoder->connectors_active != false); + if (connector->encoder) + intel_encoder_dpms(to_intel_encoder(connector->encoder), mode); intel_modeset_check_state(connector->dev); } @@ -4202,7 +4260,7 @@ static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, return false; } - if (IS_HASWELL(dev)) { + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { if (pipe_config->fdi_lanes > 2) { DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n", pipe_config->fdi_lanes); @@ -5776,14 +5834,16 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc) static void haswell_set_pipeconf(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + enum pipe pipe = intel_crtc->pipe; enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; uint32_t val; val = 0; - if (intel_crtc->config.dither) + if (IS_HASWELL(dev) && intel_crtc->config.dither) val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) @@ -5796,6 +5856,33 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc) I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT); POSTING_READ(GAMMA_MODE(intel_crtc->pipe)); + + if (IS_BROADWELL(dev)) { + val = 0; + + switch (intel_crtc->config.pipe_bpp) { + case 18: + val |= PIPEMISC_DITHER_6_BPC; + break; + case 24: + val |= PIPEMISC_DITHER_8_BPC; + break; + case 30: + val |= PIPEMISC_DITHER_10_BPC; + break; + case 36: + val |= PIPEMISC_DITHER_12_BPC; + break; + default: + /* Case prevented by pipe_config_set_bpp. 
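(The four legal pipe_bpp values in this switch are exactly three color channels times 6/8/10/12 bpc, so the mapping is a divide by three; a self-contained check:)

#include <assert.h>

/* Sketch: PIPEMISC wants bits per channel; pipe_bpp counts all three
 * channels. The BDW register encodings are not reproduced here. */
static int pipe_bpp_to_bpc(int pipe_bpp)
{
	assert(pipe_bpp == 18 || pipe_bpp == 24 ||
	       pipe_bpp == 30 || pipe_bpp == 36);
	return pipe_bpp / 3;
}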
*/ + BUG(); + } + + if (intel_crtc->config.dither) + val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; + + I915_WRITE(PIPEMISC(pipe), val); + } } static bool ironlake_compute_clocks(struct drm_crtc *crtc, @@ -5849,48 +5936,6 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc, return true; } -static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t temp; - - temp = I915_READ(SOUTH_CHICKEN1); - if (temp & FDI_BC_BIFURCATION_SELECT) - return; - - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); - - temp |= FDI_BC_BIFURCATION_SELECT; - DRM_DEBUG_KMS("enabling fdi C rx\n"); - I915_WRITE(SOUTH_CHICKEN1, temp); - POSTING_READ(SOUTH_CHICKEN1); -} - -static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) -{ - struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - switch (intel_crtc->pipe) { - case PIPE_A: - break; - case PIPE_B: - if (intel_crtc->config.fdi_lanes > 2) - WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT); - else - cpt_enable_fdi_bc_bifurcation(dev); - - break; - case PIPE_C: - cpt_enable_fdi_bc_bifurcation(dev); - - break; - default: - BUG(); - } -} - int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) { /* @@ -6079,9 +6124,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, &intel_crtc->config.fdi_m_n); } - if (IS_IVYBRIDGE(dev)) - ivybridge_update_fdi_bc_bifurcation(intel_crtc); - ironlake_set_pipeconf(crtc); /* Set up the display plane register */ @@ -6476,6 +6518,9 @@ static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv) void hsw_enable_package_c8(struct drm_i915_private *dev_priv) { + if (!HAS_PC8(dev_priv->dev)) + return; + mutex_lock(&dev_priv->pc8.lock); __hsw_enable_package_c8(dev_priv); mutex_unlock(&dev_priv->pc8.lock); @@ -6483,6 +6528,9 @@ void hsw_enable_package_c8(struct drm_i915_private *dev_priv) void hsw_disable_package_c8(struct drm_i915_private *dev_priv) { + if (!HAS_PC8(dev_priv->dev)) + return; + mutex_lock(&dev_priv->pc8.lock); __hsw_disable_package_c8(dev_priv); mutex_unlock(&dev_priv->pc8.lock); @@ -6520,6 +6568,9 @@ static void hsw_update_package_c8(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; bool allow; + if (!HAS_PC8(dev_priv->dev)) + return; + if (!i915_enable_pc8) return; @@ -6543,36 +6594,103 @@ done: static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv) { + if (!HAS_PC8(dev_priv->dev)) + return; + + mutex_lock(&dev_priv->pc8.lock); if (!dev_priv->pc8.gpu_idle) { dev_priv->pc8.gpu_idle = true; - hsw_enable_package_c8(dev_priv); + __hsw_enable_package_c8(dev_priv); } + mutex_unlock(&dev_priv->pc8.lock); } static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv) { + if (!HAS_PC8(dev_priv->dev)) + return; + + mutex_lock(&dev_priv->pc8.lock); if (dev_priv->pc8.gpu_idle) { dev_priv->pc8.gpu_idle = false; - hsw_disable_package_c8(dev_priv); + __hsw_disable_package_c8(dev_priv); } + mutex_unlock(&dev_priv->pc8.lock); } -static void haswell_modeset_global_resources(struct drm_device *dev) +#define for_each_power_domain(domain, mask) \ + for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ + if ((1 << (domain)) & (mask)) + +static unsigned long get_pipe_power_domains(struct drm_device *dev, + enum pipe pipe, bool pfit_enabled) { - bool enable = false; + unsigned long mask; + enum transcoder 
transcoder; + + transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe); + + mask = BIT(POWER_DOMAIN_PIPE(pipe)); + mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); + if (pfit_enabled) + mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); + + return mask; +} + +void intel_display_set_init_power(struct drm_device *dev, bool enable) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->power_domains.init_power_on == enable) + return; + + if (enable) + intel_display_power_get(dev, POWER_DOMAIN_INIT); + else + intel_display_power_put(dev, POWER_DOMAIN_INIT); + + dev_priv->power_domains.init_power_on = enable; +} + +static void modeset_update_power_wells(struct drm_device *dev) +{ + unsigned long pipe_domains[I915_MAX_PIPES] = { 0, }; struct intel_crtc *crtc; + /* + * First get all needed power domains, then put all unneeded, to avoid + * any unnecessary toggling of the power wells. + */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { + enum intel_display_power_domain domain; + if (!crtc->base.enabled) continue; - if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled || - crtc->config.cpu_transcoder != TRANSCODER_EDP) - enable = true; + pipe_domains[crtc->pipe] = get_pipe_power_domains(dev, + crtc->pipe, + crtc->config.pch_pfit.enabled); + + for_each_power_domain(domain, pipe_domains[crtc->pipe]) + intel_display_power_get(dev, domain); + } + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { + enum intel_display_power_domain domain; + + for_each_power_domain(domain, crtc->enabled_power_domains) + intel_display_power_put(dev, domain); + + crtc->enabled_power_domains = pipe_domains[crtc->pipe]; } - intel_set_power_well(dev, enable); + intel_display_set_init_power(dev, false); +} +static void haswell_modeset_global_resources(struct drm_device *dev) +{ + modeset_update_power_wells(dev); hsw_update_package_c8(dev); } @@ -6935,6 +7053,11 @@ static void ironlake_write_eld(struct drm_connector *connector, aud_config = IBX_AUD_CFG(pipe); aud_cntl_st = IBX_AUD_CNTL_ST(pipe); aud_cntrl_st2 = IBX_AUD_CNTL_ST2; + } else if (IS_VALLEYVIEW(connector->dev)) { + hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe); + aud_config = VLV_AUD_CFG(pipe); + aud_cntl_st = VLV_AUD_CNTL_ST(pipe); + aud_cntrl_st2 = VLV_AUD_CNTL_ST2; } else { hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe); aud_config = CPT_AUD_CFG(pipe); @@ -6944,8 +7067,19 @@ static void ironlake_write_eld(struct drm_connector *connector, DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); - i = I915_READ(aud_cntl_st); - i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */ + if (IS_VALLEYVIEW(connector->dev)) { + struct intel_encoder *intel_encoder; + struct intel_digital_port *intel_dig_port; + + intel_encoder = intel_attached_encoder(connector); + intel_dig_port = enc_to_dig_port(&intel_encoder->base); + i = intel_dig_port->port; + } else { + i = I915_READ(aud_cntl_st); + i = (i >> 29) & DIP_PORT_SEL_MASK; + /* DIP_Port_Select, 0x1 = PortB */ + } + if (!i) { DRM_DEBUG_DRIVER("Audio directed to unknown port\n"); /* operate blindly on all ports */ @@ -7069,7 +7203,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) intel_crtc->cursor_visible = visible; } /* and commit changes on next vblank */ + POSTING_READ(CURCNTR(pipe)); I915_WRITE(CURBASE(pipe), base); + POSTING_READ(CURBASE(pipe)); } static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) @@ -7089,7 +7225,7 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) cntl &= ~(CURSOR_MODE | 
MCURSOR_GAMMA_ENABLE); cntl |= CURSOR_MODE_DISABLE; } - if (IS_HASWELL(dev)) { + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { cntl |= CURSOR_PIPE_CSC_ENABLE; cntl &= ~CURSOR_TRICKLE_FEED_DISABLE; } @@ -7098,7 +7234,9 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) intel_crtc->cursor_visible = visible; } /* and commit changes on next vblank */ + POSTING_READ(CURCNTR_IVB(pipe)); I915_WRITE(CURBASE_IVB(pipe), base); + POSTING_READ(CURBASE_IVB(pipe)); } /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ @@ -7145,7 +7283,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, if (!visible && !intel_crtc->cursor_visible) return; - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) { I915_WRITE(CURPOS_IVB(pipe), pos); ivb_update_cursor(crtc, base); } else { @@ -7276,8 +7414,8 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - intel_crtc->cursor_x = x; - intel_crtc->cursor_y = y; + intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX); + intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX); if (intel_crtc->active) intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); @@ -9133,8 +9271,7 @@ check_crtc_state(struct drm_device *dev) enum pipe pipe; if (encoder->base.crtc != &crtc->base) continue; - if (encoder->get_config && - encoder->get_hw_state(encoder, &pipe)) + if (encoder->get_hw_state(encoder, &pipe)) encoder->get_config(encoder, &pipe_config); } @@ -9804,6 +9941,18 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); } +enum pipe intel_get_pipe_from_connector(struct intel_connector *connector) +{ + struct drm_encoder *encoder = connector->base.encoder; + + WARN_ON(!mutex_is_locked(&connector->base.dev->mode_config.mutex)); + + if (!encoder) + return INVALID_PIPE; + + return to_intel_crtc(encoder->crtc)->pipe; +} + int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_file *file) { @@ -10255,7 +10404,7 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.write_eld = ironlake_write_eld; dev_priv->display.modeset_global_resources = ivb_modeset_global_resources; - } else if (IS_HASWELL(dev)) { + } else if (IS_HASWELL(dev) || IS_GEN8(dev)) { dev_priv->display.fdi_link_train = hsw_fdi_link_train; dev_priv->display.write_eld = haswell_write_eld; dev_priv->display.modeset_global_resources = @@ -10263,7 +10412,8 @@ static void intel_init_display(struct drm_device *dev) } } else if (IS_G4X(dev)) { dev_priv->display.write_eld = g4x_write_eld; - } + } else if (IS_VALLEYVIEW(dev)) + dev_priv->display.write_eld = ironlake_write_eld; /* Default just returns -ENODEV to indicate unsupported */ dev_priv->display.queue_flip = intel_default_queue_flip; @@ -10286,6 +10436,7 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.queue_flip = intel_gen6_queue_flip; break; case 7: + case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. 
*/ dev_priv->display.queue_flip = intel_gen7_queue_flip; break; } @@ -10441,33 +10592,6 @@ static void i915_disable_vga(struct drm_device *dev) POSTING_READ(vga_reg); } -static void i915_enable_vga_mem(struct drm_device *dev) -{ - /* Enable VGA memory on Intel HD */ - if (HAS_PCH_SPLIT(dev)) { - vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); - outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE); - vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO | - VGA_RSRC_LEGACY_MEM | - VGA_RSRC_NORMAL_IO | - VGA_RSRC_NORMAL_MEM); - vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); - } -} - -void i915_disable_vga_mem(struct drm_device *dev) -{ - /* Disable VGA memory on Intel HD */ - if (HAS_PCH_SPLIT(dev)) { - vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); - outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE); - vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO | - VGA_RSRC_NORMAL_IO | - VGA_RSRC_NORMAL_MEM); - vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); - } -} - void intel_modeset_init_hw(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -10753,7 +10877,6 @@ void i915_redisable_vga(struct drm_device *dev) if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); i915_disable_vga(dev); - i915_disable_vga_mem(dev); } } @@ -10808,8 +10931,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) if (encoder->get_hw_state(encoder, &pipe)) { crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); encoder->base.crtc = &crtc->base; - if (encoder->get_config) - encoder->get_config(encoder, &crtc->config); + encoder->get_config(encoder, &crtc->config); } else { encoder->base.crtc = NULL; } @@ -10960,8 +11082,6 @@ void intel_modeset_cleanup(struct drm_device *dev) intel_disable_fbc(dev); - i915_enable_vga_mem(dev); - intel_disable_gt_powersave(dev); ironlake_teardown_rc6(dev); @@ -11073,7 +11193,7 @@ intel_display_capture_error_state(struct drm_device *dev) if (INTEL_INFO(dev)->num_pipes == 0) return NULL; - error = kmalloc(sizeof(*error), GFP_ATOMIC); + error = kzalloc(sizeof(*error), GFP_ATOMIC); if (error == NULL) return NULL; @@ -11081,6 +11201,9 @@ intel_display_capture_error_state(struct drm_device *dev) error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); for_each_pipe(i) { + if (!intel_display_power_enabled(dev, POWER_DOMAIN_PIPE(i))) + continue; + if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) { error->cursor[i].control = I915_READ(CURCNTR(i)); error->cursor[i].position = I915_READ(CURPOS(i)); @@ -11114,6 +11237,10 @@ intel_display_capture_error_state(struct drm_device *dev) for (i = 0; i < error->num_transcoders; i++) { enum transcoder cpu_transcoder = transcoders[i]; + if (!intel_display_power_enabled(dev, + POWER_DOMAIN_TRANSCODER(cpu_transcoder))) + continue; + error->transcoder[i].cpu_transcoder = cpu_transcoder; error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); @@ -11125,12 +11252,6 @@ intel_display_capture_error_state(struct drm_device *dev) error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); } - /* In the code above we read the registers without checking if the power - * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to - * prevent the next I915_WRITE from detecting it and printing an error - * message. 
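(With per-domain power checks now skipping reads, the kmalloc-to-kzalloc switch above is what keeps the skipped slots well defined; the pattern in miniature, with domain_is_up() and read_reg() as stand-ins rather than driver API:)

#include <stdbool.h>
#include <stdlib.h>

struct pipe_err { unsigned int control, position, base; };

static struct pipe_err *capture_pipes(int npipes,
				      bool (*domain_is_up)(int),
				      unsigned int (*read_reg)(int))
{
	/* calloc plays the role of kzalloc in the driver. */
	struct pipe_err *e = calloc(npipes, sizeof(*e));
	int i;

	if (!e)
		return NULL;
	for (i = 0; i < npipes; i++) {
		if (!domain_is_up(i))
			continue; /* slot stays zeroed, never garbage */
		e[i].control = read_reg(i);
	}
	return e;
}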
*/ - intel_uncore_clear_errors(dev); - return error; } @@ -11175,7 +11296,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, } for (i = 0; i < error->num_transcoders; i++) { - err_printf(m, " CPU transcoder: %c\n", + err_printf(m, "CPU transcoder: %c\n", transcoder_name(error->transcoder[i].cpu_transcoder)); err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 1e3d2720d81..0b2e842fef0 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -405,6 +405,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, uint32_t status; int try, precharge, clock = 0; bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev); + uint32_t timeout; /* dp aux is extremely sensitive to irq latency, hence request the * lowest possible wakeup latency and so prevent the cpu from going into @@ -419,6 +420,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, else precharge = 5; + if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL) + timeout = DP_AUX_CH_CTL_TIME_OUT_600us; + else + timeout = DP_AUX_CH_CTL_TIME_OUT_400us; + intel_aux_display_runtime_get(dev_priv); /* Try to wait for any previous AUX channel activity */ @@ -454,7 +460,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, I915_WRITE(ch_ctl, DP_AUX_CH_CTL_SEND_BUSY | (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | - DP_AUX_CH_CTL_TIME_OUT_400us | + timeout | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | @@ -623,6 +629,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, int reply_bytes; int ret; + ironlake_edp_panel_vdd_on(intel_dp); intel_dp_check_edp(intel_dp); /* Set up the command byte */ if (mode & MODE_I2C_READ) @@ -665,7 +672,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, reply, reply_bytes); if (ret < 0) { DRM_DEBUG_KMS("aux_ch failed %d\n", ret); - return ret; + goto out; } switch (reply[0] & AUX_NATIVE_REPLY_MASK) { @@ -676,7 +683,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, break; case AUX_NATIVE_REPLY_NACK: DRM_DEBUG_KMS("aux_ch native nack\n"); - return -EREMOTEIO; + ret = -EREMOTEIO; + goto out; case AUX_NATIVE_REPLY_DEFER: /* * For now, just give more slack to branch devices. 
We @@ -694,7 +702,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, default: DRM_ERROR("aux_ch invalid native reply 0x%02x\n", reply[0]); - return -EREMOTEIO; + ret = -EREMOTEIO; + goto out; } switch (reply[0] & AUX_I2C_REPLY_MASK) { @@ -702,22 +711,29 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, if (mode == MODE_I2C_READ) { *read_byte = reply[1]; } - return reply_bytes - 1; + ret = reply_bytes - 1; + goto out; case AUX_I2C_REPLY_NACK: DRM_DEBUG_KMS("aux_i2c nack\n"); - return -EREMOTEIO; + ret = -EREMOTEIO; + goto out; case AUX_I2C_REPLY_DEFER: DRM_DEBUG_KMS("aux_i2c defer\n"); udelay(100); break; default: DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); - return -EREMOTEIO; + ret = -EREMOTEIO; + goto out; } } DRM_ERROR("too many retries, giving up\n"); - return -EREMOTEIO; + ret = -EREMOTEIO; + +out: + ironlake_edp_panel_vdd_off(intel_dp, false); + return ret; } static int @@ -739,9 +755,7 @@ intel_dp_i2c_init(struct intel_dp *intel_dp, intel_dp->adapter.algo_data = &intel_dp->algo; intel_dp->adapter.dev.parent = intel_connector->base.kdev; - ironlake_edp_panel_vdd_on(intel_dp); ret = i2c_dp_aux_add_bus(&intel_dp->adapter); - ironlake_edp_panel_vdd_off(intel_dp, false); return ret; } @@ -1069,17 +1083,16 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) if (!is_edp(intel_dp)) return; - DRM_DEBUG_KMS("Turn eDP VDD on\n"); WARN(intel_dp->want_panel_vdd, "eDP VDD already requested on\n"); intel_dp->want_panel_vdd = true; - if (ironlake_edp_have_panel_vdd(intel_dp)) { - DRM_DEBUG_KMS("eDP VDD already on\n"); + if (ironlake_edp_have_panel_vdd(intel_dp)) return; - } + + DRM_DEBUG_KMS("Turning eDP VDD on\n"); if (!ironlake_edp_have_panel_power(intel_dp)) ironlake_wait_panel_power_cycle(intel_dp); @@ -1113,11 +1126,13 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { + DRM_DEBUG_KMS("Turning eDP VDD off\n"); + pp = ironlake_get_pp_control(intel_dp); pp &= ~EDP_FORCE_VDD; - pp_stat_reg = _pp_ctrl_reg(intel_dp); - pp_ctrl_reg = _pp_stat_reg(intel_dp); + pp_ctrl_reg = _pp_ctrl_reg(intel_dp); + pp_stat_reg = _pp_stat_reg(intel_dp); I915_WRITE(pp_ctrl_reg, pp); POSTING_READ(pp_ctrl_reg); @@ -1145,7 +1160,6 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) if (!is_edp(intel_dp)) return; - DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd); WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); intel_dp->want_panel_vdd = false; @@ -1241,7 +1255,6 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp) struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe; u32 pp; u32 pp_ctrl_reg; @@ -1264,7 +1277,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp) I915_WRITE(pp_ctrl_reg, pp); POSTING_READ(pp_ctrl_reg); - intel_panel_enable_backlight(dev, pipe); + intel_panel_enable_backlight(intel_dp->attached_connector); } void ironlake_edp_backlight_off(struct intel_dp *intel_dp) @@ -1277,7 +1290,7 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp) if (!is_edp(intel_dp)) return; - intel_panel_disable_backlight(dev); + intel_panel_disable_backlight(intel_dp->attached_connector); DRM_DEBUG_KMS("\n"); pp = ironlake_get_pp_control(intel_dp); @@ -1476,6 +1489,26 @@ static void 
intel_dp_get_config(struct intel_encoder *encoder, ironlake_check_encoder_dotclock(pipe_config, dotclock); pipe_config->adjusted_mode.crtc_clock = dotclock; + + if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp && + pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) { + /* + * This is a big fat ugly hack. + * + * Some machines in UEFI boot mode provide us a VBT that has 18 + * bpp and 1.62 GHz link bandwidth for eDP, which for reasons + * unknown we fail to light up. Yet the same BIOS boots up with + * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as + * max, not what it tells us to use. + * + * Note: This will still be broken if the eDP panel is not lit + * up by the BIOS, and thus we can't get the mode at module + * load. + */ + DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", + pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp); + dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp; + } } static bool is_edp_psr(struct drm_device *dev) @@ -1543,7 +1576,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp) /* Avoid continuous PSR exit by masking memup and hpd */ I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP | - EDP_PSR_DEBUG_MASK_HPD); + EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP); intel_dp->psr_setup_done = true; } @@ -1583,6 +1616,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp) uint32_t max_sleep_time = 0x1f; uint32_t idle_frames = 1; uint32_t val = 0x0; + const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) { val |= EDP_PSR_LINK_STANDBY; @@ -1593,7 +1627,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp) val |= EDP_PSR_LINK_DISABLE; I915_WRITE(EDP_PSR_CTL(dev), val | - EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES | + (IS_BROADWELL(dev) ? 0 : link_entry_time) | max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | EDP_PSR_ENABLE); @@ -1740,7 +1774,7 @@ static void intel_disable_dp(struct intel_encoder *encoder) * ensure that we have vdd while we switch off the panel. */ ironlake_edp_panel_vdd_on(intel_dp); ironlake_edp_backlight_off(intel_dp); - intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); ironlake_edp_panel_off(intel_dp); /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. 
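(The early-return-to-goto conversion in intel_dp_i2c_aux_ch() earlier in this file is the usual single-exit cleanup shape, here holding the panel VDD reference across every path; in miniature, with illustrative names:)

#include <stdio.h>

static int do_aux_transfer(int fail)
{
	int ret = 0;

	puts("vdd on");		/* ironlake_edp_panel_vdd_on() */
	if (fail) {
		ret = -1;
		goto out;	/* a bare return here would leak the ref */
	}
	puts("transfer ok");
out:
	puts("vdd off");	/* ironlake_edp_panel_vdd_off() */
	return ret;
}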
*/ @@ -1930,7 +1964,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp) struct drm_device *dev = intel_dp_to_dev(intel_dp); enum port port = dp_to_dig_port(intel_dp)->port; - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev)) return DP_TRAIN_VOLTAGE_SWING_1200; else if (IS_GEN7(dev) && port == PORT_A) return DP_TRAIN_VOLTAGE_SWING_800; @@ -1946,7 +1980,18 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) struct drm_device *dev = intel_dp_to_dev(intel_dp); enum port port = dp_to_dig_port(intel_dp)->port; - if (HAS_DDI(dev)) { + if (IS_BROADWELL(dev)) { + switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_400: + case DP_TRAIN_VOLTAGE_SWING_600: + return DP_TRAIN_PRE_EMPHASIS_6; + case DP_TRAIN_VOLTAGE_SWING_800: + return DP_TRAIN_PRE_EMPHASIS_3_5; + case DP_TRAIN_VOLTAGE_SWING_1200: + default: + return DP_TRAIN_PRE_EMPHASIS_0; + } + } else if (IS_HASWELL(dev)) { switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_400: return DP_TRAIN_PRE_EMPHASIS_9_5; @@ -2258,6 +2303,41 @@ intel_hsw_signal_levels(uint8_t train_set) } } +static uint32_t +intel_bdw_signal_levels(uint8_t train_set) +{ + int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | + DP_TRAIN_PRE_EMPHASIS_MASK); + switch (signal_levels) { + case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: + return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */ + case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: + return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */ + case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: + return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */ + + case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: + return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */ + case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: + return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */ + case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: + return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */ + + case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: + return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */ + case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: + return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */ + + case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: + return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */ + + default: + DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" + "0x%x\n", signal_levels); + return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */ + } +} + /* Properly updates "DP" with the correct signal levels. 
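(Equivalently, the nine-way switch above is a dense lookup on the two DPCD training fields; a self-contained sketch that returns the Sel index rather than the DDI_BUF_EMP_*_BDW register value:)

/* Voltage-swing level lives in train_set bits 1:0, pre-emphasis level
 * in bits 4:3; -1 marks combinations BDW does not support (the driver
 * falls back to Sel0 for those). */
static const int bdw_emp_sel[4][4] = {
	/* pre-emph:    0dB 3.5dB  6dB 9.5dB */
	/* 400 mV  */ {   0,    1,   2,  -1 },
	/* 600 mV  */ {   3,    4,   5,  -1 },
	/* 800 mV  */ {   6,    7,  -1,  -1 },
	/* 1200 mV */ {   8,   -1,  -1,  -1 },
};

static int bdw_sel_index(unsigned char train_set)
{
	return bdw_emp_sel[train_set & 0x3][(train_set >> 3) & 0x3];
}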
*/ static void intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) @@ -2268,7 +2348,10 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) uint32_t signal_levels, mask; uint8_t train_set = intel_dp->train_set[0]; - if (HAS_DDI(dev)) { + if (IS_BROADWELL(dev)) { + signal_levels = intel_bdw_signal_levels(train_set); + mask = DDI_BUF_EMP_MASK; + } else if (IS_HASWELL(dev)) { signal_levels = intel_hsw_signal_levels(train_set); mask = DDI_BUF_EMP_MASK; } else if (IS_VALLEYVIEW(dev)) { @@ -3256,7 +3339,8 @@ bool intel_dpd_is_edp(struct drm_device *dev) p_child = dev_priv->vbt.child_dev + i; if (p_child->common.dvo_port == PORT_IDPD && - p_child->common.device_type == DEVICE_TYPE_eDP) + (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) == + (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS)) return true; } return false; @@ -3478,7 +3562,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, &power_seq); - ironlake_edp_panel_vdd_on(intel_dp); edid = drm_get_edid(connector, &intel_dp->adapter); if (edid) { if (drm_add_edid_modes(connector, edid)) { @@ -3510,8 +3593,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; } - ironlake_edp_panel_vdd_off(intel_dp, false); - intel_panel_init(&intel_connector->panel, fixed_mode); intel_panel_setup_backlight(connector); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index e33f387d418..1e49aa8f537 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -326,6 +326,7 @@ struct intel_crtc { * some outputs connected to this crtc. */ bool active; + unsigned long enabled_power_domains; bool eld_vld; bool primary_enabled; /* is the primary plane (partially) visible? 
*/ bool lowfreq_avail; @@ -629,6 +630,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector, struct drm_encoder *intel_best_encoder(struct drm_connector *connector); struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc); +enum pipe intel_get_pipe_from_connector(struct intel_connector *connector); int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_file *file_priv); enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, @@ -691,6 +693,7 @@ bool intel_crtc_active(struct drm_crtc *crtc); void i915_disable_vga_mem(struct drm_device *dev); void hsw_enable_ips(struct intel_crtc *crtc); void hsw_disable_ips(struct intel_crtc *crtc); +void intel_display_set_init_power(struct drm_device *dev, bool enable); /* intel_dp.c */ @@ -800,10 +803,11 @@ void intel_pch_panel_fitting(struct intel_crtc *crtc, void intel_gmch_panel_fitting(struct intel_crtc *crtc, struct intel_crtc_config *pipe_config, int fitting_mode); -void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max); +void intel_panel_set_backlight(struct intel_connector *connector, u32 level, + u32 max); int intel_panel_setup_backlight(struct drm_connector *connector); -void intel_panel_enable_backlight(struct drm_device *dev, enum pipe pipe); -void intel_panel_disable_backlight(struct drm_device *dev); +void intel_panel_enable_backlight(struct intel_connector *connector); +void intel_panel_disable_backlight(struct intel_connector *connector); void intel_panel_destroy_backlight(struct drm_device *dev); enum drm_connector_status intel_panel_detect(struct drm_device *dev); @@ -821,15 +825,15 @@ bool intel_fbc_enabled(struct drm_device *dev); void intel_update_fbc(struct drm_device *dev); void intel_gpu_ips_init(struct drm_i915_private *dev_priv); void intel_gpu_ips_teardown(void); -int i915_init_power_well(struct drm_device *dev); -void i915_remove_power_well(struct drm_device *dev); +int intel_power_domains_init(struct drm_device *dev); +void intel_power_domains_remove(struct drm_device *dev); bool intel_display_power_enabled(struct drm_device *dev, enum intel_display_power_domain domain); void intel_display_power_get(struct drm_device *dev, enum intel_display_power_domain domain); void intel_display_power_put(struct drm_device *dev, enum intel_display_power_domain domain); -void intel_init_power_well(struct drm_device *dev); +void intel_power_domains_init_hw(struct drm_device *dev); void intel_set_power_well(struct drm_device *dev, bool enable); void intel_enable_gt_powersave(struct drm_device *dev); void intel_disable_gt_powersave(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 1b64145c669..3c773654685 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -173,11 +173,16 @@ static void intel_enable_dvo(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; struct intel_dvo *intel_dvo = enc_to_dvo(encoder); + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); u32 dvo_reg = intel_dvo->dev.dvo_reg; u32 temp = I915_READ(dvo_reg); I915_WRITE(dvo_reg, temp | DVO_ENABLE); I915_READ(dvo_reg); + intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, + &crtc->config.requested_mode, + &crtc->config.adjusted_mode); + intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); } @@ -186,6 +191,7 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode) { struct intel_dvo 
*intel_dvo = intel_attached_dvo(connector); struct drm_crtc *crtc; + struct intel_crtc_config *config; /* dvo supports only 2 dpms states. */ if (mode != DRM_MODE_DPMS_ON) @@ -206,10 +212,16 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode) /* We call connector dpms manually below in case pipe dpms doesn't * change due to cloning. */ if (mode == DRM_MODE_DPMS_ON) { + config = &to_intel_crtc(crtc)->config; + intel_dvo->base.connectors_active = true; intel_crtc_update_dpms(crtc); + intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, + &config->requested_mode, + &config->adjusted_mode); + intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); } else { intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); @@ -296,10 +308,6 @@ static void intel_dvo_mode_set(struct intel_encoder *encoder) break; } - intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, - &crtc->config.requested_mode, - adjusted_mode); - /* Save the data order, since I don't know what it should be set to. */ dvo_val = I915_READ(dvo_reg) & (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG); diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index acc839569c3..895fcb4fbd9 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -78,8 +78,8 @@ static int intelfb_create(struct drm_fb_helper *helper, mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; - mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) / - 8), 64); + mode_cmd.pitches[0] = ALIGN(mode_cmd.width * + DIV_ROUND_UP(sizes->surface_bpp, 8), 64); mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 51a8336dec2..03f9ca70530 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -847,7 +847,7 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi) if (IS_G4X(dev)) return 165000; - else if (IS_HASWELL(dev)) + else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) return 300000; else return 225000; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index ae0c843dd26..c3b4da7895e 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -206,7 +206,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + struct intel_connector *intel_connector = + &lvds_encoder->attached_connector->base; struct drm_i915_private *dev_priv = dev->dev_private; u32 ctl_reg, stat_reg; @@ -225,13 +226,15 @@ static void intel_enable_lvds(struct intel_encoder *encoder) if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000)) DRM_ERROR("timed out waiting for panel to power on\n"); - intel_panel_enable_backlight(dev, intel_crtc->pipe); + intel_panel_enable_backlight(intel_connector); } static void intel_disable_lvds(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); + struct intel_connector *intel_connector = + &lvds_encoder->attached_connector->base; struct drm_i915_private *dev_priv = dev->dev_private; u32 ctl_reg, stat_reg; @@ -243,7 +246,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder) stat_reg = PP_STATUS; } - 
intel_panel_disable_backlight(dev); + intel_panel_disable_backlight(intel_connector); I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) @@ -707,6 +710,22 @@ static const struct dmi_system_id intel_no_lvds[] = { }, { .callback = intel_no_lvds_dmi_callback, + .ident = "Intel D410PT", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Intel"), + DMI_MATCH(DMI_BOARD_NAME, "D410PT"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Intel D425KT", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Intel"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "D425KT"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, .ident = "Intel D510MO", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Intel"), diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index b82050c96f3..6d69a9bad86 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -396,7 +396,13 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) { struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_encoder *encoder; + struct drm_connector *connector; + struct intel_connector *intel_connector = NULL; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0]; struct opregion_asle __iomem *asle = dev_priv->opregion.asle; + u32 ret = 0; + bool found = false; DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); @@ -407,11 +413,39 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) if (bclp > 255) return ASLC_BACKLIGHT_FAILED; + mutex_lock(&dev->mode_config.mutex); + /* + * Could match the OpRegion connector here instead, but we'd also need + * to verify the connector could handle a backlight call. 
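(Two scalings happen later in this function: bclp in 0..255 drives the backlight at that granularity, and the CBLV status written back is a rounded-up percentage; standalone:)

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Mirrors the iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ...) below. */
static unsigned int cblv_percent(unsigned int bclp)
{
	return DIV_ROUND_UP(bclp * 100, 255);
}
/* e.g. cblv_percent(128) == 51, cblv_percent(255) == 100 */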
+ */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) + if (encoder->crtc == crtc) { + found = true; + break; + } + + if (!found) { + ret = ASLC_BACKLIGHT_FAILED; + goto out; + } + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) + if (connector->encoder == encoder) + intel_connector = to_intel_connector(connector); + + if (!intel_connector) { + ret = ASLC_BACKLIGHT_FAILED; + goto out; + } + DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp); - intel_panel_set_backlight(dev, bclp, 255); + intel_panel_set_backlight(intel_connector, bclp, 255); iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); - return 0; +out: + mutex_unlock(&dev->mode_config.mutex); + + return ret; } static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) @@ -486,9 +520,13 @@ static u32 asle_isct_state(struct drm_device *dev) return ASLC_ISCT_STATE_FAILED; } -void intel_opregion_asle_intr(struct drm_device *dev) +static void asle_work(struct work_struct *work) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_opregion *opregion = + container_of(work, struct intel_opregion, asle_work); + struct drm_i915_private *dev_priv = + container_of(opregion, struct drm_i915_private, opregion); + struct drm_device *dev = dev_priv->dev; struct opregion_asle __iomem *asle = dev_priv->opregion.asle; u32 aslc_stat = 0; u32 aslc_req; @@ -535,6 +573,14 @@ void intel_opregion_asle_intr(struct drm_device *dev) iowrite32(aslc_stat, &asle->aslc); } +void intel_opregion_asle_intr(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->opregion.asle) + schedule_work(&dev_priv->opregion.asle_work); +} + #define ACPI_EV_DISPLAY_SWITCH (1<<0) #define ACPI_EV_LID (1<<1) #define ACPI_EV_DOCK (1<<2) @@ -592,7 +638,7 @@ static void intel_didl_outputs(struct drm_device *dev) u32 temp; int i = 0; - handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); + handle = ACPI_HANDLE(&dev->pdev->dev); if (!handle || acpi_bus_get_device(handle, &acpi_dev)) return; @@ -735,6 +781,8 @@ void intel_opregion_fini(struct drm_device *dev) if (opregion->asle) iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy); + cancel_work_sync(&dev_priv->opregion.asle_work); + if (opregion->acpi) { iowrite32(0, &opregion->acpi->drdy); @@ -828,6 +876,10 @@ int intel_opregion_setup(struct drm_device *dev) return -ENOTSUPP; } +#ifdef CONFIG_ACPI + INIT_WORK(&opregion->asle_work, asle_work); +#endif + base = acpi_os_ioremap(asls, OPREGION_SIZE); if (!base) return -ENOMEM; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 09b2994c9b3..f161ac02c4f 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -341,7 +341,7 @@ static int is_backlight_combination_mode(struct drm_device *dev) /* XXX: query mode clock or hardware clock and program max PWM appropriately * when it's 0. 
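(The VLV_BLC_*_CTL(pipe) lookups threaded through this file follow the driver's two-instance register pattern, where pipe B's copy sits at a fixed stride from pipe A's, as with the _PIPE()-based audio defines earlier in this patch; a sketch with assumed offsets:)

#include <stdio.h>

#define PIPE_REG(pipe, a, b) ((a) + (pipe) * ((b) - (a)))

int main(void)
{
	/* Illustrative values, not the real VLV MMIO map. */
	unsigned int a = 0x61250, b = 0x61350;

	printf("pipe A: %#x, pipe B: %#x\n",
	       PIPE_REG(0, a, b), PIPE_REG(1, a, b));
	return 0;
}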
*/ -static u32 i915_read_blc_pwm_ctl(struct drm_device *dev) +static u32 i915_read_blc_pwm_ctl(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; u32 val; @@ -358,6 +358,21 @@ static u32 i915_read_blc_pwm_ctl(struct drm_device *dev) val = dev_priv->regfile.saveBLC_PWM_CTL2; I915_WRITE(BLC_PWM_PCH_CTL2, val); } + } else if (IS_VALLEYVIEW(dev)) { + val = I915_READ(VLV_BLC_PWM_CTL(pipe)); + if (dev_priv->regfile.saveBLC_PWM_CTL == 0) { + dev_priv->regfile.saveBLC_PWM_CTL = val; + dev_priv->regfile.saveBLC_PWM_CTL2 = + I915_READ(VLV_BLC_PWM_CTL2(pipe)); + } else if (val == 0) { + val = dev_priv->regfile.saveBLC_PWM_CTL; + I915_WRITE(VLV_BLC_PWM_CTL(pipe), val); + I915_WRITE(VLV_BLC_PWM_CTL2(pipe), + dev_priv->regfile.saveBLC_PWM_CTL2); + } + + if (!val) + val = 0x0f42ffff; } else { val = I915_READ(BLC_PWM_CTL); if (dev_priv->regfile.saveBLC_PWM_CTL == 0) { @@ -372,19 +387,17 @@ static u32 i915_read_blc_pwm_ctl(struct drm_device *dev) I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); } - - if (IS_VALLEYVIEW(dev) && !val) - val = 0x0f42ffff; } return val; } -static u32 intel_panel_get_max_backlight(struct drm_device *dev) +static u32 intel_panel_get_max_backlight(struct drm_device *dev, + enum pipe pipe) { u32 max; - max = i915_read_blc_pwm_ctl(dev); + max = i915_read_blc_pwm_ctl(dev, pipe); if (HAS_PCH_SPLIT(dev)) { max >>= 16; @@ -410,7 +423,8 @@ MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness " "to dri-devel@lists.freedesktop.org, if your machine needs it. " "It will then be included in an upcoming module version."); module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600); -static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val) +static u32 intel_panel_compute_brightness(struct drm_device *dev, + enum pipe pipe, u32 val) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -419,7 +433,7 @@ static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val) if (i915_panel_invert_brightness > 0 || dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { - u32 max = intel_panel_get_max_backlight(dev); + u32 max = intel_panel_get_max_backlight(dev, pipe); if (max) return max - val; } @@ -427,18 +441,25 @@ static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val) return val; } -static u32 intel_panel_get_backlight(struct drm_device *dev) +static u32 intel_panel_get_backlight(struct drm_device *dev, + enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; u32 val; unsigned long flags; + int reg; spin_lock_irqsave(&dev_priv->backlight.lock, flags); if (HAS_PCH_SPLIT(dev)) { val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; } else { - val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; + if (IS_VALLEYVIEW(dev)) + reg = VLV_BLC_PWM_CTL(pipe); + else + reg = BLC_PWM_CTL; + + val = I915_READ(reg) & BACKLIGHT_DUTY_CYCLE_MASK; if (INTEL_INFO(dev)->gen < 4) val >>= 1; @@ -450,7 +471,7 @@ static u32 intel_panel_get_backlight(struct drm_device *dev) } } - val = intel_panel_compute_brightness(dev, val); + val = intel_panel_compute_brightness(dev, pipe, val); spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); @@ -466,19 +487,20 @@ static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level) } static void intel_panel_actually_set_backlight(struct drm_device *dev, - u32 level) + enum pipe pipe, u32 level) { struct drm_i915_private *dev_priv = dev->dev_private; u32 tmp; + int reg; DRM_DEBUG_DRIVER("set backlight PWM = 
%d\n", level); - level = intel_panel_compute_brightness(dev, level); + level = intel_panel_compute_brightness(dev, pipe, level); if (HAS_PCH_SPLIT(dev)) return intel_pch_panel_set_backlight(dev, level); if (is_backlight_combination_mode(dev)) { - u32 max = intel_panel_get_max_backlight(dev); + u32 max = intel_panel_get_max_backlight(dev, pipe); u8 lbpc; /* we're screwed, but keep behaviour backwards compatible */ @@ -490,23 +512,34 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc); } - tmp = I915_READ(BLC_PWM_CTL); + if (IS_VALLEYVIEW(dev)) + reg = VLV_BLC_PWM_CTL(pipe); + else + reg = BLC_PWM_CTL; + + tmp = I915_READ(reg); if (INTEL_INFO(dev)->gen < 4) level <<= 1; tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; - I915_WRITE(BLC_PWM_CTL, tmp | level); + I915_WRITE(reg, tmp | level); } /* set backlight brightness to level in range [0..max] */ -void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max) +void intel_panel_set_backlight(struct intel_connector *connector, u32 level, + u32 max) { + struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + enum pipe pipe = intel_get_pipe_from_connector(connector); u32 freq; unsigned long flags; + if (pipe == INVALID_PIPE) + return; + spin_lock_irqsave(&dev_priv->backlight.lock, flags); - freq = intel_panel_get_max_backlight(dev); + freq = intel_panel_get_max_backlight(dev, pipe); if (!freq) { /* we are screwed, bail out */ goto out; @@ -523,16 +556,21 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max) dev_priv->backlight.device->props.brightness = level; if (dev_priv->backlight.enabled) - intel_panel_actually_set_backlight(dev, level); + intel_panel_actually_set_backlight(dev, pipe, level); out: spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); } -void intel_panel_disable_backlight(struct drm_device *dev) +void intel_panel_disable_backlight(struct intel_connector *connector) { + struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + enum pipe pipe = intel_get_pipe_from_connector(connector); unsigned long flags; + if (pipe == INVALID_PIPE) + return; + /* * Do not disable backlight on the vgaswitcheroo path. When switching * away from i915, the other client may depend on i915 to handle the @@ -547,12 +585,17 @@ void intel_panel_disable_backlight(struct drm_device *dev) spin_lock_irqsave(&dev_priv->backlight.lock, flags); dev_priv->backlight.enabled = false; - intel_panel_actually_set_backlight(dev, 0); + intel_panel_actually_set_backlight(dev, pipe, 0); if (INTEL_INFO(dev)->gen >= 4) { uint32_t reg, tmp; - reg = HAS_PCH_SPLIT(dev) ? 
BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2; + if (HAS_PCH_SPLIT(dev)) + reg = BLC_PWM_CPU_CTL2; + else if (IS_VALLEYVIEW(dev)) + reg = VLV_BLC_PWM_CTL2(pipe); + else + reg = BLC_PWM_CTL2; I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE); @@ -566,20 +609,25 @@ void intel_panel_disable_backlight(struct drm_device *dev) spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); } -void intel_panel_enable_backlight(struct drm_device *dev, - enum pipe pipe) +void intel_panel_enable_backlight(struct intel_connector *connector) { + struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + enum pipe pipe = intel_get_pipe_from_connector(connector); enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, pipe); unsigned long flags; + if (pipe == INVALID_PIPE) + return; + DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); spin_lock_irqsave(&dev_priv->backlight.lock, flags); if (dev_priv->backlight.level == 0) { - dev_priv->backlight.level = intel_panel_get_max_backlight(dev); + dev_priv->backlight.level = intel_panel_get_max_backlight(dev, + pipe); if (dev_priv->backlight.device) dev_priv->backlight.device->props.brightness = dev_priv->backlight.level; @@ -588,8 +636,12 @@ void intel_panel_enable_backlight(struct drm_device *dev, if (INTEL_INFO(dev)->gen >= 4) { uint32_t reg, tmp; - reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2; - + if (HAS_PCH_SPLIT(dev)) + reg = BLC_PWM_CPU_CTL2; + else if (IS_VALLEYVIEW(dev)) + reg = VLV_BLC_PWM_CTL2(pipe); + else + reg = BLC_PWM_CTL2; tmp = I915_READ(reg); @@ -629,7 +681,8 @@ set_level: * registers are set. */ dev_priv->backlight.enabled = true; - intel_panel_actually_set_backlight(dev, dev_priv->backlight.level); + intel_panel_actually_set_backlight(dev, pipe, + dev_priv->backlight.level); spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); } @@ -640,9 +693,19 @@ static void intel_panel_init_backlight_regs(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; if (IS_VALLEYVIEW(dev)) { - u32 cur_val = I915_READ(BLC_PWM_CTL) & - BACKLIGHT_DUTY_CYCLE_MASK; - I915_WRITE(BLC_PWM_CTL, (0xf42 << 16) | cur_val); + enum pipe pipe; + + for_each_pipe(pipe) { + u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe)); + + /* Skip if the modulation freq is already set */ + if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK) + continue; + + cur_val &= BACKLIGHT_DUTY_CYCLE_MASK; + I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) | + cur_val); + } } } @@ -652,7 +715,7 @@ static void intel_panel_init_backlight(struct drm_device *dev) intel_panel_init_backlight_regs(dev); - dev_priv->backlight.level = intel_panel_get_backlight(dev); + dev_priv->backlight.level = intel_panel_get_backlight(dev, 0); dev_priv->backlight.enabled = dev_priv->backlight.level != 0; } @@ -681,18 +744,31 @@ intel_panel_detect(struct drm_device *dev) #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) static int intel_panel_update_status(struct backlight_device *bd) { - struct drm_device *dev = bl_get_data(bd); + struct intel_connector *connector = bl_get_data(bd); + struct drm_device *dev = connector->base.dev; + + mutex_lock(&dev->mode_config.mutex); DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n", bd->props.brightness, bd->props.max_brightness); - intel_panel_set_backlight(dev, bd->props.brightness, + intel_panel_set_backlight(connector, bd->props.brightness, bd->props.max_brightness); + mutex_unlock(&dev->mode_config.mutex); return 0; } static int intel_panel_get_brightness(struct backlight_device *bd) { - struct drm_device *dev = 
bl_get_data(bd); - return intel_panel_get_backlight(dev); + struct intel_connector *connector = bl_get_data(bd); + struct drm_device *dev = connector->base.dev; + enum pipe pipe; + + mutex_lock(&dev->mode_config.mutex); + pipe = intel_get_pipe_from_connector(connector); + mutex_unlock(&dev->mode_config.mutex); + if (pipe == INVALID_PIPE) + return 0; + + return intel_panel_get_backlight(connector->base.dev, pipe); } static const struct backlight_ops intel_panel_bl_ops = { @@ -717,7 +793,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector) props.brightness = dev_priv->backlight.level; spin_lock_irqsave(&dev_priv->backlight.lock, flags); - props.max_brightness = intel_panel_get_max_backlight(dev); + props.max_brightness = intel_panel_get_max_backlight(dev, 0); spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); if (props.max_brightness == 0) { @@ -726,7 +802,8 @@ int intel_panel_setup_backlight(struct drm_connector *connector) } dev_priv->backlight.device = backlight_device_register("intel_backlight", - connector->kdev, dev, + connector->kdev, + to_intel_connector(connector), &intel_panel_bl_ops, &props); if (IS_ERR(dev_priv->backlight.device)) { diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 8064ff927bc..caf2ee4e544 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -32,6 +32,27 @@ #include <linux/module.h> #include <drm/i915_powerwell.h> +/** + * RC6 is a special power stage which allows the GPU to enter a very + * low-voltage mode when idle, drawing as little as 0V while in this stage. + * This stage is entered automatically whenever the GPU is idle and RC6 + * support is enabled, and the GPU wakes up automatically as soon as a new + * workload arises. + * + * There are different RC6 modes available in Intel GPUs, which differ in the + * latency required to enter and leave RC6 and in the voltage consumed by the + * GPU in the different states. + * + * The combination of the following flags defines which states the GPU is + * allowed to enter: RC6 is the normal RC6 state, RC6p is deep RC6, and + * RC6pp is the deepest RC6. Their support by hardware varies according to + * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and the + * one which brings the most power savings; deeper states save more power, + * but require higher latency to switch to and wake up. + */ +#define INTEL_RC6_ENABLE (1<<0) +#define INTEL_RC6p_ENABLE (1<<1) +#define INTEL_RC6pp_ENABLE (1<<2) + /* FBC, or Frame Buffer Compression, is a technique employed to compress the * framebuffer contents in-memory, aiming at reducing the required bandwidth * during in-memory transfers and, therefore, the power consumption. 
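The INTEL_RC6*_ENABLE flags above form a plain policy bitmask: intel_enable_rc6() returns the set of RC6 states the driver is allowed to use, and each platform enable path translates those bits into GEN6_RC_CTL_* hardware enable bits before writing GEN6_RC_CONTROL. A minimal standalone sketch of that translation, with made-up RC_CTL_* stand-ins rather than the real register bits:

#include <stdio.h>

#define INTEL_RC6_ENABLE	(1<<0)
#define INTEL_RC6p_ENABLE	(1<<1)
#define INTEL_RC6pp_ENABLE	(1<<2)

/* Hypothetical stand-ins for the GEN6_RC_CTL_* enable bits. */
#define RC_CTL_RC6_ENABLE	(1<<18)
#define RC_CTL_RC6p_ENABLE	(1<<17)
#define RC_CTL_RC6pp_ENABLE	(1<<16)

/* Translate the policy mask into hardware enable bits, mirroring the
 * way gen6_enable_rps() builds rc6_mask from intel_enable_rc6(). */
static unsigned int rc6_policy_to_hw_mask(int allowed)
{
	unsigned int mask = 0;

	if (allowed & INTEL_RC6_ENABLE)
		mask |= RC_CTL_RC6_ENABLE;
	if (allowed & INTEL_RC6p_ENABLE)
		mask |= RC_CTL_RC6p_ENABLE;
	if (allowed & INTEL_RC6pp_ENABLE)
		mask |= RC_CTL_RC6pp_ENABLE;

	return mask;
}

int main(void)
{
	/* snb/ivb end up with RC6 only; see intel_enable_rc6() below. */
	int allowed = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;

	printf("RC6 hw mask: %#x\n", rc6_policy_to_hw_mask(allowed));
	return 0;
}

Keeping the policy bits (INTEL_RC6*) separate from the hardware bits (GEN6_RC_CTL_*) lets intel_enable_rc6() stay platform-neutral while each enable path applies its own platform restrictions.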
@@ -233,18 +254,6 @@ static void ironlake_disable_fbc(struct drm_device *dev) dpfc_ctl &= ~DPFC_CTL_EN; I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); - if (IS_IVYBRIDGE(dev)) - /* WaFbcDisableDpfcClockGating:ivb */ - I915_WRITE(ILK_DSPCLK_GATE_D, - I915_READ(ILK_DSPCLK_GATE_D) & - ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE); - - if (IS_HASWELL(dev)) - /* WaFbcDisableDpfcClockGating:hsw */ - I915_WRITE(HSW_CLKGATE_DISABLE_PART_1, - I915_READ(HSW_CLKGATE_DISABLE_PART_1) & - ~HSW_DPFC_GATING_DISABLE); - DRM_DEBUG_KMS("disabled FBC\n"); } } @@ -274,18 +283,10 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval) if (IS_IVYBRIDGE(dev)) { /* WaFbcAsynchFlipDisableFbcQueue:ivb */ I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS); - /* WaFbcDisableDpfcClockGating:ivb */ - I915_WRITE(ILK_DSPCLK_GATE_D, - I915_READ(ILK_DSPCLK_GATE_D) | - ILK_DPFCUNIT_CLOCK_GATE_DISABLE); } else { /* WaFbcAsynchFlipDisableFbcQueue:hsw */ I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe), HSW_BYPASS_FBC_QUEUE); - /* WaFbcDisableDpfcClockGating:hsw */ - I915_WRITE(HSW_CLKGATE_DISABLE_PART_1, - I915_READ(HSW_CLKGATE_DISABLE_PART_1) | - HSW_DPFC_GATING_DISABLE); } I915_WRITE(SNB_DPFC_CTL_SA, @@ -1624,7 +1625,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) &to_intel_crtc(enabled)->config.adjusted_mode; int clock = adjusted_mode->crtc_clock; int htotal = adjusted_mode->htotal; - int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; + int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w; int pixel_size = enabled->fb->bits_per_pixel / 8; unsigned long line_time_us; int entries; @@ -2290,7 +2291,9 @@ static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params, static unsigned int ilk_display_fifo_size(const struct drm_device *dev) { - if (INTEL_INFO(dev)->gen >= 7) + if (INTEL_INFO(dev)->gen >= 8) + return 3072; + else if (INTEL_INFO(dev)->gen >= 7) return 768; else return 512; @@ -2335,7 +2338,9 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev, } /* clamp to max that the registers can hold */ - if (INTEL_INFO(dev)->gen >= 7) + if (INTEL_INFO(dev)->gen >= 8) + max = level == 0 ? 255 : 2047; + else if (INTEL_INFO(dev)->gen >= 7) /* IVB/HSW primary/sprite plane watermarks */ max = level == 0 ? 
127 : 1023; else if (!is_sprite) @@ -2365,10 +2370,13 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev, } /* Calculate the maximum FBC watermark */ -static unsigned int ilk_fbc_wm_max(void) +static unsigned int ilk_fbc_wm_max(struct drm_device *dev) { /* max that registers can hold */ - return 15; + if (INTEL_INFO(dev)->gen >= 8) + return 31; + else + return 15; } static void ilk_compute_wm_maximums(struct drm_device *dev, @@ -2380,7 +2388,7 @@ static void ilk_compute_wm_maximums(struct drm_device *dev, max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); max->cur = ilk_cursor_wm_max(dev, level, config); - max->fbc = ilk_fbc_wm_max(); + max->fbc = ilk_fbc_wm_max(dev); } static bool ilk_validate_wm_level(int level, @@ -2721,10 +2729,18 @@ static void hsw_compute_wm_results(struct drm_device *dev, if (!r->enable) break; - results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2, - r->fbc_val, - r->pri_val, - r->cur_val); + results->wm_lp[wm_lp - 1] = WM3_LP_EN | + ((level * 2) << WM1_LP_LATENCY_SHIFT) | + (r->pri_val << WM1_LP_SR_SHIFT) | + r->cur_val; + + if (INTEL_INFO(dev)->gen >= 8) + results->wm_lp[wm_lp - 1] |= + r->fbc_val << WM1_LP_FBC_SHIFT_BDW; + else + results->wm_lp[wm_lp - 1] |= + r->fbc_val << WM1_LP_FBC_SHIFT; + results->wm_lp_spr[wm_lp - 1] = r->spr_val; } @@ -3685,6 +3701,20 @@ static void valleyview_disable_rps(struct drm_device *dev) } } +static void intel_print_rc6_info(struct drm_device *dev, u32 mode) +{ + if (IS_GEN6(dev)) + DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n"); + + if (IS_HASWELL(dev)) + DRM_DEBUG_DRIVER("Haswell: only RC6 available\n"); + + DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", + (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", + (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", + (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off"); +} + int intel_enable_rc6(const struct drm_device *dev) { /* No RC6 before Ironlake */ @@ -3699,18 +3729,13 @@ int intel_enable_rc6(const struct drm_device *dev) if (INTEL_INFO(dev)->gen == 5) return 0; - if (IS_HASWELL(dev)) { - DRM_DEBUG_DRIVER("Haswell: only RC6 available\n"); + if (IS_HASWELL(dev)) return INTEL_RC6_ENABLE; - } /* snb/ivb have more than one rc6 state. */ - if (INTEL_INFO(dev)->gen == 6) { - DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n"); + if (INTEL_INFO(dev)->gen == 6) return INTEL_RC6_ENABLE; - } - DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n"); return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); } @@ -3737,6 +3762,78 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev) I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs); } +static void gen8_enable_rps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + uint32_t rc6_mask = 0, rp_state_cap; + int unused; + + /* 1a: Software RC state - RC0 */ + I915_WRITE(GEN6_RC_STATE, 0); + + /* 1c & 1d: Get forcewake during program sequence. Although the driver + * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ + gen6_gt_force_wake_get(dev_priv); + + /* 2a: Disable RC states. 
*/ + I915_WRITE(GEN6_RC_CONTROL, 0); + + rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); + + /* 2b: Program RC6 thresholds. */ + I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); + I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ + I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ + for_each_ring(ring, dev_priv, unused) + I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); + I915_WRITE(GEN6_RC_SLEEP, 0); + I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ + + /* 3: Enable RC6 */ + if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) + rc6_mask = GEN6_RC_CTL_RC6_ENABLE; + DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off"); + I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | + GEN6_RC_CTL_EI_MODE(1) | + rc6_mask); + + /* 4: Program defaults and thresholds for RPS */ + I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */ + I915_WRITE(GEN6_RC_VIDEO_FREQ, HSW_FREQUENCY(12)); /* Request 600 MHz */ + /* NB: Docs say 1s, and 1000000 - which aren't equivalent */ + I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */ + + /* Docs recommend 900MHz, and 300 MHz respectively */ + I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, + dev_priv->rps.max_delay << 24 | + dev_priv->rps.min_delay << 16); + + I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */ + I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */ + I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */ + I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */ + + I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); + + /* 5: Enable RPS */ + I915_WRITE(GEN6_RP_CONTROL, + GEN6_RP_MEDIA_TURBO | + GEN6_RP_MEDIA_HW_NORMAL_MODE | + GEN6_RP_MEDIA_IS_GFX | + GEN6_RP_ENABLE | + GEN6_RP_UP_BUSY_AVG | + GEN6_RP_DOWN_IDLE_AVG); + + /* 6: Ring frequency + overclocking (our driver does this later) */ + + gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8); + + gen6_enable_rps_interrupts(dev); + + gen6_gt_force_wake_put(dev_priv); +} + static void gen6_enable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -3791,7 +3888,7 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_SLEEP, 0); I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); - if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) + if (IS_IVYBRIDGE(dev)) I915_WRITE(GEN6_RC6_THRESHOLD, 125000); else I915_WRITE(GEN6_RC6_THRESHOLD, 50000); @@ -3812,10 +3909,7 @@ static void gen6_enable_rps(struct drm_device *dev) rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; } - DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", - (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", - (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", - (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off"); + intel_print_rc6_info(dev, rc6_mask); I915_WRITE(GEN6_RC_CONTROL, rc6_mask | @@ -3888,7 +3982,7 @@ void gen6_update_ring_freq(struct drm_device *dev) /* Convert from kHz to MHz */ max_ia_freq /= 1000; - min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK) & 0xf; + min_ring_freq = I915_READ(DCLK) & 0xf; /* convert DDR frequency from units of 266.6MHz to bandwidth */ min_ring_freq = mult_frac(min_ring_freq, 8, 3); @@ -3902,7 +3996,10 @@ void gen6_update_ring_freq(struct drm_device *dev) int diff = dev_priv->rps.max_delay - gpu_freq; unsigned int ia_freq = 0, ring_freq = 0; - if (IS_HASWELL(dev)) { + if (INTEL_INFO(dev)->gen >= 8) { + /* max(2 * GT, DDR). 
NB: GT is 50MHz units */ + ring_freq = max(min_ring_freq, gpu_freq); + } else if (IS_HASWELL(dev)) { + ring_freq = mult_frac(gpu_freq, 5, 4); + ring_freq = max(min_ring_freq, ring_freq); + /* leave ia_freq as the default, chosen by cpufreq */ @@ -4051,6 +4148,9 @@ static void valleyview_enable_rps(struct drm_device *dev) VLV_RENDER_RC6_COUNT_EN)); if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) rc6_mode = GEN7_RC_CTL_TO_MODE; + + intel_print_rc6_info(dev, rc6_mode); + I915_WRITE(GEN6_RC_CONTROL, rc6_mode); val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); @@ -4222,6 +4322,8 @@ static void ironlake_enable_rc6(struct drm_device *dev) I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN); I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); + + intel_print_rc6_info(dev, INTEL_RC6_ENABLE); } static unsigned long intel_pxfreq(u32 vidfreq) @@ -4861,6 +4963,9 @@ static void intel_gen6_powersave_work(struct work_struct *work) if (IS_VALLEYVIEW(dev)) { valleyview_enable_rps(dev); + } else if (IS_BROADWELL(dev)) { + gen8_enable_rps(dev); + gen6_update_ring_freq(dev); } else { gen6_enable_rps(dev); gen6_update_ring_freq(dev); @@ -4996,7 +5101,9 @@ static void cpt_init_clock_gating(struct drm_device *dev) * gating for the panel power sequencer or it will fail to * start up when no ports are active. */ - I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE | + PCH_DPLUNIT_CLOCK_GATE_DISABLE | + PCH_CPUNIT_CLOCK_GATE_DISABLE); I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | DPLS_EDP_PPS_FIX_DIS); /* The below fixes the weird display corruption, a few pixels shifted @@ -5167,6 +5274,50 @@ static void lpt_suspend_hw(struct drm_device *dev) } } +static void gen8_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + enum pipe i; + + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + /* FIXME(BDW): Check all the w/a, some might only apply to + * pre-production hw. */ + + WARN(!i915_preliminary_hw_support, + "GEN8_CENTROID_PIXEL_OPT_DIS may not be needed for production\n"); + I915_WRITE(HALF_SLICE_CHICKEN3, + _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS)); + I915_WRITE(HALF_SLICE_CHICKEN3, + _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS)); + I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE)); + + I915_WRITE(_3D_CHICKEN3, + _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)); + + I915_WRITE(COMMON_SLICE_CHICKEN2, + _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE)); + + I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, + _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE)); + + /* WaSwitchSolVfFArbitrationPriority */ + I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); + + /* WaPsrDPAMaskVBlankInSRD */ + I915_WRITE(CHICKEN_PAR1_1, + I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); + + /* WaPsrDPRSUnmaskVBlankInSRD */ + for_each_pipe(i) { + I915_WRITE(CHICKEN_PIPESL_1(i), + I915_READ(CHICKEN_PIPESL_1(i)) | + DPRS_MASK_VBLANK_SRD); + } +} + static void haswell_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -5190,6 +5341,11 @@ static void haswell_init_clock_gating(struct drm_device *dev) I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE); + /* L3 caching of data atomics doesn't work -- disable it. 
*/ + I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); + I915_WRITE(HSW_ROW_CHICKEN3, + _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE)); + /* This is required by WaCatErrorRejectionIssue:hsw */ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | @@ -5485,6 +5641,25 @@ void intel_suspend_hw(struct drm_device *dev) lpt_suspend_hw(dev); } +static bool is_always_on_power_domain(struct drm_device *dev, + enum intel_display_power_domain domain) +{ + unsigned long always_on_domains; + + BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK); + + if (IS_BROADWELL(dev)) { + always_on_domains = BDW_ALWAYS_ON_POWER_DOMAINS; + } else if (IS_HASWELL(dev)) { + always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS; + } else { + WARN_ON(1); + return true; + } + + return BIT(domain) & always_on_domains; +} + /** * We should only use the power well if we explicitly asked the hardware to * enable it, so check if it's enabled and also check if we've requested it to @@ -5498,24 +5673,11 @@ bool intel_display_power_enabled(struct drm_device *dev, if (!HAS_POWER_WELL(dev)) return true; - switch (domain) { - case POWER_DOMAIN_PIPE_A: - case POWER_DOMAIN_TRANSCODER_EDP: + if (is_always_on_power_domain(dev, domain)) return true; - case POWER_DOMAIN_VGA: - case POWER_DOMAIN_PIPE_B: - case POWER_DOMAIN_PIPE_C: - case POWER_DOMAIN_PIPE_A_PANEL_FITTER: - case POWER_DOMAIN_PIPE_B_PANEL_FITTER: - case POWER_DOMAIN_PIPE_C_PANEL_FITTER: - case POWER_DOMAIN_TRANSCODER_A: - case POWER_DOMAIN_TRANSCODER_B: - case POWER_DOMAIN_TRANSCODER_C: - return I915_READ(HSW_PWR_WELL_DRIVER) == + + return I915_READ(HSW_PWR_WELL_DRIVER) == (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED); - default: - BUG(); - } } static void __intel_set_power_well(struct drm_device *dev, bool enable) @@ -5565,169 +5727,130 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) } } -static void __intel_power_well_get(struct i915_power_well *power_well) +static void __intel_power_well_get(struct drm_device *dev, + struct i915_power_well *power_well) { if (!power_well->count++) - __intel_set_power_well(power_well->device, true); + __intel_set_power_well(dev, true); } -static void __intel_power_well_put(struct i915_power_well *power_well) +static void __intel_power_well_put(struct drm_device *dev, + struct i915_power_well *power_well) { WARN_ON(!power_well->count); - if (!--power_well->count) - __intel_set_power_well(power_well->device, false); + if (!--power_well->count && i915_disable_power_well) + __intel_set_power_well(dev, false); } void intel_display_power_get(struct drm_device *dev, enum intel_display_power_domain domain) { struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_power_well *power_well = &dev_priv->power_well; + struct i915_power_domains *power_domains; if (!HAS_POWER_WELL(dev)) return; - switch (domain) { - case POWER_DOMAIN_PIPE_A: - case POWER_DOMAIN_TRANSCODER_EDP: + if (is_always_on_power_domain(dev, domain)) return; - case POWER_DOMAIN_VGA: - case POWER_DOMAIN_PIPE_B: - case POWER_DOMAIN_PIPE_C: - case POWER_DOMAIN_PIPE_A_PANEL_FITTER: - case POWER_DOMAIN_PIPE_B_PANEL_FITTER: - case POWER_DOMAIN_PIPE_C_PANEL_FITTER: - case POWER_DOMAIN_TRANSCODER_A: - case POWER_DOMAIN_TRANSCODER_B: - case POWER_DOMAIN_TRANSCODER_C: - spin_lock_irq(&power_well->lock); - __intel_power_well_get(power_well); - spin_unlock_irq(&power_well->lock); - return; - default: - BUG(); - } + + power_domains = &dev_priv->power_domains; + + mutex_lock(&power_domains->lock); + 
__intel_power_well_get(dev, &power_domains->power_wells[0]); + mutex_unlock(&power_domains->lock); } void intel_display_power_put(struct drm_device *dev, enum intel_display_power_domain domain) { struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_power_well *power_well = &dev_priv->power_well; + struct i915_power_domains *power_domains; if (!HAS_POWER_WELL(dev)) return; - switch (domain) { - case POWER_DOMAIN_PIPE_A: - case POWER_DOMAIN_TRANSCODER_EDP: + if (is_always_on_power_domain(dev, domain)) return; - case POWER_DOMAIN_VGA: - case POWER_DOMAIN_PIPE_B: - case POWER_DOMAIN_PIPE_C: - case POWER_DOMAIN_PIPE_A_PANEL_FITTER: - case POWER_DOMAIN_PIPE_B_PANEL_FITTER: - case POWER_DOMAIN_PIPE_C_PANEL_FITTER: - case POWER_DOMAIN_TRANSCODER_A: - case POWER_DOMAIN_TRANSCODER_B: - case POWER_DOMAIN_TRANSCODER_C: - spin_lock_irq(&power_well->lock); - __intel_power_well_put(power_well); - spin_unlock_irq(&power_well->lock); - return; - default: - BUG(); - } + + power_domains = &dev_priv->power_domains; + + mutex_lock(&power_domains->lock); + __intel_power_well_put(dev, &power_domains->power_wells[0]); + mutex_unlock(&power_domains->lock); } -static struct i915_power_well *hsw_pwr; +static struct i915_power_domains *hsw_pwr; /* Display audio driver power well request */ void i915_request_power_well(void) { + struct drm_i915_private *dev_priv; + if (WARN_ON(!hsw_pwr)) return; - spin_lock_irq(&hsw_pwr->lock); - __intel_power_well_get(hsw_pwr); - spin_unlock_irq(&hsw_pwr->lock); + dev_priv = container_of(hsw_pwr, struct drm_i915_private, + power_domains); + + mutex_lock(&hsw_pwr->lock); + __intel_power_well_get(dev_priv->dev, &hsw_pwr->power_wells[0]); + mutex_unlock(&hsw_pwr->lock); } EXPORT_SYMBOL_GPL(i915_request_power_well); /* Display audio driver power well release */ void i915_release_power_well(void) { + struct drm_i915_private *dev_priv; + if (WARN_ON(!hsw_pwr)) return; - spin_lock_irq(&hsw_pwr->lock); - __intel_power_well_put(hsw_pwr); - spin_unlock_irq(&hsw_pwr->lock); + dev_priv = container_of(hsw_pwr, struct drm_i915_private, + power_domains); + + mutex_lock(&hsw_pwr->lock); + __intel_power_well_put(dev_priv->dev, &hsw_pwr->power_wells[0]); + mutex_unlock(&hsw_pwr->lock); } EXPORT_SYMBOL_GPL(i915_release_power_well); -int i915_init_power_well(struct drm_device *dev) +int intel_power_domains_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *power_well; - hsw_pwr = &dev_priv->power_well; + mutex_init(&power_domains->lock); + hsw_pwr = power_domains; - hsw_pwr->device = dev; - spin_lock_init(&hsw_pwr->lock); - hsw_pwr->count = 0; + power_well = &power_domains->power_wells[0]; + power_well->count = 0; return 0; } -void i915_remove_power_well(struct drm_device *dev) +void intel_power_domains_remove(struct drm_device *dev) { hsw_pwr = NULL; } -void intel_set_power_well(struct drm_device *dev, bool enable) +static void intel_power_domains_resume(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_power_well *power_well = &dev_priv->power_well; + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *power_well; if (!HAS_POWER_WELL(dev)) return; - if (!i915_disable_power_well && !enable) - return; - - spin_lock_irq(&power_well->lock); + mutex_lock(&power_domains->lock); - /* - * This function will only ever contribute one - * to the power well reference count. 
i915_request - * is what tracks whether we have or have not - * added the one to the reference count. - */ - if (power_well->i915_request == enable) - goto out; - - power_well->i915_request = enable; - - if (enable) - __intel_power_well_get(power_well); - else - __intel_power_well_put(power_well); - - out: - spin_unlock_irq(&power_well->lock); -} - -static void intel_resume_power_well(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_power_well *power_well = &dev_priv->power_well; - - if (!HAS_POWER_WELL(dev)) - return; - - spin_lock_irq(&power_well->lock); + power_well = &power_domains->power_wells[0]; __intel_set_power_well(dev, power_well->count > 0); - spin_unlock_irq(&power_well->lock); + + mutex_unlock(&power_domains->lock); } /* @@ -5736,7 +5859,7 @@ static void intel_resume_power_well(struct drm_device *dev) * to be enabled, and it will only be disabled if none of the registers is * requesting it to be enabled. */ -void intel_init_power_well(struct drm_device *dev) +void intel_power_domains_init_hw(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -5744,8 +5867,8 @@ void intel_init_power_well(struct drm_device *dev) return; /* For now, we need the power well to be always enabled. */ - intel_set_power_well(dev, true); - intel_resume_power_well(dev); + intel_display_set_init_power(dev, true); + intel_power_domains_resume(dev); /* We're taking over the BIOS, so clear any requests made by it since * the driver is in charge now. */ @@ -5849,6 +5972,8 @@ void intel_init_pm(struct drm_device *dev) dev_priv->display.update_wm = NULL; } dev_priv->display.init_clock_gating = haswell_init_clock_gating; + } else if (INTEL_INFO(dev)->gen == 8) { + dev_priv->display.init_clock_gating = gen8_init_clock_gating; } else dev_priv->display.update_wm = NULL; } else if (IS_VALLEYVIEW(dev)) { @@ -6011,4 +6136,3 @@ void intel_pm_init(struct drm_device *dev) INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, intel_gen6_powersave_work); } - diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 2dec134f75e..b620337e6d6 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -360,6 +360,47 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring, return 0; } +static int +gen8_render_ring_flush(struct intel_ring_buffer *ring, + u32 invalidate_domains, u32 flush_domains) +{ + u32 flags = 0; + u32 scratch_addr = ring->scratch.gtt_offset + 128; + int ret; + + flags |= PIPE_CONTROL_CS_STALL; + + if (flush_domains) { + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + } + if (invalidate_domains) { + flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_QW_WRITE; + flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; + } + + ret = intel_ring_begin(ring, 6); + if (ret) + return ret; + + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(ring, flags); + intel_ring_emit(ring, scratch_addr); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + + return 0; + +} + static void ring_write_tail(struct intel_ring_buffer *ring, u32 value) { @@ -1066,6 +1107,52 @@ hsw_vebox_put_irq(struct 
intel_ring_buffer *ring) spin_unlock_irqrestore(&dev_priv->irq_lock, flags); } +static bool +gen8_ring_get_irq(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long flags; + + if (!dev->irq_enabled) + return false; + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + if (ring->irq_refcount++ == 0) { + if (HAS_L3_DPF(dev) && ring->id == RCS) { + I915_WRITE_IMR(ring, + ~(ring->irq_enable_mask | + GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); + } else { + I915_WRITE_IMR(ring, ~ring->irq_enable_mask); + } + POSTING_READ(RING_IMR(ring->mmio_base)); + } + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + + return true; +} + +static void +gen8_ring_put_irq(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + if (--ring->irq_refcount == 0) { + if (HAS_L3_DPF(dev) && ring->id == RCS) { + I915_WRITE_IMR(ring, + ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); + } else { + I915_WRITE_IMR(ring, ~0); + } + POSTING_READ(RING_IMR(ring->mmio_base)); + } + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); +} + static int i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length, @@ -1624,6 +1711,8 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring, return ret; cmd = MI_FLUSH_DW; + if (INTEL_INFO(ring->dev)->gen >= 8) + cmd += 1; /* * Bspec vol 1c.5 - video engine command streamer: * "If ENABLED, all TLBs will be invalidated once the flush @@ -1635,9 +1724,38 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring, MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; intel_ring_emit(ring, cmd); intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); + if (INTEL_INFO(ring->dev)->gen >= 8) { + intel_ring_emit(ring, 0); /* upper addr */ + intel_ring_emit(ring, 0); /* value */ + } else { + intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_NOOP); + } + intel_ring_advance(ring); + return 0; +} + +static int +gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, + u32 offset, u32 len, + unsigned flags) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL && + !(flags & I915_DISPATCH_SECURE); + int ret; + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + /* FIXME(BDW): Address space and security selectors. 
*/ + intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8)); + intel_ring_emit(ring, offset); intel_ring_emit(ring, 0); intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); + return 0; } @@ -1697,6 +1815,8 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring, return ret; cmd = MI_FLUSH_DW; + if (INTEL_INFO(ring->dev)->gen >= 8) + cmd += 1; /* * Bspec vol 1c.3 - blitter engine command streamer: * "If ENABLED, all TLBs will be invalidated once the flush @@ -1708,8 +1828,13 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring, MI_FLUSH_DW_OP_STOREDW; intel_ring_emit(ring, cmd); intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, MI_NOOP); + if (INTEL_INFO(ring->dev)->gen >= 8) { + intel_ring_emit(ring, 0); /* upper addr */ + intel_ring_emit(ring, 0); /* value */ + } else { + intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_NOOP); + } intel_ring_advance(ring); if (IS_GEN7(dev) && flush) @@ -1732,8 +1857,14 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->flush = gen7_render_ring_flush; if (INTEL_INFO(dev)->gen == 6) ring->flush = gen6_render_ring_flush; - ring->irq_get = gen6_ring_get_irq; - ring->irq_put = gen6_ring_put_irq; + if (INTEL_INFO(dev)->gen >= 8) { + ring->flush = gen8_render_ring_flush; + ring->irq_get = gen8_ring_get_irq; + ring->irq_put = gen8_ring_put_irq; + } else { + ring->irq_get = gen6_ring_get_irq; + ring->irq_put = gen6_ring_put_irq; + } ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; ring->get_seqno = gen6_ring_get_seqno; ring->set_seqno = ring_set_seqno; @@ -1775,6 +1906,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->write_tail = ring_write_tail; if (IS_HASWELL(dev)) ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; + else if (IS_GEN8(dev)) + ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; else if (INTEL_INFO(dev)->gen >= 6) ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; else if (INTEL_INFO(dev)->gen >= 4) @@ -1888,7 +2021,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) ring->id = VCS; ring->write_tail = ring_write_tail; - if (IS_GEN6(dev) || IS_GEN7(dev)) { + if (INTEL_INFO(dev)->gen >= 6) { ring->mmio_base = GEN6_BSD_RING_BASE; /* gen6 bsd needs a special wa for tail updates */ if (IS_GEN6(dev)) @@ -1897,10 +2030,20 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) ring->add_request = gen6_add_request; ring->get_seqno = gen6_ring_get_seqno; ring->set_seqno = ring_set_seqno; - ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; - ring->irq_get = gen6_ring_get_irq; - ring->irq_put = gen6_ring_put_irq; - ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + if (INTEL_INFO(dev)->gen >= 8) { + ring->irq_enable_mask = + GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; + ring->irq_get = gen8_ring_get_irq; + ring->irq_put = gen8_ring_put_irq; + ring->dispatch_execbuffer = + gen8_ring_dispatch_execbuffer; + } else { + ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; + ring->irq_get = gen6_ring_get_irq; + ring->irq_put = gen6_ring_put_irq; + ring->dispatch_execbuffer = + gen6_ring_dispatch_execbuffer; + } ring->sync_to = gen6_ring_sync; ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR; ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID; @@ -1946,10 +2089,18 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) ring->add_request = gen6_add_request; ring->get_seqno = gen6_ring_get_seqno; ring->set_seqno = ring_set_seqno; - ring->irq_enable_mask = GT_BLT_USER_INTERRUPT; - 
ring->irq_get = gen6_ring_get_irq; - ring->irq_put = gen6_ring_put_irq; - ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + if (INTEL_INFO(dev)->gen >= 8) { + ring->irq_enable_mask = + GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; + ring->irq_get = gen8_ring_get_irq; + ring->irq_put = gen8_ring_put_irq; + ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; + } else { + ring->irq_enable_mask = GT_BLT_USER_INTERRUPT; + ring->irq_get = gen6_ring_get_irq; + ring->irq_put = gen6_ring_put_irq; + ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + } ring->sync_to = gen6_ring_sync; ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR; ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV; @@ -1978,10 +2129,19 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) ring->add_request = gen6_add_request; ring->get_seqno = gen6_ring_get_seqno; ring->set_seqno = ring_set_seqno; - ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; - ring->irq_get = hsw_vebox_get_irq; - ring->irq_put = hsw_vebox_put_irq; - ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + + if (INTEL_INFO(dev)->gen >= 8) { + ring->irq_enable_mask = + GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; + ring->irq_get = gen8_ring_get_irq; + ring->irq_put = gen8_ring_put_irq; + ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; + } else { + ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; + ring->irq_get = hsw_vebox_get_irq; + ring->irq_put = hsw_vebox_put_irq; + ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + } ring->sync_to = gen6_ring_sync; ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER; ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV; diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 07b13dcb7fd..b9fabf826f7 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -260,14 +260,14 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (obj->tiling_mode != I915_TILING_NONE) sprctl |= SPRITE_TILED; - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE; else sprctl |= SPRITE_TRICKLE_FEED_DISABLE; sprctl |= SPRITE_ENABLE; - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) sprctl |= SPRITE_PIPE_CSC_ENABLE; intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true, @@ -306,7 +306,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET * register */ - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) I915_WRITE(SPROFFSET(pipe), (y << 16) | x); else if (obj->tiling_mode != I915_TILING_NONE) I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x); @@ -1092,6 +1092,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) break; case 7: + case 8: if (IS_IVYBRIDGE(dev)) { intel_plane->can_scale = true; intel_plane->max_downscale = 2; diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 18c406246a2..22cf0f4ba24 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -902,6 +902,13 @@ intel_tv_mode_valid(struct drm_connector *connector, } +static void +intel_tv_get_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) +{ + pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock; +} + static bool intel_tv_compute_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config) @@ -1621,6 +1628,7 @@ 
intel_tv_init(struct drm_device *dev) DRM_MODE_ENCODER_TVDAC); intel_encoder->compute_config = intel_tv_compute_config; + intel_encoder->get_config = intel_tv_get_config; intel_encoder->mode_set = intel_tv_mode_set; intel_encoder->enable = intel_enable_tv; intel_encoder->disable = intel_disable_tv; diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index f6fae35c568..0b02078a0b8 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -93,7 +93,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) { u32 forcewake_ack; - if (IS_HASWELL(dev_priv->dev)) + if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev)) forcewake_ack = FORCEWAKE_ACK_HSW; else forcewake_ack = FORCEWAKE_MT_ACK; @@ -112,7 +112,8 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); /* WaRsForcewakeWaitTC0:ivb,hsw */ - __gen6_gt_wait_for_thread_c0(dev_priv); + if (INTEL_INFO(dev_priv->dev)->gen < 8) + __gen6_gt_wait_for_thread_c0(dev_priv); } static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) @@ -216,6 +217,19 @@ static void gen6_force_wake_work(struct work_struct *work) spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } +static void intel_uncore_forcewake_reset(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_VALLEYVIEW(dev)) { + vlv_force_wake_reset(dev_priv); + } else if (INTEL_INFO(dev)->gen >= 6) { + __gen6_gt_force_wake_reset(dev_priv); + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) + __gen6_gt_force_wake_mt_reset(dev_priv); + } +} + void intel_uncore_early_sanitize(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -233,19 +247,8 @@ void intel_uncore_early_sanitize(struct drm_device *dev) dev_priv->ellc_size = 128; DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size); } -} - -static void intel_uncore_forcewake_reset(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - if (IS_VALLEYVIEW(dev)) { - vlv_force_wake_reset(dev_priv); - } else if (INTEL_INFO(dev)->gen >= 6) { - __gen6_gt_force_wake_reset(dev_priv); - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) - __gen6_gt_force_wake_mt_reset(dev_priv); - } + intel_uncore_forcewake_reset(dev); } void intel_uncore_sanitize(struct drm_device *dev) @@ -459,6 +462,46 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ } +static const u32 gen8_shadowed_regs[] = { + FORCEWAKE_MT, + GEN6_RPNSWREQ, + GEN6_RC_VIDEO_FREQ, + RING_TAIL(RENDER_RING_BASE), + RING_TAIL(GEN6_BSD_RING_BASE), + RING_TAIL(VEBOX_RING_BASE), + RING_TAIL(BLT_RING_BASE), + /* TODO: Other registers are not yet used */ +}; + +static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg) +{ + int i; + for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++) + if (reg == gen8_shadowed_regs[i]) + return true; + + return false; +} + +#define __gen8_write(x) \ +static void \ +gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ + bool __needs_put = !is_gen8_shadowed(dev_priv, reg); \ + REG_WRITE_HEADER; \ + if (__needs_put) { \ + dev_priv->uncore.funcs.force_wake_get(dev_priv); \ + } \ + __raw_i915_write##x(dev_priv, reg, val); \ + if (__needs_put) { \ + dev_priv->uncore.funcs.force_wake_put(dev_priv); \ + } \ + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ +} + 
+__gen8_write(8) +__gen8_write(16) +__gen8_write(32) +__gen8_write(64) __hsw_write(8) __hsw_write(16) __hsw_write(32) @@ -476,6 +519,7 @@ __gen4_write(16) __gen4_write(32) __gen4_write(64) +#undef __gen8_write #undef __hsw_write #undef __gen6_write #undef __gen5_write @@ -492,7 +536,7 @@ void intel_uncore_init(struct drm_device *dev) if (IS_VALLEYVIEW(dev)) { dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get; dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put; - } else if (IS_HASWELL(dev)) { + } else if (IS_HASWELL(dev) || IS_GEN8(dev)) { dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get; dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put; } else if (IS_IVYBRIDGE(dev)) { @@ -534,6 +578,16 @@ void intel_uncore_init(struct drm_device *dev) } switch (INTEL_INFO(dev)->gen) { + default: + dev_priv->uncore.funcs.mmio_writeb = gen8_write8; + dev_priv->uncore.funcs.mmio_writew = gen8_write16; + dev_priv->uncore.funcs.mmio_writel = gen8_write32; + dev_priv->uncore.funcs.mmio_writeq = gen8_write64; + dev_priv->uncore.funcs.mmio_readb = gen6_read8; + dev_priv->uncore.funcs.mmio_readw = gen6_read16; + dev_priv->uncore.funcs.mmio_readl = gen6_read32; + dev_priv->uncore.funcs.mmio_readq = gen6_read64; + break; case 7: case 6: if (IS_HASWELL(dev)) { |