85 files changed, 1307 insertions(+), 710 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 233a222752c..4f8760d7b7a 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -688,6 +688,7 @@ config ARCH_DAVINCI select HAVE_IDE select COMMON_CLKDEV select GENERIC_ALLOCATOR + select ARCH_HAS_HOLES_MEMORYMODEL help Support for TI's DaVinci platform. diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c index a9b650dcc17..077ecf4fecd 100644 --- a/arch/arm/mach-davinci/board-dm355-evm.c +++ b/arch/arm/mach-davinci/board-dm355-evm.c @@ -236,6 +236,7 @@ static struct vpfe_subdev_info vpfe_sub_devs[] = { static struct vpfe_config vpfe_cfg = { .num_subdevs = ARRAY_SIZE(vpfe_sub_devs), + .i2c_adapter_id = 1, .sub_devs = vpfe_sub_devs, .card_name = "DM355 EVM", .ccdc = "DM355 CCDC", diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index 289fe1b7d25..b476395d2cd 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c @@ -192,7 +192,11 @@ static struct davinci_i2c_platform_data i2c_pdata = { .bus_delay = 0 /* usec */, }; -#ifdef CONFIG_KEYBOARD_DAVINCI +static int dm365evm_keyscan_enable(struct device *dev) +{ + return davinci_cfg_reg(DM365_KEYSCAN); +} + static unsigned short dm365evm_keymap[] = { KEY_KP2, KEY_LEFT, @@ -214,6 +218,7 @@ static unsigned short dm365evm_keymap[] = { }; static struct davinci_ks_platform_data dm365evm_ks_data = { + .device_enable = dm365evm_keyscan_enable, .keymap = dm365evm_keymap, .keymapsize = ARRAY_SIZE(dm365evm_keymap), .rep = 1, @@ -222,7 +227,6 @@ static struct davinci_ks_platform_data dm365evm_ks_data = { .interval = 0x2, .matrix_type = DAVINCI_KEYSCAN_MATRIX_4X4, }; -#endif static int cpld_mmc_get_cd(int module) { @@ -511,10 +515,7 @@ static __init void dm365_evm_init(void) dm365_init_asp(&dm365_evm_snd_data); dm365_init_rtc(); - -#ifdef CONFIG_KEYBOARD_DAVINCI dm365_init_ks(&dm365evm_ks_data); -#endif } static __init void dm365_evm_irq_init(void) diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index fd0398bc6db..e9612cf727b 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c @@ -247,6 +247,7 @@ static struct vpfe_subdev_info vpfe_sub_devs[] = { static struct vpfe_config vpfe_cfg = { .num_subdevs = ARRAY_SIZE(vpfe_sub_devs), + .i2c_adapter_id = 1, .sub_devs = vpfe_sub_devs, .card_name = "DM6446 EVM", .ccdc = "DM6446 CCDC", diff --git a/arch/arm/mach-davinci/cp_intc.c b/arch/arm/mach-davinci/cp_intc.c index 52b287cf3a4..37311d1830e 100644 --- a/arch/arm/mach-davinci/cp_intc.c +++ b/arch/arm/mach-davinci/cp_intc.c @@ -81,12 +81,23 @@ static int cp_intc_set_irq_type(unsigned int irq, unsigned int flow_type) return 0; } +/* + * Faking this allows us to work with suspend functions of + * generic drivers which call {enable|disable}_irq_wake for + * wake-up interrupt sources (e.g. RTC on DA850).
+ */ +static int cp_intc_set_wake(unsigned int irq, unsigned int on) +{ + return 0; +} + static struct irq_chip cp_intc_irq_chip = { .name = "cp_intc", .ack = cp_intc_ack_irq, .mask = cp_intc_mask_irq, .unmask = cp_intc_unmask_irq, .set_type = cp_intc_set_irq_type, + .set_wake = cp_intc_set_wake, }; void __init cp_intc_init(void __iomem *base, unsigned short num_irq, diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index dd2d32c4ce8..a5105f03fd8 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c @@ -481,11 +481,18 @@ static struct platform_device da8xx_rtc_device = { int da8xx_register_rtc(void) { + int ret; + /* Unlock the rtc's registers */ __raw_writel(0x83e70b13, IO_ADDRESS(DA8XX_RTC_BASE + 0x6c)); __raw_writel(0x95a4f1e0, IO_ADDRESS(DA8XX_RTC_BASE + 0x70)); - return platform_device_register(&da8xx_rtc_device); + ret = platform_device_register(&da8xx_rtc_device); + if (!ret) + /* At least on DA850, RTC is a wakeup source */ + device_init_wakeup(&da8xx_rtc_device.dev, true); + + return ret; } static struct resource da8xx_cpuidle_resources[] = { diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index 2ec619ec165..f53735cb922 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c @@ -993,7 +993,6 @@ void __init dm365_init_asp(struct snd_platform_data *pdata) void __init dm365_init_ks(struct davinci_ks_platform_data *pdata) { - davinci_cfg_reg(DM365_KEYSCAN); dm365_ks_device.dev.platform_data = pdata; platform_device_register(&dm365_ks_device); } diff --git a/arch/arm/plat-s3c64xx/s3c6400-clock.c b/arch/arm/plat-s3c64xx/s3c6400-clock.c index 6ffa21eb1b9..ffd56deb9e8 100644 --- a/arch/arm/plat-s3c64xx/s3c6400-clock.c +++ b/arch/arm/plat-s3c64xx/s3c6400-clock.c @@ -46,6 +46,7 @@ static struct clk clk_ext_xtal_mux = { #define clk_fin_epll clk_ext_xtal_mux #define clk_fout_mpll clk_mpll +#define clk_fout_epll clk_epll struct clk_sources { unsigned int nr_sources; @@ -88,11 +89,6 @@ static struct clksrc_clk clk_mout_apll = { .sources = &clk_src_apll, }; -static struct clk clk_fout_epll = { - .name = "fout_epll", - .id = -1, -}; - static struct clk *clk_src_epll_list[] = { [0] = &clk_fin_epll, [1] = &clk_fout_epll, @@ -715,7 +711,6 @@ static struct clk *clks[] __initdata = { &clk_iis_cd1, &clk_pcm_cd, &clk_mout_epll.clk, - &clk_fout_epll, &clk_mout_mpll.clk, &clk_dout_mpll, &clk_mmc0.clk, @@ -760,7 +755,4 @@ void __init s3c6400_register_clocks(unsigned armclk_divlimit) clkp->name, ret); } } - - clk_mpll.parent = &clk_mout_mpll.clk; - clk_epll.parent = &clk_mout_epll.clk; } diff --git a/arch/blackfin/include/asm/page.h b/arch/blackfin/include/asm/page.h index 944a07c6cfd..1d04e407834 100644 --- a/arch/blackfin/include/asm/page.h +++ b/arch/blackfin/include/asm/page.h @@ -10,4 +10,9 @@ #include <asm-generic/page.h> #define MAP_NR(addr) (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT) +#define VM_DATA_DEFAULT_FLAGS \ + (VM_READ | VM_WRITE | \ + ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + #endif diff --git a/arch/frv/include/asm/page.h b/arch/frv/include/asm/page.h index 25c6a500235..8c97068ac8f 100644 --- a/arch/frv/include/asm/page.h +++ b/arch/frv/include/asm/page.h @@ -63,12 +63,10 @@ extern unsigned long max_pfn; #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) -#ifdef CONFIG_MMU #define VM_DATA_DEFAULT_FLAGS \ (VM_READ | VM_WRITE | \ ((current->personality & READ_IMPLIES_EXEC) ?
VM_EXEC : 0 ) | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -#endif #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 0c9825e97f3..088d09fb161 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h @@ -205,14 +205,13 @@ static inline unsigned long __must_check copy_from_user(void *to, unsigned long n) { int sz = __compiletime_object_size(to); - int ret = -EFAULT; if (likely(sz == -1 || sz >= n)) - ret = _copy_from_user(to, from, n); + n = _copy_from_user(to, from, n); else copy_from_user_overflow(); - return ret; + return n; } long __must_check strncpy_from_user(char *dst, const char __user *src, diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 46324c6a4f6..535e421498f 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -30,16 +30,15 @@ static inline unsigned long __must_check copy_from_user(void *to, unsigned long n) { int sz = __compiletime_object_size(to); - int ret = -EFAULT; might_fault(); if (likely(sz == -1 || sz >= n)) - ret = _copy_from_user(to, from, n); + n = _copy_from_user(to, from, n); #ifdef CONFIG_DEBUG_VM else WARN(1, "Buffer overflow detected!\n"); #endif - return ret; + return n; } static __always_inline __must_check diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index de00c4619a5..53243ca7816 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2434,6 +2434,13 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) cfg = irq_cfg(irq); raw_spin_lock(&desc->lock); + /* + * Check if the irq migration is in progress. If so, we + * haven't received the cleanup request yet for this irq. + */ + if (cfg->move_in_progress) + goto unlock; + if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) goto unlock; diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 05ed7ab2ca4..a1a7876cadc 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -733,13 +733,13 @@ struct early_res { }; static struct early_res early_res[MAX_EARLY_RES] __initdata = { { 0, PAGE_SIZE, "BIOS data page", 1 }, /* BIOS data page */ -#ifdef CONFIG_X86_32 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_TRAMPOLINE) /* * But first pinch a few for the stack/trampoline stuff * FIXME: Don't need the extra page at 4K, but need to fix * trampoline before removing it. 
(see the GDT stuff) */ - { PAGE_SIZE, PAGE_SIZE, "EX TRAMPOLINE", 1 }, + { PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE", 1 }, #endif {} diff --git a/arch/x86/pci/intel_bus.c b/arch/x86/pci/intel_bus.c index b7a55dc55d1..f81a2fa8fe2 100644 --- a/arch/x86/pci/intel_bus.c +++ b/arch/x86/pci/intel_bus.c @@ -49,6 +49,10 @@ static void __devinit pci_root_bus_res(struct pci_dev *dev) u64 mmioh_base, mmioh_end; int bus_base, bus_end; + /* some systems don't get mmconf enabled */ + if (dev->cfg_size < 0x120) + return; + if (pci_root_num >= PCI_ROOT_NR) { printk(KERN_DEBUG "intel_bus.c: PCI_ROOT_NR is too small\n"); return; diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c index 628eae3e9b8..a1fce68e3bb 100644 --- a/drivers/gpu/drm/ati_pcigart.c +++ b/drivers/gpu/drm/ati_pcigart.c @@ -39,8 +39,7 @@ static int drm_ati_alloc_pcigart_table(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info) { gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size, - PAGE_SIZE, - gart_info->table_mask); + PAGE_SIZE); if (gart_info->table_handle == NULL) return -ENOMEM; @@ -112,6 +111,13 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) { DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n"); + if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) { + DRM_ERROR("failed to set dma mask to 0x%Lx\n", + gart_info->table_mask); + ret = 1; + goto done; + } + ret = drm_ati_alloc_pcigart_table(dev, gart_info); if (ret) { DRM_ERROR("cannot allocate PCI GART page!\n"); diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 3d09e304f6f..8417cc4c43f 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -326,7 +326,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, * As we're limiting the address to 2^32-1 (or less), * casting it down to 32 bits is no problem, but we * need to point to a 64bit variable first. */ - dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL); + dmah = drm_pci_alloc(dev, map->size, map->size); if (!dmah) { kfree(map); return -ENOMEM; @@ -885,7 +885,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request) while (entry->buf_count < count) { - dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful); + dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000); if (!dmah) { /* Set count correctly so we free the proper amount.
*/ diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 5c9f79877cb..defcaf10846 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -911,23 +911,27 @@ static int drm_cvt_modes(struct drm_connector *connector, struct drm_device *dev = connector->dev; struct cvt_timing *cvt; const int rates[] = { 60, 85, 75, 60, 50 }; + const u8 empty[3] = { 0, 0, 0 }; for (i = 0; i < 4; i++) { int uninitialized_var(width), height; cvt = &(timing->data.other_data.data.cvt[i]); - height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2; - switch (cvt->code[1] & 0xc0) { + if (!memcmp(cvt->code, empty, 3)) + continue; + + height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2; + switch (cvt->code[1] & 0x0c) { case 0x00: width = height * 4 / 3; break; - case 0x40: + case 0x04: width = height * 16 / 9; break; - case 0x80: + case 0x08: width = height * 16 / 10; break; - case 0xc0: + case 0x0c: width = height * 15 / 9; break; } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 1b49fa055f4..100ee48760b 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -156,7 +156,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con force = DRM_FORCE_ON; break; case 'D': - if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) || + if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) && (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB)) force = DRM_FORCE_ON; else diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 577094fb199..e68ebf92fa2 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -47,8 +47,7 @@ /** * \brief Allocate a PCI consistent memory block, for DMA. */ -drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align, - dma_addr_t maxaddr) +drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align) { drm_dma_handle_t *dmah; #if 1 @@ -63,11 +62,6 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali if (align > size) return NULL; - if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) { - DRM_ERROR("Setting pci dma mask failed\n"); - return NULL; - } - dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL); if (!dmah) return NULL; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 18476bf0b58..9c9998c4dce 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -272,7 +272,7 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_co mem = kmap_atomic(pages[page], KM_USER0); for (i = 0; i < PAGE_SIZE; i += 4) seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); - kunmap_atomic(pages[page], KM_USER0); + kunmap_atomic(mem, KM_USER0); } } @@ -386,34 +386,6 @@ out: return 0; } -static int i915_registers_info(struct seq_file *m, void *data) { - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t reg; - -#define DUMP_RANGE(start, end) \ - for (reg=start; reg < end; reg += 4) \ - seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg)); - - DUMP_RANGE(0x00000, 0x00fff); /* VGA registers */ - DUMP_RANGE(0x02000, 0x02fff); /* instruction, memory, interrupt control registers */ - DUMP_RANGE(0x03000, 0x031ff); /* FENCE and PPGTT control registers */ - DUMP_RANGE(0x03200, 0x03fff); /* frame buffer compression registers */ - 
DUMP_RANGE(0x05000, 0x05fff); /* I/O control registers */ - DUMP_RANGE(0x06000, 0x06fff); /* clock control registers */ - DUMP_RANGE(0x07000, 0x07fff); /* 3D internal debug registers */ - DUMP_RANGE(0x07400, 0x088ff); /* GPE debug registers */ - DUMP_RANGE(0x0a000, 0x0afff); /* display palette registers */ - DUMP_RANGE(0x10000, 0x13fff); /* MMIO MCHBAR */ - DUMP_RANGE(0x30000, 0x3ffff); /* overlay registers */ - DUMP_RANGE(0x60000, 0x6ffff); /* display engine pipeline registers */ - DUMP_RANGE(0x70000, 0x72fff); /* display and cursor registers */ - DUMP_RANGE(0x73000, 0x73fff); /* performance counters */ - - return 0; -} - static int i915_wedged_open(struct inode *inode, struct file *filp) @@ -519,7 +491,6 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor) } static struct drm_info_list i915_debugfs_list[] = { - {"i915_regs", i915_registers_info, 0}, {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 701bfeac7f5..bbe47812e4b 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -123,7 +123,7 @@ static int i915_init_phys_hws(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; /* Program Hardware Status Page */ dev_priv->status_page_dmah = - drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff); + drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE); if (!dev_priv->status_page_dmah) { DRM_ERROR("Can not allocate hardware status page\n"); @@ -813,9 +813,13 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_PAGEFLIPPING: value = 1; break; + case I915_PARAM_HAS_EXECBUF2: + /* depends on GEM */ + value = dev_priv->has_gem; + break; default: DRM_DEBUG_DRIVER("Unknown parameter %d\n", - param->param); + param->param); return -EINVAL; } @@ -1117,7 +1121,8 @@ static void i915_setup_compression(struct drm_device *dev, int size) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_mm_node *compressed_fb, *compressed_llb; - unsigned long cfb_base, ll_base; + unsigned long cfb_base; + unsigned long ll_base = 0; /* Leave 1M for line length buffer & misc. 
*/ compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); @@ -1200,14 +1205,6 @@ static int i915_load_modeset_init(struct drm_device *dev, dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & 0xff000000; - if (IS_MOBILE(dev) || IS_I9XX(dev)) - dev_priv->cursor_needs_physical = true; - else - dev_priv->cursor_needs_physical = false; - - if (IS_I965G(dev) || IS_G33(dev)) - dev_priv->cursor_needs_physical = false; - /* Basic memrange allocator for stolen space (aka vram) */ drm_mm_init(&dev_priv->vram, 0, prealloc_size); DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024)); @@ -1257,6 +1254,8 @@ static int i915_load_modeset_init(struct drm_device *dev, if (ret) goto destroy_ringbuffer; + intel_modeset_init(dev); + ret = drm_irq_install(dev); if (ret) goto destroy_ringbuffer; @@ -1271,8 +1270,6 @@ static int i915_load_modeset_init(struct drm_device *dev, I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); - intel_modeset_init(dev); - drm_helper_initial_config(dev); return 0; @@ -1360,7 +1357,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) { struct drm_i915_private *dev_priv = dev->dev_private; resource_size_t base, size; - int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1; + int ret = 0, mmio_bar; uint32_t agp_size, prealloc_size, prealloc_start; /* i915 has 4 more counters */ @@ -1376,8 +1373,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev->dev_private = (void *)dev_priv; dev_priv->dev = dev; + dev_priv->info = (struct intel_device_info *) flags; /* Add register map (needed for suspend/resume) */ + mmio_bar = IS_I9XX(dev) ? 0 : 1; base = drm_get_resource_start(dev, mmio_bar); size = drm_get_resource_len(dev, mmio_bar); @@ -1652,6 +1651,7 @@ struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH), diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 24286ca168f..2ffffd7ae09 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -33,7 +33,6 @@ #include "i915_drm.h" #include "i915_drv.h" -#include "drm_pciids.h" #include <linux/console.h> #include "drm_crtc_helper.h" @@ -48,8 +47,124 @@ module_param_named(powersave, i915_powersave, int, 0400); static struct drm_driver driver; -static struct pci_device_id pciidlist[] = { - i915_PCI_IDS +#define INTEL_VGA_DEVICE(id, info) { \ + .class = PCI_CLASS_DISPLAY_VGA << 8, \ + .class_mask = 0xffff00, \ + .vendor = 0x8086, \ + .device = id, \ + .subvendor = PCI_ANY_ID, \ + .subdevice = PCI_ANY_ID, \ + .driver_data = (unsigned long) info } + +const static struct intel_device_info intel_i830_info = { + .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, +}; + +const static struct intel_device_info intel_845g_info = { + .is_i8xx = 1, +}; + +const static struct intel_device_info intel_i85x_info = { + .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, +}; + +const static struct intel_device_info intel_i865g_info = { + .is_i8xx = 1, +}; + +const static struct intel_device_info 
intel_i915g_info = { + .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, +}; +const static struct intel_device_info intel_i915gm_info = { + .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, + .cursor_needs_physical = 1, +}; +const static struct intel_device_info intel_i945g_info = { + .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, +}; +const static struct intel_device_info intel_i945gm_info = { + .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, + .has_hotplug = 1, .cursor_needs_physical = 1, +}; + +const static struct intel_device_info intel_i965g_info = { + .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1, +}; + +const static struct intel_device_info intel_i965gm_info = { + .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1, + .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, + .has_hotplug = 1, +}; + +const static struct intel_device_info intel_g33_info = { + .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1, + .has_hotplug = 1, +}; + +const static struct intel_device_info intel_g45_info = { + .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1, + .has_pipe_cxsr = 1, + .has_hotplug = 1, +}; + +const static struct intel_device_info intel_gm45_info = { + .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1, + .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, + .has_pipe_cxsr = 1, + .has_hotplug = 1, +}; + +const static struct intel_device_info intel_pineview_info = { + .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, + .has_pipe_cxsr = 1, + .has_hotplug = 1, +}; + +const static struct intel_device_info intel_ironlake_d_info = { + .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, + .has_pipe_cxsr = 1, + .has_hotplug = 1, +}; + +const static struct intel_device_info intel_ironlake_m_info = { + .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, + .need_gfx_hws = 1, .has_rc6 = 1, + .has_hotplug = 1, +}; + +const static struct pci_device_id pciidlist[] = { + INTEL_VGA_DEVICE(0x3577, &intel_i830_info), + INTEL_VGA_DEVICE(0x2562, &intel_845g_info), + INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), + INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info), + INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), + INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), + INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), + INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), + INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), + INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), + INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), + INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), + INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), + INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), + INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), + INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), + INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), + INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), + INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), + INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), + INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), + INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), + INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), + INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), + INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), + INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), + INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), + INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), + INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), + INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), + {0, 0, 0} }; #if defined(CONFIG_DRM_I915_KMS) @@ -284,6 +399,52 @@ i915_pci_resume(struct pci_dev *pdev) return i915_resume(dev); } +static int +i915_pm_suspend(struct 
device *dev) +{ + return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND); +} + +static int +i915_pm_resume(struct device *dev) +{ + return i915_pci_resume(to_pci_dev(dev)); +} + +static int +i915_pm_freeze(struct device *dev) +{ + return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE); +} + +static int +i915_pm_thaw(struct device *dev) +{ + /* thaw during hibernate, do nothing! */ + return 0; +} + +static int +i915_pm_poweroff(struct device *dev) +{ + return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE); +} + +static int +i915_pm_restore(struct device *dev) +{ + return i915_pci_resume(to_pci_dev(dev)); +} + +const struct dev_pm_ops i915_pm_ops = { + .suspend = i915_pm_suspend, + .resume = i915_pm_resume, + .freeze = i915_pm_freeze, + .thaw = i915_pm_thaw, + .poweroff = i915_pm_poweroff, + .restore = i915_pm_restore, +}; + static struct vm_operations_struct i915_gem_vm_ops = { .fault = i915_gem_fault, .open = drm_gem_vm_open, @@ -303,8 +464,6 @@ static struct drm_driver driver = { .lastclose = i915_driver_lastclose, .preclose = i915_driver_preclose, .postclose = i915_driver_postclose, - .suspend = i915_suspend, - .resume = i915_resume, .device_is_agp = i915_driver_device_is_agp, .enable_vblank = i915_enable_vblank, .disable_vblank = i915_disable_vblank, @@ -344,10 +503,7 @@ static struct drm_driver driver = { .id_table = pciidlist, .probe = i915_pci_probe, .remove = i915_pci_remove, -#ifdef CONFIG_PM - .resume = i915_pci_resume, - .suspend = i915_pci_suspend, -#endif + .driver.pm = &i915_pm_ops, }, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index fbecac72f5b..29dd6762696 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -172,9 +172,31 @@ struct drm_i915_display_funcs { struct intel_overlay; +struct intel_device_info { + u8 is_mobile : 1; + u8 is_i8xx : 1; + u8 is_i915g : 1; + u8 is_i9xx : 1; + u8 is_i945gm : 1; + u8 is_i965g : 1; + u8 is_i965gm : 1; + u8 is_g33 : 1; + u8 need_gfx_hws : 1; + u8 is_g4x : 1; + u8 is_pineview : 1; + u8 is_ironlake : 1; + u8 has_fbc : 1; + u8 has_rc6 : 1; + u8 has_pipe_cxsr : 1; + u8 has_hotplug : 1; + u8 cursor_needs_physical : 1; +}; + typedef struct drm_i915_private { struct drm_device *dev; + const struct intel_device_info *info; + int has_gem; void __iomem *regs; @@ -232,8 +254,6 @@ typedef struct drm_i915_private { int hangcheck_count; uint32_t last_acthd; - bool cursor_needs_physical; - struct drm_mm vram; unsigned long cfb_size; @@ -287,8 +307,6 @@ typedef struct drm_i915_private { u32 saveDSPACNTR; u32 saveDSPBCNTR; u32 saveDSPARB; - u32 saveRENDERSTANDBY; - u32 savePWRCTXA; u32 saveHWS; u32 savePIPEACONF; u32 savePIPEBCONF; @@ -561,6 +579,7 @@ typedef struct drm_i915_private { u16 orig_clock; int child_dev_num; struct child_device_config *child_dev; + struct drm_connector *int_lvds_connector; } drm_i915_private_t; /** driver private structure attached to each drm_gem_object */ @@ -794,6 +813,8 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv); +int i915_gem_execbuffer2(struct drm_device *dev, void *data, + struct drm_file *file_priv); int i915_gem_pin_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, @@ -860,6 +881,9 @@ void i915_gem_shrinker_exit(void); void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); void 
i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); +bool i915_tiling_ok(struct drm_device *dev, int stride, int size, + int tiling_mode); +bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj); /* i915_gem_debug.c */ void i915_gem_dump_object(struct drm_gem_object *obj, int len, @@ -982,67 +1006,33 @@ extern void g4x_disable_fbc(struct drm_device *dev); extern int i915_wrap_ring(struct drm_device * dev); extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); -#define IS_I830(dev) ((dev)->pci_device == 0x3577) -#define IS_845G(dev) ((dev)->pci_device == 0x2562) -#define IS_I85X(dev) ((dev)->pci_device == 0x3582) -#define IS_I865G(dev) ((dev)->pci_device == 0x2572) -#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev)) - -#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a) -#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) -#define IS_I945G(dev) ((dev)->pci_device == 0x2772) -#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\ - (dev)->pci_device == 0x27AE) -#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \ - (dev)->pci_device == 0x2982 || \ - (dev)->pci_device == 0x2992 || \ - (dev)->pci_device == 0x29A2 || \ - (dev)->pci_device == 0x2A02 || \ - (dev)->pci_device == 0x2A12 || \ - (dev)->pci_device == 0x2A42 || \ - (dev)->pci_device == 0x2E02 || \ - (dev)->pci_device == 0x2E12 || \ - (dev)->pci_device == 0x2E22 || \ - (dev)->pci_device == 0x2E32 || \ - (dev)->pci_device == 0x2E42 || \ - (dev)->pci_device == 0x0042 || \ - (dev)->pci_device == 0x0046) - -#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \ - (dev)->pci_device == 0x2A12) - -#define IS_GM45(dev) ((dev)->pci_device == 0x2A42) - -#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \ - (dev)->pci_device == 0x2E12 || \ - (dev)->pci_device == 0x2E22 || \ - (dev)->pci_device == 0x2E32 || \ - (dev)->pci_device == 0x2E42 || \ - IS_GM45(dev)) - -#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) -#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) -#define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev)) - -#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ - (dev)->pci_device == 0x29B2 || \ - (dev)->pci_device == 0x29D2 || \ - (IS_PINEVIEW(dev))) - +#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) + +#define IS_I830(dev) ((dev)->pci_device == 0x3577) +#define IS_845G(dev) ((dev)->pci_device == 0x2562) +#define IS_I85X(dev) ((dev)->pci_device == 0x3582) +#define IS_I865G(dev) ((dev)->pci_device == 0x2572) +#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx) +#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) +#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) +#define IS_I945G(dev) ((dev)->pci_device == 0x2772) +#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) +#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g) +#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm) +#define IS_GM45(dev) ((dev)->pci_device == 0x2A42) +#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) +#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) +#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) +#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) +#define IS_G33(dev) (INTEL_INFO(dev)->is_g33) #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) -#define IS_IRONLAKE(dev) (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev)) - -#define IS_I9XX(dev) 
(IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ - IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \ - IS_IRONLAKE(dev)) +#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake) +#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) +#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) -#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ - IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \ - IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev)) +#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) -#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \ - IS_IRONLAKE(dev)) /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. */ @@ -1054,17 +1044,14 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) #define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev)) -#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev)) +#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) /* dsparb controlled by hw only */ #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev)) -#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) -#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \ - (IS_I9XX(dev) || IS_GM45(dev)) && \ - !IS_PINEVIEW(dev) && \ - !IS_IRONLAKE(dev)) -#define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev)) +#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) +#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) +#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) #define PRIMARY_RINGBUFFER_SIZE (128*1024) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8c463cf2050..2748609f05b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2021,9 +2021,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj) /* blow away mappings if mapped through GTT */ i915_gem_release_mmap(obj); - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) - i915_gem_clear_fence_reg(obj); - /* Move the object to the CPU domain to ensure that * any possible CPU writes while it's not in the GTT * are flushed when we go to remap it. 
This will @@ -2039,6 +2036,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj) BUG_ON(obj_priv->active); + /* release the fence reg _after_ flushing */ + if (obj_priv->fence_reg != I915_FENCE_REG_NONE) + i915_gem_clear_fence_reg(obj); + if (obj_priv->agp_mem != NULL) { drm_unbind_agp(obj_priv->agp_mem); drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); @@ -2581,9 +2582,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) bool retry_alloc = false; int ret; - if (dev_priv->mm.suspended) - return -EBUSY; - if (obj_priv->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to bind a purgeable object\n"); return -EINVAL; @@ -3198,7 +3196,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, static int i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, struct drm_file *file_priv, - struct drm_i915_gem_exec_object *entry, + struct drm_i915_gem_exec_object2 *entry, struct drm_i915_gem_relocation_entry *relocs) { struct drm_device *dev = obj->dev; @@ -3206,12 +3204,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, struct drm_i915_gem_object *obj_priv = obj->driver_private; int i, ret; void __iomem *reloc_page; + bool need_fence; + + need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE && + obj_priv->tiling_mode != I915_TILING_NONE; + + /* Check fence reg constraints and rebind if necessary */ + if (need_fence && !i915_obj_fenceable(dev, obj)) + i915_gem_object_unbind(obj); /* Choose the GTT offset for our buffer and put it there. */ ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); if (ret) return ret; + /* + * Pre-965 chips need a fence register set up in order to + * properly handle blits to/from tiled surfaces. + */ + if (need_fence) { + ret = i915_gem_object_get_fence_reg(obj); + if (ret != 0) { + if (ret != -EBUSY && ret != -ERESTARTSYS) + DRM_ERROR("Failure to install fence: %d\n", + ret); + i915_gem_object_unpin(obj); + return ret; + } + } + entry->offset = obj_priv->gtt_offset; /* Apply the relocations, using the GTT aperture to avoid cache @@ -3373,7 +3394,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, */ static int i915_dispatch_gem_execbuffer(struct drm_device *dev, - struct drm_i915_gem_execbuffer *exec, + struct drm_i915_gem_execbuffer2 *exec, struct drm_clip_rect *cliprects, uint64_t exec_offset) { @@ -3463,7 +3484,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) } static int -i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, +i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list, uint32_t buffer_count, struct drm_i915_gem_relocation_entry **relocs) { @@ -3478,8 +3499,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, } *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); - if (*relocs == NULL) + if (*relocs == NULL) { + DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count); return -ENOMEM; + } for (i = 0; i < buffer_count; i++) { struct drm_i915_gem_relocation_entry __user *user_relocs; @@ -3503,7 +3526,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, } static int -i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list, +i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list, uint32_t buffer_count, struct drm_i915_gem_relocation_entry *relocs) { @@ -3536,7 +3559,7 @@ err: } static int -i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec, +i915_gem_check_execbuffer (struct 
drm_i915_gem_execbuffer2 *exec, uint64_t exec_offset) { uint32_t exec_start, exec_len; @@ -3589,18 +3612,18 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev, } int -i915_gem_execbuffer(struct drm_device *dev, void *data, - struct drm_file *file_priv) +i915_gem_do_execbuffer(struct drm_device *dev, void *data, + struct drm_file *file_priv, + struct drm_i915_gem_execbuffer2 *args, + struct drm_i915_gem_exec_object2 *exec_list) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_execbuffer *args = data; - struct drm_i915_gem_exec_object *exec_list = NULL; struct drm_gem_object **object_list = NULL; struct drm_gem_object *batch_obj; struct drm_i915_gem_object *obj_priv; struct drm_clip_rect *cliprects = NULL; struct drm_i915_gem_relocation_entry *relocs; - int ret, ret2, i, pinned = 0; + int ret = 0, ret2, i, pinned = 0; uint64_t exec_offset; uint32_t seqno, flush_domains, reloc_index; int pin_tries, flips; @@ -3614,25 +3637,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); return -EINVAL; } - /* Copy in the exec list from userland */ - exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); - if (exec_list == NULL || object_list == NULL) { - DRM_ERROR("Failed to allocate exec or object list " - "for %d buffers\n", + if (object_list == NULL) { + DRM_ERROR("Failed to allocate object list for %d buffers\n", args->buffer_count); ret = -ENOMEM; goto pre_mutex_err; } - ret = copy_from_user(exec_list, - (struct drm_i915_relocation_entry __user *) - (uintptr_t) args->buffers_ptr, - sizeof(*exec_list) * args->buffer_count); - if (ret != 0) { - DRM_ERROR("copy %d exec entries failed %d\n", - args->buffer_count, ret); - goto pre_mutex_err; - } if (args->num_cliprects != 0) { cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), @@ -3884,20 +3895,6 @@ err: mutex_unlock(&dev->struct_mutex); - if (!ret) { - /* Copy the new buffer offsets back to the user's exec list. */ - ret = copy_to_user((struct drm_i915_relocation_entry __user *) - (uintptr_t) args->buffers_ptr, - exec_list, - sizeof(*exec_list) * args->buffer_count); - if (ret) { - ret = -EFAULT; - DRM_ERROR("failed to copy %d exec entries " - "back to user (%d)\n", - args->buffer_count, ret); - } - } - /* Copy the updated relocations out regardless of current error * state. Failure to update the relocs would mean that the next * time userland calls execbuf, it would do so with presumed offset @@ -3914,12 +3911,158 @@ err: pre_mutex_err: drm_free_large(object_list); - drm_free_large(exec_list); kfree(cliprects); return ret; } +/* + * Legacy execbuffer just creates an exec2 list from the original exec object + * list array and passes it to the real function. 
+ */ +int +i915_gem_execbuffer(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_gem_execbuffer *args = data; + struct drm_i915_gem_execbuffer2 exec2; + struct drm_i915_gem_exec_object *exec_list = NULL; + struct drm_i915_gem_exec_object2 *exec2_list = NULL; + int ret, i; + +#if WATCH_EXEC + DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", + (int) args->buffers_ptr, args->buffer_count, args->batch_len); +#endif + + if (args->buffer_count < 1) { + DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); + return -EINVAL; + } + + /* Copy in the exec list from userland */ + exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); + exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); + if (exec_list == NULL || exec2_list == NULL) { + DRM_ERROR("Failed to allocate exec list for %d buffers\n", + args->buffer_count); + drm_free_large(exec_list); + drm_free_large(exec2_list); + return -ENOMEM; + } + ret = copy_from_user(exec_list, + (struct drm_i915_relocation_entry __user *) + (uintptr_t) args->buffers_ptr, + sizeof(*exec_list) * args->buffer_count); + if (ret != 0) { + DRM_ERROR("copy %d exec entries failed %d\n", + args->buffer_count, ret); + drm_free_large(exec_list); + drm_free_large(exec2_list); + return -EFAULT; + } + + for (i = 0; i < args->buffer_count; i++) { + exec2_list[i].handle = exec_list[i].handle; + exec2_list[i].relocation_count = exec_list[i].relocation_count; + exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; + exec2_list[i].alignment = exec_list[i].alignment; + exec2_list[i].offset = exec_list[i].offset; + if (!IS_I965G(dev)) + exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; + else + exec2_list[i].flags = 0; + } + + exec2.buffers_ptr = args->buffers_ptr; + exec2.buffer_count = args->buffer_count; + exec2.batch_start_offset = args->batch_start_offset; + exec2.batch_len = args->batch_len; + exec2.DR1 = args->DR1; + exec2.DR4 = args->DR4; + exec2.num_cliprects = args->num_cliprects; + exec2.cliprects_ptr = args->cliprects_ptr; + exec2.flags = 0; + + ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); + if (!ret) { + /* Copy the new buffer offsets back to the user's exec list. */ + for (i = 0; i < args->buffer_count; i++) + exec_list[i].offset = exec2_list[i].offset; + /* ... 
and back out to userspace */ + ret = copy_to_user((struct drm_i915_relocation_entry __user *) + (uintptr_t) args->buffers_ptr, + exec_list, + sizeof(*exec_list) * args->buffer_count); + if (ret) { + ret = -EFAULT; + DRM_ERROR("failed to copy %d exec entries " + "back to user (%d)\n", + args->buffer_count, ret); + } + } else { + DRM_ERROR("i915_gem_do_execbuffer returns %d\n", ret); + } + + drm_free_large(exec_list); + drm_free_large(exec2_list); + return ret; +} + +int +i915_gem_execbuffer2(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_gem_execbuffer2 *args = data; + struct drm_i915_gem_exec_object2 *exec2_list = NULL; + int ret; + +#if WATCH_EXEC + DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", + (int) args->buffers_ptr, args->buffer_count, args->batch_len); +#endif + + if (args->buffer_count < 1) { + DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); + return -EINVAL; + } + + exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); + if (exec2_list == NULL) { + DRM_ERROR("Failed to allocate exec list for %d buffers\n", + args->buffer_count); + return -ENOMEM; + } + ret = copy_from_user(exec2_list, + (struct drm_i915_relocation_entry __user *) + (uintptr_t) args->buffers_ptr, + sizeof(*exec2_list) * args->buffer_count); + if (ret != 0) { + DRM_ERROR("copy %d exec entries failed %d\n", + args->buffer_count, ret); + drm_free_large(exec2_list); + return -EFAULT; + } + + ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list); + if (!ret) { + /* Copy the new buffer offsets back to the user's exec list. */ + ret = copy_to_user((struct drm_i915_relocation_entry __user *) + (uintptr_t) args->buffers_ptr, + exec2_list, + sizeof(*exec2_list) * args->buffer_count); + if (ret) { + ret = -EFAULT; + DRM_ERROR("failed to copy %d exec entries " + "back to user (%d)\n", + args->buffer_count, ret); + } + } + + drm_free_large(exec2_list); + return ret; +} + int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) { @@ -3933,19 +4076,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) if (ret) return ret; } - /* - * Pre-965 chips need a fence register set up in order to - * properly handle tiled surfaces. - */ - if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) { - ret = i915_gem_object_get_fence_reg(obj); - if (ret != 0) { - if (ret != -EBUSY && ret != -ERESTARTSYS) - DRM_ERROR("Failure to install fence: %d\n", - ret); - return ret; - } - } + obj_priv->pin_count++; /* If the object is not active and not pending a flush, @@ -4708,7 +4839,7 @@ int i915_gem_init_phys_object(struct drm_device *dev, phys_obj->id = id; - phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff); + phys_obj->handle = drm_pci_alloc(dev, size, 0); if (!phys_obj->handle) { ret = -ENOMEM; goto kfree_obj; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 30d6af6c09b..df278b2685b 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -304,35 +304,39 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) /** - * Returns the size of the fence for a tiled object of the given size. + * Returns whether an object is currently fenceable. If not, it may need + * to be unbound and have its pitch adjusted. 
*/ -static int -i915_get_fence_size(struct drm_device *dev, int size) +bool +i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj) { - int i; - int start; + struct drm_i915_gem_object *obj_priv = obj->driver_private; if (IS_I965G(dev)) { /* The 965 can have fences at any page boundary. */ - return ALIGN(size, 4096); + if (obj->size & 4095) + return false; + return true; + } else if (IS_I9XX(dev)) { + if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK) + return false; } else { - /* Align the size to a power of two greater than the smallest - * fence size. - */ - if (IS_I9XX(dev)) - start = 1024 * 1024; - else - start = 512 * 1024; + if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) + return false; + } - for (i = start; i < size; i <<= 1) - ; + /* Power of two sized... */ + if (obj->size & (obj->size - 1)) + return false; - return i; - } + /* Objects must be size aligned as well */ + if (obj_priv->gtt_offset & (obj->size - 1)) + return false; + return true; } /* Check pitch constraints for all chips & tiling formats */ -static bool +bool i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) { int tile_width; @@ -384,12 +388,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) if (stride & (stride - 1)) return false; - /* We don't handle the aperture area covered by the fence being bigger - * than the object size. - */ - if (i915_get_fence_size(dev, size) != size) - return false; - return true; } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 85f4c5de97e..7cd8110051b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -313,6 +313,8 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) dev_priv->mm.irq_gem_seqno = seqno; trace_i915_gem_request_complete(dev, seqno); DRM_WAKEUP(&dev_priv->irq_queue); + dev_priv->hangcheck_count = 0; + mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); } if (de_iir & DE_GSE) @@ -1084,6 +1086,10 @@ void i915_driver_irq_preinstall(struct drm_device * dev) (void) I915_READ(IER); } +/* + * Must be called after intel_modeset_init or hotplug interrupts won't be + * enabled correctly.
+ */ int i915_driver_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -1106,19 +1112,23 @@ int i915_driver_irq_postinstall(struct drm_device *dev) if (I915_HAS_HOTPLUG(dev)) { u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); - /* Leave other bits alone */ - hotplug_en |= HOTPLUG_EN_MASK; + /* Note HDMI and DP share bits */ + if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) + hotplug_en |= HDMIB_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) + hotplug_en |= HDMIC_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) + hotplug_en |= HDMID_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) + hotplug_en |= SDVOC_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) + hotplug_en |= SDVOB_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) + hotplug_en |= CRT_HOTPLUG_INT_EN; + /* Ignore TV since it's buggy */ + I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); - dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS | - TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS | - SDVOB_HOTPLUG_INT_STATUS; - if (IS_G4X(dev)) { - dev_priv->hotplug_supported_mask |= - HDMIB_HOTPLUG_INT_STATUS | - HDMIC_HOTPLUG_INT_STATUS | - HDMID_HOTPLUG_INT_STATUS; - } /* Enable in IER... */ enable_mask |= I915_DISPLAY_PORT_INTERRUPT; /* and unmask in IMR */ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 974b3cf7061..149d360d64a 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -879,13 +879,6 @@ #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ #define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f -#define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \ - HDMIC_HOTPLUG_INT_EN | \ - HDMID_HOTPLUG_INT_EN | \ - SDVOB_HOTPLUG_INT_EN | \ - SDVOC_HOTPLUG_INT_EN | \ - CRT_HOTPLUG_INT_EN) - #define PORT_HOTPLUG_STAT 0x61114 #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) @@ -982,6 +975,8 @@ #define LVDS_PORT_EN (1 << 31) /* Selects pipe B for LVDS data. Must be set on pre-965. 
*/ #define LVDS_PIPEB_SELECT (1 << 30) +/* LVDS dithering flag on 965/g4x platform */ +#define LVDS_ENABLE_DITHER (1 << 25) /* Enable border for unscaled (or aspect-scaled) display */ #define LVDS_BORDER_ENABLE (1 << 15) /* @@ -1751,6 +1746,8 @@ /* Display & cursor control */ +/* dithering flag on Ironlake */ +#define PIPE_ENABLE_DITHER (1 << 4) /* Pipe A */ #define PIPEADSL 0x70000 #define PIPEACONF 0x70008 diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index d5ebb00a9d4..a3b90c9561d 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -732,12 +732,6 @@ int i915_save_state(struct drm_device *dev) pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); - /* Render Standby */ - if (I915_HAS_RC6(dev)) { - dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); - dev_priv->savePWRCTXA = I915_READ(PWRCTXA); - } - /* Hardware status page */ dev_priv->saveHWS = I915_READ(HWS_PGA); @@ -793,12 +787,6 @@ int i915_restore_state(struct drm_device *dev) pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); - /* Render Standby */ - if (I915_HAS_RC6(dev)) { - I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); - I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA); - } - /* Hardware status page */ I915_WRITE(HWS_PGA, dev_priv->saveHWS); diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 9f3d3e56341..ddefc871edf 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -548,4 +548,6 @@ void intel_crt_init(struct drm_device *dev) drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); drm_sysfs_connector_add(connector); + + dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 52cd9b006da..002612fae71 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -262,6 +262,14 @@ struct intel_limit { #define IRONLAKE_P2_LVDS_FAST 7 /* double channel */ #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */ +#define IRONLAKE_P_DISPLAY_PORT_MIN 10 +#define IRONLAKE_P_DISPLAY_PORT_MAX 20 +#define IRONLAKE_P2_DISPLAY_PORT_FAST 10 +#define IRONLAKE_P2_DISPLAY_PORT_SLOW 10 +#define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0 +#define IRONLAKE_P1_DISPLAY_PORT_MIN 1 +#define IRONLAKE_P1_DISPLAY_PORT_MAX 2 + static bool intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock); @@ -271,9 +279,6 @@ intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, static bool intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock); -static bool -intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, - int target, int refclk, intel_clock_t *best_clock); static bool intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, @@ -496,7 +501,7 @@ static const intel_limit_t intel_limits_ironlake_sdvo = { .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW, .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST }, - .find_pll = intel_ironlake_find_best_PLL, + .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_ironlake_lvds = { @@ -511,7 +516,30 @@ static const intel_limit_t intel_limits_ironlake_lvds = { .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, .p2_slow = IRONLAKE_P2_LVDS_SLOW, .p2_fast = 
IRONLAKE_P2_LVDS_FAST }, - .find_pll = intel_ironlake_find_best_PLL, + .find_pll = intel_g4x_find_best_PLL, +}; + +static const intel_limit_t intel_limits_ironlake_display_port = { + .dot = { .min = IRONLAKE_DOT_MIN, + .max = IRONLAKE_DOT_MAX }, + .vco = { .min = IRONLAKE_VCO_MIN, + .max = IRONLAKE_VCO_MAX}, + .n = { .min = IRONLAKE_N_MIN, + .max = IRONLAKE_N_MAX }, + .m = { .min = IRONLAKE_M_MIN, + .max = IRONLAKE_M_MAX }, + .m1 = { .min = IRONLAKE_M1_MIN, + .max = IRONLAKE_M1_MAX }, + .m2 = { .min = IRONLAKE_M2_MIN, + .max = IRONLAKE_M2_MAX }, + .p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN, + .max = IRONLAKE_P_DISPLAY_PORT_MAX }, + .p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN, + .max = IRONLAKE_P1_DISPLAY_PORT_MAX}, + .p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT, + .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW, + .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST }, + .find_pll = intel_find_pll_ironlake_dp, }; static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) @@ -519,6 +547,9 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) const intel_limit_t *limit; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &intel_limits_ironlake_lvds; + else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || + HAS_eDP) + limit = &intel_limits_ironlake_display_port; else limit = &intel_limits_ironlake_sdvo; @@ -791,7 +822,13 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, found = false; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { - if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == + int lvds_reg; + + if (IS_IRONLAKE(dev)) + lvds_reg = PCH_LVDS; + else + lvds_reg = LVDS; + if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) clock.p2 = limit->p2.p2_fast; else @@ -839,6 +876,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; intel_clock_t clock; + + /* return directly when it is eDP */ + if (HAS_eDP) + return true; + if (target < 200000) { clock.n = 1; clock.p1 = 2; @@ -857,68 +899,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, return true; } -static bool -intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, - int target, int refclk, intel_clock_t *best_clock) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - intel_clock_t clock; - int err_most = 47; - int err_min = 10000; - - /* eDP has only 2 clock choice, no n/m/p setting */ - if (HAS_eDP) - return true; - - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) - return intel_find_pll_ironlake_dp(limit, crtc, target, - refclk, best_clock); - - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { - if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == - LVDS_CLKB_POWER_UP) - clock.p2 = limit->p2.p2_fast; - else - clock.p2 = limit->p2.p2_slow; - } else { - if (target < limit->p2.dot_limit) - clock.p2 = limit->p2.p2_slow; - else - clock.p2 = limit->p2.p2_fast; - } - - memset(best_clock, 0, sizeof(*best_clock)); - for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { - /* based on hardware requriment prefer smaller n to precision */ - for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) { - /* based on hardware requirment prefere larger m1,m2 */ - for (clock.m1 = limit->m1.max; - clock.m1 >= limit->m1.min; clock.m1--) { - for (clock.m2 = limit->m2.max; - clock.m2 >= limit->m2.min; clock.m2--) { - int this_err; - - intel_clock(dev, refclk, &clock); - 
if (!intel_PLL_is_valid(crtc, &clock)) - continue; - this_err = abs((10000 - (target*10000/clock.dot))); - if (this_err < err_most) { - *best_clock = clock; - /* found on first matching */ - goto out; - } else if (this_err < err_min) { - *best_clock = clock; - err_min = this_err; - } - } - } - } - } -out: - return true; -} - /* DisplayPort has only two frequencies, 162MHz and 270MHz */ static bool intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, @@ -1493,6 +1473,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; u32 temp; int tries = 5, j, n; + u32 pipe_bpc; + + temp = I915_READ(pipeconf_reg); + pipe_bpc = temp & PIPE_BPC_MASK; /* XXX: When our outputs are all unaware of DPMS modes other than off * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. @@ -1524,6 +1508,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ temp = I915_READ(fdi_rx_reg); + /* + * make the BPC in FDI Rx be consistent with that in + * pipeconf reg. + */ + temp &= ~(0x7 << 16); + temp |= (pipe_bpc << 11); I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | FDI_SEL_PCDCLK | FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ @@ -1666,6 +1656,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) /* enable PCH transcoder */ temp = I915_READ(transconf_reg); + /* + * make the BPC in transcoder be consistent with + * that in pipeconf reg. + */ + temp &= ~PIPE_BPC_MASK; + temp |= pipe_bpc; I915_WRITE(transconf_reg, temp | TRANS_ENABLE); I915_READ(transconf_reg); @@ -1745,6 +1741,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) I915_READ(fdi_tx_reg); temp = I915_READ(fdi_rx_reg); + /* BPC in FDI rx is consistent with that in pipeconf */ + temp &= ~(0x07 << 16); + temp |= (pipe_bpc << 11); I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); I915_READ(fdi_rx_reg); @@ -1789,7 +1788,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) } } } - + temp = I915_READ(transconf_reg); + /* BPC in transcoder is consistent with that in pipeconf */ + temp &= ~PIPE_BPC_MASK; + temp |= pipe_bpc; + I915_WRITE(transconf_reg, temp); + I915_READ(transconf_reg); udelay(100); /* disable PCH DPLL */ @@ -2448,7 +2452,7 @@ static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock, * A value of 5us seems to be a good balance; safe for very low end * platforms but not overly aggressive on lower latency configs. */ -const static int latency_ns = 5000; +static const int latency_ns = 5000; static int i9xx_get_fifo_size(struct drm_device *dev, int plane) { @@ -2559,7 +2563,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, /* Calc sr entries for one plane configs */ if (sr_hdisplay && (!planea_clock || !planeb_clock)) { /* self-refresh has much higher latency */ - const static int sr_latency_ns = 12000; + static const int sr_latency_ns = 12000; sr_clock = planea_clock ? planea_clock : planeb_clock; line_time_us = ((sr_hdisplay * 1000) / sr_clock); @@ -2598,7 +2602,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, /* Calc sr entries for one plane configs */ if (sr_hdisplay && (!planea_clock || !planeb_clock)) { /* self-refresh has much higher latency */ - const static int sr_latency_ns = 12000; + static const int sr_latency_ns = 12000; sr_clock = planea_clock ? 
planea_clock : planeb_clock; line_time_us = ((sr_hdisplay * 1000) / sr_clock); @@ -2667,7 +2671,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, if (HAS_FW_BLC(dev) && sr_hdisplay && (!planea_clock || !planeb_clock)) { /* self-refresh has much higher latency */ - const static int sr_latency_ns = 6000; + static const int sr_latency_ns = 6000; sr_clock = planea_clock ? planea_clock : planeb_clock; line_time_us = ((sr_hdisplay * 1000) / sr_clock); @@ -2969,6 +2973,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* determine panel color depth */ temp = I915_READ(pipeconf_reg); + temp &= ~PIPE_BPC_MASK; + if (is_lvds) { + int lvds_reg = I915_READ(PCH_LVDS); + /* the BPC will be 6 if it is 18-bit LVDS panel */ + if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) + temp |= PIPE_8BPC; + else + temp |= PIPE_6BPC; + } else + temp |= PIPE_8BPC; + I915_WRITE(pipeconf_reg, temp); + I915_READ(pipeconf_reg); switch (temp & PIPE_BPC_MASK) { case PIPE_8BPC: @@ -3195,7 +3211,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, * appropriately here, but we need to look more thoroughly into how * panels behave in the two modes. */ - + /* set the dithering flag */ + if (IS_I965G(dev)) { + if (dev_priv->lvds_dither) { + if (IS_IRONLAKE(dev)) + pipeconf |= PIPE_ENABLE_DITHER; + else + lvds |= LVDS_ENABLE_DITHER; + } else { + if (IS_IRONLAKE(dev)) + pipeconf &= ~PIPE_ENABLE_DITHER; + else + lvds &= ~LVDS_ENABLE_DITHER; + } + } I915_WRITE(lvds_reg, lvds); I915_READ(lvds_reg); } @@ -3385,7 +3414,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, /* we only need to pin inside GTT if cursor is non-phy */ mutex_lock(&dev->struct_mutex); - if (!dev_priv->cursor_needs_physical) { + if (!dev_priv->info->cursor_needs_physical) { ret = i915_gem_object_pin(bo, PAGE_SIZE); if (ret) { DRM_ERROR("failed to pin cursor bo\n"); @@ -3420,7 +3449,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, I915_WRITE(base, addr); if (intel_crtc->cursor_bo) { - if (dev_priv->cursor_needs_physical) { + if (dev_priv->info->cursor_needs_physical) { if (intel_crtc->cursor_bo != bo) i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); } else @@ -3779,125 +3808,6 @@ static void intel_gpu_idle_timer(unsigned long arg) queue_work(dev_priv->wq, &dev_priv->idle_work); } -void intel_increase_renderclock(struct drm_device *dev, bool schedule) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - - if (IS_IRONLAKE(dev)) - return; - - if (!dev_priv->render_reclock_avail) { - DRM_DEBUG_DRIVER("not reclocking render clock\n"); - return; - } - - /* Restore render clock frequency to original value */ - if (IS_G4X(dev) || IS_I9XX(dev)) - pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock); - else if (IS_I85X(dev)) - pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock); - DRM_DEBUG_DRIVER("increasing render clock frequency\n"); - - /* Schedule downclock */ - if (schedule) - mod_timer(&dev_priv->idle_timer, jiffies + - msecs_to_jiffies(GPU_IDLE_TIMEOUT)); -} - -void intel_decrease_renderclock(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - - if (IS_IRONLAKE(dev)) - return; - - if (!dev_priv->render_reclock_avail) { - DRM_DEBUG_DRIVER("not reclocking render clock\n"); - return; - } - - if (IS_G4X(dev)) { - u16 gcfgc; - - /* Adjust render clock... */ - pci_read_config_word(dev->pdev, GCFGC, &gcfgc); - - /* Down to minimum... 
*/ - gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK; - gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ; - - pci_write_config_word(dev->pdev, GCFGC, gcfgc); - } else if (IS_I965G(dev)) { - u16 gcfgc; - - /* Adjust render clock... */ - pci_read_config_word(dev->pdev, GCFGC, &gcfgc); - - /* Down to minimum... */ - gcfgc &= ~I965_GC_RENDER_CLOCK_MASK; - gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ; - - pci_write_config_word(dev->pdev, GCFGC, gcfgc); - } else if (IS_I945G(dev) || IS_I945GM(dev)) { - u16 gcfgc; - - /* Adjust render clock... */ - pci_read_config_word(dev->pdev, GCFGC, &gcfgc); - - /* Down to minimum... */ - gcfgc &= ~I945_GC_RENDER_CLOCK_MASK; - gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ; - - pci_write_config_word(dev->pdev, GCFGC, gcfgc); - } else if (IS_I915G(dev)) { - u16 gcfgc; - - /* Adjust render clock... */ - pci_read_config_word(dev->pdev, GCFGC, &gcfgc); - - /* Down to minimum... */ - gcfgc &= ~I915_GC_RENDER_CLOCK_MASK; - gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ; - - pci_write_config_word(dev->pdev, GCFGC, gcfgc); - } else if (IS_I85X(dev)) { - u16 hpllcc; - - /* Adjust render clock... */ - pci_read_config_word(dev->pdev, HPLLCC, &hpllcc); - - /* Up to maximum... */ - hpllcc &= ~GC_CLOCK_CONTROL_MASK; - hpllcc |= GC_CLOCK_133_200; - - pci_write_config_word(dev->pdev, HPLLCC, hpllcc); - } - DRM_DEBUG_DRIVER("decreasing render clock frequency\n"); -} - -/* Note that no increase function is needed for this - increase_renderclock() - * will also rewrite these bits - */ -void intel_decrease_displayclock(struct drm_device *dev) -{ - if (IS_IRONLAKE(dev)) - return; - - if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) || - IS_I915GM(dev)) { - u16 gcfgc; - - /* Adjust render clock... */ - pci_read_config_word(dev->pdev, GCFGC, &gcfgc); - - /* Down to minimum... */ - gcfgc &= ~0xf0; - gcfgc |= 0x80; - - pci_write_config_word(dev->pdev, GCFGC, gcfgc); - } -} - #define CRTC_IDLE_TIMEOUT 1000 /* ms */ static void intel_crtc_idle_timer(unsigned long arg) @@ -4011,12 +3921,6 @@ static void intel_idle_update(struct work_struct *work) mutex_lock(&dev->struct_mutex); - /* GPU isn't processing, downclock it. 
*/ - if (!dev_priv->busy) { - intel_decrease_renderclock(dev); - intel_decrease_displayclock(dev); - } - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { /* Skip inactive CRTCs */ if (!crtc->fb) @@ -4050,13 +3954,11 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) if (!drm_core_check_feature(dev, DRIVER_MODESET)) return; - if (!dev_priv->busy) { + if (!dev_priv->busy) dev_priv->busy = true; - intel_increase_renderclock(dev, true); - } else { + else mod_timer(&dev_priv->idle_timer, jiffies + msecs_to_jiffies(GPU_IDLE_TIMEOUT)); - } list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { if (!crtc->fb) @@ -4400,29 +4302,43 @@ static void intel_setup_outputs(struct drm_device *dev) bool found = false; if (I915_READ(SDVOB) & SDVO_DETECTED) { + DRM_DEBUG_KMS("probing SDVOB\n"); found = intel_sdvo_init(dev, SDVOB); - if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) + if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { + DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); intel_hdmi_init(dev, SDVOB); + } - if (!found && SUPPORTS_INTEGRATED_DP(dev)) + if (!found && SUPPORTS_INTEGRATED_DP(dev)) { + DRM_DEBUG_KMS("probing DP_B\n"); intel_dp_init(dev, DP_B); + } } /* Before G4X SDVOC doesn't have its own detect register */ - if (I915_READ(SDVOB) & SDVO_DETECTED) + if (I915_READ(SDVOB) & SDVO_DETECTED) { + DRM_DEBUG_KMS("probing SDVOC\n"); found = intel_sdvo_init(dev, SDVOC); + } if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { - if (SUPPORTS_INTEGRATED_HDMI(dev)) + if (SUPPORTS_INTEGRATED_HDMI(dev)) { + DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); intel_hdmi_init(dev, SDVOC); - if (SUPPORTS_INTEGRATED_DP(dev)) + } + if (SUPPORTS_INTEGRATED_DP(dev)) { + DRM_DEBUG_KMS("probing DP_C\n"); intel_dp_init(dev, DP_C); + } } - if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) + if (SUPPORTS_INTEGRATED_DP(dev) && + (I915_READ(DP_D) & DP_DETECTED)) { + DRM_DEBUG_KMS("probing DP_D\n"); intel_dp_init(dev, DP_D); + } } else if (IS_I8XX(dev)) intel_dvo_init(dev); @@ -4527,6 +4443,42 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { .fb_changed = intelfb_probe, }; +static struct drm_gem_object * +intel_alloc_power_context(struct drm_device *dev) +{ + struct drm_gem_object *pwrctx; + int ret; + + pwrctx = drm_gem_object_alloc(dev, 4096); + if (!pwrctx) { + DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); + return NULL; + } + + mutex_lock(&dev->struct_mutex); + ret = i915_gem_object_pin(pwrctx, 4096); + if (ret) { + DRM_ERROR("failed to pin power context: %d\n", ret); + goto err_unref; + } + + ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1); + if (ret) { + DRM_ERROR("failed to set-domain on power context: %d\n", ret); + goto err_unpin; + } + mutex_unlock(&dev->struct_mutex); + + return pwrctx; + +err_unpin: + i915_gem_object_unpin(pwrctx); +err_unref: + drm_gem_object_unreference(pwrctx); + mutex_unlock(&dev->struct_mutex); + return NULL; +} + void intel_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -4579,42 +4531,27 @@ void intel_init_clock_gating(struct drm_device *dev) * GPU can automatically power down the render unit if given a page * to save state. 
*/ - if (I915_HAS_RC6(dev)) { - struct drm_gem_object *pwrctx; - struct drm_i915_gem_object *obj_priv; - int ret; + if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { + struct drm_i915_gem_object *obj_priv = NULL; if (dev_priv->pwrctx) { obj_priv = dev_priv->pwrctx->driver_private; } else { - pwrctx = drm_gem_object_alloc(dev, 4096); - if (!pwrctx) { - DRM_DEBUG("failed to alloc power context, " - "RC6 disabled\n"); - goto out; - } + struct drm_gem_object *pwrctx; - ret = i915_gem_object_pin(pwrctx, 4096); - if (ret) { - DRM_ERROR("failed to pin power context: %d\n", - ret); - drm_gem_object_unreference(pwrctx); - goto out; + pwrctx = intel_alloc_power_context(dev); + if (pwrctx) { + dev_priv->pwrctx = pwrctx; + obj_priv = pwrctx->driver_private; } - - i915_gem_object_set_to_gtt_domain(pwrctx, 1); - - dev_priv->pwrctx = pwrctx; - obj_priv = pwrctx->driver_private; } - I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); - I915_WRITE(MCHBAR_RENDER_STANDBY, - I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); + if (obj_priv) { + I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); + I915_WRITE(MCHBAR_RENDER_STANDBY, + I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); + } } - -out: - return; } /* Set up chip specific display functions */ @@ -4770,7 +4707,6 @@ void intel_modeset_cleanup(struct drm_device *dev) del_timer_sync(&intel_crtc->idle_timer); } - intel_increase_renderclock(dev, false); del_timer_sync(&dev_priv->idle_timer); if (dev_priv->display.disable_fbc) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 4e7aa8b7b93..1349d9fd01c 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1402,14 +1402,20 @@ intel_dp_init(struct drm_device *dev, int output_reg) break; case DP_B: case PCH_DP_B: + dev_priv->hotplug_supported_mask |= + HDMIB_HOTPLUG_INT_STATUS; name = "DPDDC-B"; break; case DP_C: case PCH_DP_C: + dev_priv->hotplug_supported_mask |= + HDMIC_HOTPLUG_INT_STATUS; name = "DPDDC-C"; break; case DP_D: case PCH_DP_D: + dev_priv->hotplug_supported_mask |= + HDMID_HOTPLUG_INT_STATUS; name = "DPDDC-D"; break; } diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index f04dbbe7d40..06431941b23 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -303,21 +303,26 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) if (sdvox_reg == SDVOB) { intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); + dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; } else if (sdvox_reg == SDVOC) { intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); + dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMIB) { intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, "HDMIB"); + dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMIC) { intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, "HDMIC"); + dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMID) { intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, "HDMID"); + dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; } if (!intel_output->ddc_bus) goto 
err_connector; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 3118ce274e6..f4b4aa242df 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -608,6 +608,13 @@ static const struct dmi_system_id bad_lid_status[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), }, }, + { + .ident = "PC-81005", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), + DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), + }, + }, { } }; @@ -679,7 +686,14 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, struct drm_i915_private *dev_priv = container_of(nb, struct drm_i915_private, lid_notifier); struct drm_device *dev = dev_priv->dev; + struct drm_connector *connector = dev_priv->int_lvds_connector; + /* + * check and update the status of the LVDS connector after receiving + * the LID notification event. + */ + if (connector) + connector->status = connector->funcs->detect(connector); if (!acpi_lid_open()) { dev_priv->modeset_on_lid = 1; return NOTIFY_OK; } @@ -854,65 +868,6 @@ static const struct dmi_system_id intel_no_lvds[] = { { } /* terminating entry */ }; -#ifdef CONFIG_ACPI -/* - * check_lid_device -- check whether @handle is an ACPI LID device. - * @handle: ACPI device handle - * @level : depth in the ACPI namespace tree - * @context: the number of LID device when we find the device - * @rv: a return value to fill if desired (Not used) - */ -static acpi_status -check_lid_device(acpi_handle handle, u32 level, void *context, - void **return_value) -{ - struct acpi_device *acpi_dev; - int *lid_present = context; - - acpi_dev = NULL; - /* Get the acpi device for device handle */ - if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) { - /* If there is no ACPI device for handle, return */ - return AE_OK; - } - - if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7)) - *lid_present = 1; - - return AE_OK; -} - -/** - * check whether there exists the ACPI LID device by enumerating the ACPI - * device tree. - */ -static int intel_lid_present(void) -{ - int lid_present = 0; - - if (acpi_disabled) { - /* If ACPI is disabled, there is no ACPI device tree to - * check, so assume the LID device would have been present. - */ - return 1; - } - - acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, - ACPI_UINT32_MAX, - check_lid_device, NULL, &lid_present, NULL); - - return lid_present; -} -#else -static int intel_lid_present(void) -{ - /* In the absence of ACPI built in, assume that the LID device would - * have been present. - */ - return 1; -} -#endif - /** * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID * @dev: drm device @@ -1031,12 +986,8 @@ void intel_lvds_init(struct drm_device *dev) if (dmi_check_system(intel_no_lvds)) return; - /* - * Assume LVDS is present if there's an ACPI lid device or if the - * device is present in the VBT. 
- */ - if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) { - DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n"); + if (!lvds_is_present_in_vbt(dev)) { + DRM_DEBUG_KMS("LVDS is not present in VBT\n"); return; } @@ -1180,6 +1131,8 @@ out: DRM_DEBUG_KMS("lid notifier registration failed\n"); dev_priv->lid_notifier.notifier_call = NULL; } + /* keep the LVDS connector */ + dev_priv->int_lvds_connector = connector; drm_sysfs_connector_add(connector); return; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 24a3dc99716..de5144c8c15 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2662,6 +2662,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) bool intel_sdvo_init(struct drm_device *dev, int output_device) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_connector *connector; struct intel_output *intel_output; struct intel_sdvo_priv *sdvo_priv; @@ -2708,10 +2709,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, "SDVOB/VGA DDC BUS"); + dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; } else { intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, "SDVOC/VGA DDC BUS"); + dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; } if (intel_output->ddc_bus == NULL) diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 552ec110b74..1d5b9b7b033 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1840,6 +1840,8 @@ intel_tv_init(struct drm_device *dev) drm_connector_attach_property(connector, dev->mode_config.tv_bottom_margin_property, tv_priv->margin[TV_MARGIN_BOTTOM]); + + dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS; out: drm_sysfs_connector_add(connector); } diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 321044bef71..41dd8ebff21 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -114,6 +114,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev i2c.i2c_id = gpio->sucI2cId.ucAccess; i2c.valid = true; + break; } } @@ -1026,6 +1027,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct ss->delay = ss_info->asSS_Info[i].ucSS_Delay; ss->range = ss_info->asSS_Info[i].ucSS_Range; ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; + break; } } } diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index fd94dbca33a..58f342659cc 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -595,6 +595,34 @@ bool radeon_combios_get_clock_info(struct drm_device *dev) return false; } +static const uint32_t default_primarydac_adj[CHIP_LAST] = { + 0x00000808, /* r100 */ + 0x00000808, /* rv100 */ + 0x00000808, /* rs100 */ + 0x00000808, /* rv200 */ + 0x00000808, /* rs200 */ + 0x00000808, /* r200 */ + 0x00000808, /* rv250 */ + 0x00000000, /* rs300 */ + 0x00000808, /* rv280 */ + 0x00000808, /* r300 */ + 0x00000808, /* r350 */ + 0x00000808, /* rv350 */ + 0x00000808, /* rv380 */ + 0x00000808, /* r420 */ + 0x00000808, /* r423 */ + 0x00000808, /* rv410 */ + 0x00000000, /* rs400 */ + 0x00000000, /* rs480 */ +}; + 
+static void radeon_legacy_get_primary_dac_info_from_table(struct radeon_device *rdev, + struct radeon_encoder_primary_dac *p_dac) +{ + p_dac->ps2_pdac_adj = default_primarydac_adj[rdev->family]; + return; +} + struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder) @@ -604,20 +632,20 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct uint16_t dac_info; uint8_t rev, bg, dac; struct radeon_encoder_primary_dac *p_dac = NULL; + int found = 0; - if (rdev->bios == NULL) + p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), + GFP_KERNEL); + + if (!p_dac) return NULL; + if (rdev->bios == NULL) + goto out; + /* check CRT table */ dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); if (dac_info) { - p_dac = - kzalloc(sizeof(struct radeon_encoder_primary_dac), - GFP_KERNEL); - - if (!p_dac) - return NULL; - rev = RBIOS8(dac_info) & 0x3; if (rev < 2) { bg = RBIOS8(dac_info + 0x2) & 0xf; @@ -628,9 +656,13 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct dac = RBIOS8(dac_info + 0x3) & 0xf; p_dac->ps2_pdac_adj = (bg << 8) | (dac); } - + found = 1; } +out: + if (!found) /* fallback to defaults */ + radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac); + return p_dac; } diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 20161567dbf..b82ae61d4d1 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -615,7 +615,7 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect ret = connector_status_connected; } } else { - if (radeon_connector->dac_load_detect) { + if (radeon_connector->dac_load_detect && encoder) { encoder_funcs = encoder->helper_private; ret = encoder_funcs->detect(encoder, connector); } diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index 0b2f9c2ad2c..06123ba31d3 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c @@ -2145,6 +2145,7 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master) &master_priv->sarea); if (ret) { DRM_ERROR("SAREA setup failed\n"); + kfree(master_priv); return ret; } master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 7c6848096bc..0c51f8e4661 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -733,16 +733,18 @@ void radeon_device_fini(struct radeon_device *rdev) */ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) { - struct radeon_device *rdev = dev->dev_private; + struct radeon_device *rdev; struct drm_crtc *crtc; int r; - if (dev == NULL || rdev == NULL) { + if (dev == NULL || dev->dev_private == NULL) { return -ENODEV; } if (state.event == PM_EVENT_PRETHAW) { return 0; } + rdev = dev->dev_private; + /* unpin the front buffers */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 91d72b70abc..1fb2f029d7e 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -329,8 +329,11 @@ static bool radeon_setup_enc_conn(struct drm_device *dev) ret = radeon_get_atom_connector_info_from_object_table(dev); 
else ret = radeon_get_atom_connector_info_from_supported_devices_table(dev); - } else + } else { ret = radeon_get_legacy_connector_info_from_bios(dev); + if (ret == false) + ret = radeon_get_legacy_connector_info_from_table(dev); + } } else { if (!ASIC_IS_AVIVO(rdev)) ret = radeon_get_legacy_connector_info_from_table(dev); diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 4cdd8b4f754..8495d4e32e1 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -140,16 +140,15 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence) bool radeon_fence_signaled(struct radeon_fence *fence) { - struct radeon_device *rdev = fence->rdev; unsigned long irq_flags; bool signaled = false; - if (rdev->gpu_lockup) { + if (!fence) return true; - } - if (fence == NULL) { + + if (fence->rdev->gpu_lockup) return true; - } + write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags); signaled = fence->signaled; /* if we are shutting down, report all fences as signaled */ diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c index b79ecc4a7cc..2f349a30019 100644 --- a/drivers/gpu/drm/radeon/radeon_irq.c +++ b/drivers/gpu/drm/radeon/radeon_irq.c @@ -289,16 +289,16 @@ int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_pr drm_radeon_irq_emit_t *emit = data; int result; - if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) - return -EINVAL; - - LOCK_TEST_WITH_RETURN(dev, file_priv); - if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) + return -EINVAL; + + LOCK_TEST_WITH_RETURN(dev, file_priv); + result = radeon_emit_irq(dev); if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 4f8ea426057..4245218e954 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -396,7 +396,7 @@ int rs600_irq_process(struct radeon_device *rdev) } while (status || r500_disp_int) { /* SW interrupt */ - if (G_000040_SW_INT_EN(status)) + if (G_000044_SW_INT(status)) radeon_fence_process(rdev); /* Vertical blank interrupts */ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 1c01b96c961..2d28d58200d 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -4684,6 +4684,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev, INIT_WORK(&hw->restart_work, sky2_restart); pci_set_drvdata(pdev, hw); + pdev->d3_delay = 150; return 0; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index c5df94e8667..807224ec835 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -75,7 +75,8 @@ static ssize_t local_cpus_show(struct device *dev, int len; #ifdef CONFIG_NUMA - mask = cpumask_of_node(dev_to_node(dev)); + mask = (dev_to_node(dev) == -1) ? cpu_online_mask : + cpumask_of_node(dev_to_node(dev)); #else mask = cpumask_of_pcibus(to_pci_dev(dev)->bus); #endif @@ -93,7 +94,8 @@ static ssize_t local_cpulist_show(struct device *dev, int len; #ifdef CONFIG_NUMA - mask = cpumask_of_node(dev_to_node(dev)); + mask = (dev_to_node(dev) == -1) ? 
cpu_online_mask : + cpumask_of_node(dev_to_node(dev)); #else mask = cpumask_of_pcibus(to_pci_dev(dev)->bus); #endif diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 0906599ebfd..315fea47e78 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -29,7 +29,17 @@ const char *pci_power_names[] = { }; EXPORT_SYMBOL_GPL(pci_power_names); -unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT; +unsigned int pci_pm_d3_delay; + +static void pci_dev_d3_sleep(struct pci_dev *dev) +{ + unsigned int delay = dev->d3_delay; + + if (delay < pci_pm_d3_delay) + delay = pci_pm_d3_delay; + + msleep(delay); +} #ifdef CONFIG_PCI_DOMAINS int pci_domains_supported = 1; @@ -522,7 +532,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) /* Mandatory power management transition delays */ /* see PCI PM 1.1 5.6.1 table 18 */ if (state == PCI_D3hot || dev->current_state == PCI_D3hot) - msleep(pci_pm_d3_delay); + pci_dev_d3_sleep(dev); else if (state == PCI_D2 || dev->current_state == PCI_D2) udelay(PCI_PM_D2_DELAY); @@ -1409,6 +1419,7 @@ void pci_pm_init(struct pci_dev *dev) } dev->pm_cap = pm; + dev->d3_delay = PCI_PM_D3_WAIT; dev->d1_support = false; dev->d2_support = false; @@ -2247,12 +2258,12 @@ static int pci_pm_reset(struct pci_dev *dev, int probe) csr &= ~PCI_PM_CTRL_STATE_MASK; csr |= PCI_D3hot; pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); - msleep(pci_pm_d3_delay); + pci_dev_d3_sleep(dev); csr &= ~PCI_PM_CTRL_STATE_MASK; csr |= PCI_D0; pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); - msleep(pci_pm_d3_delay); + pci_dev_d3_sleep(dev); return 0; } diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c index 797d47809f7..8c30a9544d6 100644 --- a/drivers/pci/pcie/aer/aer_inject.c +++ b/drivers/pci/pcie/aer/aer_inject.c @@ -321,7 +321,7 @@ static int aer_inject(struct aer_error_inj *einj) unsigned long flags; unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn); int pos_cap_err, rp_pos_cap_err; - u32 sever; + u32 sever, mask; int ret = 0; dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn); @@ -374,6 +374,24 @@ static int aer_inject(struct aer_error_inj *einj) err->header_log2 = einj->header_log2; err->header_log3 = einj->header_log3; + pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &mask); + if (einj->cor_status && !(einj->cor_status & ~mask)) { + ret = -EINVAL; + printk(KERN_WARNING "The correctable error(s) is masked " + "by device\n"); + spin_unlock_irqrestore(&inject_lock, flags); + goto out_put; + } + + pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK, &mask); + if (einj->uncor_status && !(einj->uncor_status & ~mask)) { + ret = -EINVAL; + printk(KERN_WARNING "The uncorrectable error(s) is masked " + "by device\n"); + spin_unlock_irqrestore(&inject_lock, flags); + goto out_put; + } + rperr = __find_aer_error_by_dev(rpdev); if (!rperr) { rperr = rperr_alloc; @@ -413,8 +431,14 @@ static int aer_inject(struct aer_error_inj *einj) if (ret) goto out_put; - if (find_aer_device(rpdev, &edev)) + if (find_aer_device(rpdev, &edev)) { + if (!get_service_data(edev)) { + printk(KERN_WARNING "AER service is not initialized\n"); + ret = -EINVAL; + goto out_put; + } aer_irq(-1, edev); + } else ret = -EINVAL; out_put: diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 413262eb95b..b174188ac12 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -27,7 +27,7 @@ */ static void release_pcie_device(struct device *dev) { - 
kfree(to_pcie_device(dev)); + kfree(to_pcie_device(dev)); } /** @@ -346,12 +346,11 @@ static int suspend_iter(struct device *dev, void *data) { struct pcie_port_service_driver *service_driver; - if ((dev->bus == &pcie_port_bus_type) && - (dev->driver)) { - service_driver = to_service_driver(dev->driver); - if (service_driver->suspend) - service_driver->suspend(to_pcie_device(dev)); - } + if ((dev->bus == &pcie_port_bus_type) && dev->driver) { + service_driver = to_service_driver(dev->driver); + if (service_driver->suspend) + service_driver->suspend(to_pcie_device(dev)); + } return 0; } @@ -494,6 +493,7 @@ int pcie_port_service_register(struct pcie_port_service_driver *new) return driver_register(&new->driver); } +EXPORT_SYMBOL(pcie_port_service_register); /** * pcie_port_service_unregister - unregister PCI Express port service driver @@ -503,6 +503,4 @@ void pcie_port_service_unregister(struct pcie_port_service_driver *drv) { driver_unregister(&drv->driver); } - -EXPORT_SYMBOL(pcie_port_service_register); EXPORT_SYMBOL(pcie_port_service_unregister); diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 34d65172a4d..13c8972886e 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -63,7 +63,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = { * pcie_portdrv_probe - Probe PCI-Express port devices * @dev: PCI-Express port device being probed * - * If detected invokes the pcie_port_device_register() method for + * If detected invokes the pcie_port_device_register() method for * this port device. * */ @@ -78,7 +78,7 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev, (dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))) return -ENODEV; - if (!dev->irq && dev->pin) { + if (!dev->irq && dev->pin) { dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; " "check vendor BIOS\n", dev->vendor, dev->device); } @@ -91,7 +91,7 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev, return 0; } -static void pcie_portdrv_remove (struct pci_dev *dev) +static void pcie_portdrv_remove(struct pci_dev *dev) { pcie_port_device_remove(dev); pci_disable_device(dev); @@ -129,14 +129,13 @@ static int error_detected_iter(struct device *device, void *data) static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev, enum pci_channel_state error) { - struct aer_broadcast_data result_data = - {error, PCI_ERS_RESULT_CAN_RECOVER}; - int retval; + struct aer_broadcast_data data = {error, PCI_ERS_RESULT_CAN_RECOVER}; + int ret; /* can not fail */ - retval = device_for_each_child(&dev->dev, &result_data, error_detected_iter); + ret = device_for_each_child(&dev->dev, &data, error_detected_iter); - return result_data.result; + return data.result; } static int mmio_enabled_iter(struct device *device, void *data) @@ -290,7 +289,7 @@ static int __init pcie_portdrv_init(void) return retval; } -static void __exit pcie_portdrv_exit(void) +static void __exit pcie_portdrv_exit(void) { pci_unregister_driver(&pcie_portdriver); pcie_port_bus_unregister(); diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 5b648f0c607..ad4c414dbfb 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -393,8 +393,6 @@ static void hp_wmi_notify(u32 value, void *context) } else printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n", eventcode); - - kfree(obj); } static int __init hp_wmi_input_setup(void) diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c index 
26ffdcd5a43..15a00e8b712 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_offload.c +++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c @@ -1440,6 +1440,10 @@ void cxgb3i_c3cn_release(struct s3_conn *c3cn) static int is_cxgb3_dev(struct net_device *dev) { struct cxgb3i_sdev_data *cdata; + struct net_device *ndev = dev; + + if (dev->priv_flags & IFF_802_1Q_VLAN) + ndev = vlan_dev_real_dev(dev); write_lock(&cdata_rwlock); list_for_each_entry(cdata, &cdata_list, list) { @@ -1447,7 +1451,7 @@ static int is_cxgb3_dev(struct net_device *dev) int i; for (i = 0; i < ports->nports; i++) - if (dev == ports->lldevs[i]) { + if (ndev == ports->lldevs[i]) { write_unlock(&cdata_rwlock); return 1; } @@ -1566,6 +1570,26 @@ out_err: return -EINVAL; } +/** + * cxgb3i_find_dev - find the interface associated with the given address + * @ipaddr: ip address + */ +static struct net_device * +cxgb3i_find_dev(struct net_device *dev, __be32 ipaddr) +{ + struct flowi fl; + int err; + struct rtable *rt; + + memset(&fl, 0, sizeof(fl)); + fl.nl_u.ip4_u.daddr = ipaddr; + + err = ip_route_output_key(dev ? dev_net(dev) : &init_net, &rt, &fl); + if (!err) + return (&rt->u.dst)->dev; + + return NULL; +} /** * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address @@ -1581,6 +1605,7 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn, struct cxgb3i_sdev_data *cdata; struct t3cdev *cdev; __be32 sipv4; + struct net_device *dstdev; int err; c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev); @@ -1591,6 +1616,13 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn, c3cn->daddr.sin_port = usin->sin_port; c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr; + dstdev = cxgb3i_find_dev(dev, usin->sin_addr.s_addr); + if (!dstdev || !is_cxgb3_dev(dstdev)) + return -ENETUNREACH; + + if (dstdev->priv_flags & IFF_802_1Q_VLAN) + dev = dstdev; + rt = find_route(dev, c3cn->saddr.sin_addr.s_addr, c3cn->daddr.sin_addr.s_addr, c3cn->saddr.sin_port, diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index ce522702a6c..2cc39684ce9 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -4142,8 +4142,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, spin_lock_irq(shost->host_lock); if (vport->fc_rscn_flush) { /* Another thread is walking fc_rscn_id_list on this vport */ - spin_unlock_irq(shost->host_lock); vport->fc_flag |= FC_RSCN_DISCOVERY; + spin_unlock_irq(shost->host_lock); /* Send back ACC */ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); return 0; @@ -5948,8 +5948,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_initial_fdisc(vport); break; } - } else { + vport->vpi_state |= LPFC_VPI_REGISTERED; if (vport == phba->pport) if (phba->sli_rev < LPFC_SLI_REV4) lpfc_issue_fabric_reglogin(vport); diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 3b942442765..2445e399fd6 100755 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -747,6 +747,10 @@ lpfc_linkdown(struct lpfc_hba *phba) if (phba->link_state == LPFC_LINK_DOWN) return 0; + + /* Block all SCSI stack I/Os */ + lpfc_scsi_dev_block(phba); + spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED); if (phba->link_state > LPFC_LINK_DOWN) { @@ -1555,10 +1559,16 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) * to book keeping the FCFIs can be used. 
*/ if (shdr_status || shdr_add_status) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2521 READ_FCF_RECORD mailbox failed " - "with status x%x add_status x%x, mbx\n", - shdr_status, shdr_add_status); + if (shdr_status == STATUS_FCF_TABLE_EMPTY) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2726 READ_FCF_RECORD Indicates empty " + "FCF table.\n"); + } else { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2521 READ_FCF_RECORD mailbox failed " + "with status x%x add_status x%x, mbx\n", + shdr_status, shdr_add_status); + } goto out; } /* Interpreting the returned information of FCF records */ @@ -1698,7 +1708,9 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) lpfc_vport_set_state(vport, FC_VPORT_FAILED); return; } + spin_lock_irq(&phba->hbalock); vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; + spin_unlock_irq(&phba->hbalock); if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) lpfc_initial_fdisc(vport); @@ -2259,7 +2271,10 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) mb->mbxStatus); break; } + spin_lock_irq(&phba->hbalock); vport->vpi_state &= ~LPFC_VPI_REGISTERED; + vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(&phba->hbalock); vport->unreg_vpi_cmpl = VPORT_OK; mempool_free(pmb, phba->mbox_mem_pool); /* @@ -4475,8 +4490,10 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba) (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { lpfc_mbx_unreg_vpi(vports[i]); + spin_lock_irq(&phba->hbalock); vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; + spin_unlock_irq(&phba->hbalock); } lpfc_destroy_vport_work_array(phba, vports); diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 1585148a17e..8a2a1c5935c 100644..100755 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -1013,7 +1013,7 @@ struct lpfc_mbx_wq_destroy { }; #define LPFC_HDR_BUF_SIZE 128 -#define LPFC_DATA_BUF_SIZE 4096 +#define LPFC_DATA_BUF_SIZE 2048 struct rq_context { uint32_t word0; #define lpfc_rq_context_rq_size_SHIFT 16 @@ -1371,6 +1371,7 @@ struct lpfc_mbx_query_fw_cfg { #define STATUS_ERROR_ACITMAIN 0x2a #define STATUS_REBOOT_REQUIRED 0x2c #define STATUS_FCF_IN_USE 0x3a +#define STATUS_FCF_TABLE_EMPTY 0x43 struct lpfc_mbx_sli4_config { struct mbox_header header; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index d4da6bdd0e7..b8eb1b6e5e7 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -3006,6 +3006,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, struct lpfc_vport *vport; struct lpfc_nodelist *ndlp; struct Scsi_Host *shost; + uint32_t link_state; phba->fc_eventTag = acqe_fcoe->event_tag; phba->fcoe_eventtag = acqe_fcoe->event_tag; @@ -3052,9 +3053,12 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, break; /* * Currently, driver support only one FCF - so treat this as - * a link down. + * a link down, but save the link state because we don't want + * it to be changed to Link Down unless it is already down. 
*/ + link_state = phba->link_state; lpfc_linkdown(phba); + phba->link_state = link_state; /* Unregister FCF if no devices connected to it */ lpfc_unregister_unused_fcf(phba); break; @@ -7226,8 +7230,6 @@ lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2711 PCI channel permanent disable for failure\n"); - /* Block all SCSI devices' I/Os on the host */ - lpfc_scsi_dev_block(phba); /* Clean up all driver's outstanding SCSI I/Os */ lpfc_sli_flush_fcp_rings(phba); } @@ -7256,6 +7258,9 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + /* Block all SCSI devices' I/Os on the host */ + lpfc_scsi_dev_block(phba); + switch (state) { case pci_channel_io_normal: /* Non-fatal error, prepare for recovery */ @@ -7507,6 +7512,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) error = -ENODEV; goto out_free_sysfs_attr; } + /* Default to single FCP EQ for non-MSI-X */ + if (phba->intr_type != MSIX) + phba->cfg_fcp_eq_count = 1; /* Set up SLI-4 HBA */ if (lpfc_sli4_hba_setup(phba)) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 7935667b81a..589549b2bf0 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1383,7 +1383,7 @@ lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, /* HBQ for ELS and CT traffic. */ static struct lpfc_hbq_init lpfc_els_hbq = { .rn = 1, - .entry_count = 200, + .entry_count = 256, .mask_count = 0, .profile = 0, .ring_mask = (1 << LPFC_ELS_RING), @@ -1482,8 +1482,11 @@ err: int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) { - return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno, - lpfc_hbq_defs[qno]->add_count)); + if (phba->sli_rev == LPFC_SLI_REV4) + return 0; + else + return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, + lpfc_hbq_defs[qno]->add_count); } /** @@ -1498,8 +1501,12 @@ lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) static int lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) { - return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno, - lpfc_hbq_defs[qno]->init_count)); + if (phba->sli_rev == LPFC_SLI_REV4) + return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, + lpfc_hbq_defs[qno]->entry_count); + else + return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, + lpfc_hbq_defs[qno]->init_count); } /** @@ -4110,6 +4117,7 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, if (rc) { dma_free_coherent(&phba->pcidev->dev, dma_size, dmabuf->virt, dmabuf->phys); + kfree(dmabuf); return -EIO; } @@ -5848,7 +5856,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, iocbq->iocb.un.ulpWord[3]); wqe->generic.word3 = 0; bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext); - bf_set(wqe_xc, &wqe->generic, 1); /* The entire sequence is transmitted for this IOCB */ xmit_len = total_len; cmnd = CMD_XMIT_SEQUENCE64_CR; @@ -10944,7 +10951,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) return dmabuf; } temp_hdr = seq_dmabuf->hbuf.virt; - if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) { + if (be16_to_cpu(new_hdr->fh_seq_cnt) < + be16_to_cpu(temp_hdr->fh_seq_cnt)) { list_del_init(&seq_dmabuf->hbuf.list); list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); @@ -10955,6 +10963,11 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, 
struct hbq_dmabuf *dmabuf) list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); seq_dmabuf->time_stamp = jiffies; lpfc_update_rcv_time_stamp(vport); + if (list_empty(&seq_dmabuf->dbuf.list)) { + temp_hdr = dmabuf->hbuf.virt; + list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); + return seq_dmabuf; + } /* find the correct place in the sequence to insert this frame */ list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); @@ -10963,7 +10976,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) * If the frame's sequence count is greater than the frame on * the list then insert the frame right after this frame */ - if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) { + if (be16_to_cpu(new_hdr->fh_seq_cnt) > + be16_to_cpu(temp_hdr->fh_seq_cnt)) { list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); return seq_dmabuf; } @@ -11210,7 +11224,7 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf) seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; /* If there is a hole in the sequence count then fail. */ - if (++seq_count != hdr->fh_seq_cnt) + if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) return 0; fctl = (hdr->fh_f_ctl[0] << 16 | hdr->fh_f_ctl[1] << 8 | @@ -11242,6 +11256,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) struct lpfc_iocbq *first_iocbq, *iocbq; struct fc_frame_header *fc_hdr; uint32_t sid; + struct ulp_bde64 *pbde; fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; /* remove from receive buffer list */ @@ -11283,8 +11298,9 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) if (!iocbq->context3) { iocbq->context3 = d_buf; iocbq->iocb.ulpBdeCount++; - iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize = - LPFC_DATA_BUF_SIZE; + pbde = (struct ulp_bde64 *) + &iocbq->iocb.unsli3.sli3Words[4]; + pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; first_iocbq->iocb.unsli3.rcvsli3.acc_len += bf_get(lpfc_rcqe_length, &seq_dmabuf->cq_event.cqe.rcqe_cmpl); @@ -11401,15 +11417,9 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, return; } /* If not last frame in sequence continue processing frames. */ - if (!lpfc_seq_complete(seq_dmabuf)) { - /* - * When saving off frames post a new one and mark this - * frame to be freed when it is finished. - **/ - lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1); - dmabuf->tag = -1; + if (!lpfc_seq_complete(seq_dmabuf)) return; - } + /* Send the complete sequence to the upper layer protocol */ lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); } diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 25d66d070cf..44e5f574236 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -28,7 +28,7 @@ /* Multi-queue arrangement for fast-path FCP work queues */ #define LPFC_FN_EQN_MAX 8 #define LPFC_SP_EQN_DEF 1 -#define LPFC_FP_EQN_DEF 1 +#define LPFC_FP_EQN_DEF 4 #define LPFC_FP_EQN_MIN 1 #define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF) diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index c7f3aed2aab..792f72263f1 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.6" +#define LPFC_DRIVER_VERSION "8.3.7" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 7d6dd83d359..e3c7fa64230 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -512,8 +512,10 @@ enable_vport(struct fc_vport *fc_vport) return VPORT_OK; } + spin_lock_irq(&phba->hbalock); vport->load_flag |= FC_LOADING; vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(&phba->hbalock); /* Use the Physical nodes Fabric NDLP to determine if the link is * up and ready to FDISC. @@ -700,7 +702,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) } spin_unlock_irq(&phba->ndlp_lock); } - if (vport->vpi_state != LPFC_VPI_REGISTERED) + if (!(vport->vpi_state & LPFC_VPI_REGISTERED)) goto skip_logo; vport->unreg_vpi_cmpl = VPORT_INVAL; timeout = msecs_to_jiffies(phba->fc_ratov * 2000); diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index e7d2688fbeb..b6f1ef954af 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -2483,14 +2483,12 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd) sense_copied = 1; } - if (RES_IS_GSCSI(res->cfg_entry)) { + if (RES_IS_GSCSI(res->cfg_entry)) pmcraid_cancel_all(cmd, sense_copied); - } else if (sense_copied) { + else if (sense_copied) pmcraid_erp_done(cmd); - return 0; - } else { + else pmcraid_request_sense(cmd); - } return 1; diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 21e2bc4d740..3a9f5b288ae 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -232,6 +232,9 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj, if (off) return 0; + if (unlikely(pci_channel_offline(ha->pdev))) + return 0; + if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1) return -EINVAL; if (start > ha->optrom_size) @@ -379,6 +382,9 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; + if (unlikely(pci_channel_offline(ha->pdev))) + return 0; + if (!capable(CAP_SYS_ADMIN)) return 0; @@ -398,6 +404,9 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj, struct qla_hw_data *ha = vha->hw; uint8_t *tmp_data; + if (unlikely(pci_channel_offline(ha->pdev))) + return 0; + if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size || !ha->isp_ops->write_nvram) return 0; @@ -1238,10 +1247,11 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); - int rval; + int rval = QLA_FUNCTION_FAILED; uint16_t state[5]; - rval = qla2x00_get_firmware_state(vha, state); + if (!vha->hw->flags.eeh_busy) + rval = qla2x00_get_firmware_state(vha, state); if (rval != QLA_SUCCESS) memset(state, -1, sizeof(state)); @@ -1452,10 +1462,13 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) if (!fcport) return; - if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) + if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) + return; + + if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); - else - qla2x00_abort_fcport_cmds(fcport); + return; + } /* * Transport has effectively 'deleted' the rport, clear @@ -1475,6 +1488,9 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) if (!fcport) return; + if 
(test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) + return; + if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); return; @@ -1515,6 +1531,12 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) pfc_host_stat = &ha->fc_host_stat; memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); + if (test_bit(UNLOADING, &vha->dpc_flags)) + goto done; + + if (unlikely(pci_channel_offline(ha->pdev))) + goto done; + stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma); if (stats == NULL) { DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n", diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index f660dd70b72..d6d9c86cb05 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -26,7 +26,7 @@ /* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */ /* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */ /* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */ -/* #define QL_DEBUG_LEVEL_17 */ /* Output MULTI-Q trace messages */ +/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */ /* * Macros use for debugging the driver. @@ -132,6 +132,13 @@ #else #define DEBUG16(x) do {} while (0) #endif + +#if defined(QL_DEBUG_LEVEL_17) +#define DEBUG17(x) do {x;} while (0) +#else +#define DEBUG17(x) do {} while (0) +#endif + /* * Firmware Dump structure definition */ diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 384afda7dbe..608e675f68c 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2256,11 +2256,13 @@ struct qla_hw_data { uint32_t disable_serdes :1; uint32_t gpsc_supported :1; uint32_t npiv_supported :1; + uint32_t pci_channel_io_perm_failure :1; uint32_t fce_enabled :1; uint32_t fac_supported :1; uint32_t chip_reset_done :1; uint32_t port0 :1; uint32_t running_gold_fw :1; + uint32_t eeh_busy :1; uint32_t cpu_affinity_enabled :1; uint32_t disable_msix_handshake :1; } flags; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 0b6801fc638..f61fb8d0133 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -324,6 +324,7 @@ qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *); extern int qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t); +extern int qla2x00_get_data_rate(scsi_qla_host_t *); /* * Global Function Prototypes in qla_isr.c source file. */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 73a793539d4..b4a0eac8f96 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -269,6 +269,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) vha->flags.online = 0; ha->flags.chip_reset_done = 0; vha->flags.reset_active = 0; + ha->flags.pci_channel_io_perm_failure = 0; + ha->flags.eeh_busy = 0; atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); atomic_set(&vha->loop_state, LOOP_DOWN); vha->device_flags = DFLG_NO_CABLE; @@ -581,6 +583,9 @@ qla2x00_reset_chip(scsi_qla_host_t *vha) uint32_t cnt; uint16_t cmd; + if (unlikely(pci_channel_offline(ha->pdev))) + return; + ha->isp_ops->disable_intrs(ha); spin_lock_irqsave(&ha->hardware_lock, flags); @@ -786,6 +791,12 @@ void qla24xx_reset_chip(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; + + if (pci_channel_offline(ha->pdev) && + ha->flags.pci_channel_io_perm_failure) { + return; + } + ha->isp_ops->disable_intrs(ha); /* Perform RISC reset. 
*/ @@ -2266,6 +2277,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); clear_bit(RSCN_UPDATE, &vha->dpc_flags); + qla2x00_get_data_rate(vha); + /* Determine what we need to do */ if (ha->current_topology == ISP_CFG_FL && (test_bit(LOCAL_LOOP_UPDATE, &flags))) { @@ -3560,6 +3573,13 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) /* Requeue all commands in outstanding command list. */ qla2x00_abort_all_cmds(vha, DID_RESET << 16); + if (unlikely(pci_channel_offline(ha->pdev) && + ha->flags.pci_channel_io_perm_failure)) { + clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); + status = 0; + return status; + } + ha->isp_ops->get_flash_version(vha, req->ring); ha->isp_ops->nvram_config(vha); @@ -4458,6 +4478,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) int ret, retries; struct qla_hw_data *ha = vha->hw; + if (ha->flags.pci_channel_io_perm_failure) + return; if (!IS_FWI2_CAPABLE(ha)) return; if (!ha->fw_major_version) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 1692a883f4d..ffd0efdff40 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -152,7 +152,7 @@ qla2300_intr_handler(int irq, void *dev_id) for (iter = 50; iter--; ) { stat = RD_REG_DWORD(&reg->u.isp2300.host_status); if (stat & HSR_RISC_PAUSED) { - if (pci_channel_offline(ha->pdev)) + if (unlikely(pci_channel_offline(ha->pdev))) break; hccr = RD_REG_WORD(&reg->hccr); @@ -1846,12 +1846,15 @@ qla24xx_intr_handler(int irq, void *dev_id) reg = &ha->iobase->isp24; status = 0; + if (unlikely(pci_channel_offline(ha->pdev))) + return IRQ_HANDLED; + spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { stat = RD_REG_DWORD(&reg->host_status); if (stat & HSRX_RISC_PAUSED) { - if (pci_channel_offline(ha->pdev)) + if (unlikely(pci_channel_offline(ha->pdev))) break; hccr = RD_REG_DWORD(&reg->hccr); @@ -1992,7 +1995,7 @@ qla24xx_msix_default(int irq, void *dev_id) do { stat = RD_REG_DWORD(&reg->host_status); if (stat & HSRX_RISC_PAUSED) { - if (pci_channel_offline(ha->pdev)) + if (unlikely(pci_channel_offline(ha->pdev))) break; hccr = RD_REG_DWORD(&reg->hccr); diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 05d595d9a7e..056e4d4505f 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -56,6 +56,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no)); + if (ha->flags.pci_channel_io_perm_failure) { + DEBUG(printk("%s(%ld): Perm failure on EEH, timeout MBX " + "Exiting.\n", __func__, vha->host_no)); + return QLA_FUNCTION_TIMEOUT; + } + /* * Wait for active mailbox commands to finish by waiting at most tov * seconds. This is to serialize actual issuing of mailbox cmds during @@ -154,10 +160,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) /* Check for pending interrupts. 
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 05d595d9a7e..056e4d4505f 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -56,6 +56,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 	DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no));
 
+	if (ha->flags.pci_channel_io_perm_failure) {
+		DEBUG(printk("%s(%ld): Perm failure on EEH, timeout MBX "
+		    "Exiting.\n", __func__, vha->host_no));
+		return QLA_FUNCTION_TIMEOUT;
+	}
+
 	/*
 	 * Wait for active mailbox commands to finish by waiting at most tov
 	 * seconds. This is to serialize actual issuing of mailbox cmds during
@@ -154,10 +160,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 			/* Check for pending interrupts. */
 			qla2x00_poll(ha->rsp_q_map[0]);
 
-			if (command != MBC_LOAD_RISC_RAM_EXTENDED &&
-			    !ha->flags.mbox_int)
+			if (!ha->flags.mbox_int &&
+			    !(IS_QLA2200(ha) &&
+			    command == MBC_LOAD_RISC_RAM_EXTENDED))
 				msleep(10);
 		} /* while */
+		DEBUG17(qla_printk(KERN_WARNING, ha,
+		    "Waited %d sec\n",
+		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)));
 	}
 
 	/* Check whether we timed out */
@@ -227,7 +237,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 
 	if (rval == QLA_FUNCTION_TIMEOUT &&
 	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
-		if (!io_lock_on || (mcp->flags & IOCTL_CMD)) {
+		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
+		    ha->flags.eeh_busy) {
 			/* not in dpc. schedule it for dpc to take over. */
 			DEBUG(printk("%s(%ld): timeout schedule "
 			    "isp_abort_needed.\n", __func__,
@@ -237,7 +248,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 			    base_vha->host_no));
 			qla_printk(KERN_WARNING, ha,
 			    "Mailbox command timeout occurred. Scheduling ISP "
-			    "abort.\n");
+			    "abort. eeh_busy: 0x%x\n", ha->flags.eeh_busy);
 			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
 			qla2xxx_wake_dpc(vha);
 		} else if (!abort_active) {
@@ -2530,6 +2541,9 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
 	if (!IS_FWI2_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
+	if (unlikely(pci_channel_offline(vha->hw->pdev)))
+		return QLA_FUNCTION_FAILED;
+
 	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2565,6 +2579,9 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
 	if (!IS_FWI2_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
+	if (unlikely(pci_channel_offline(vha->hw->pdev)))
+		return QLA_FUNCTION_FAILED;
+
 	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2595,6 +2612,9 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
 	if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
+	if (unlikely(pci_channel_offline(vha->hw->pdev)))
+		return QLA_FUNCTION_FAILED;
+
 	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2639,6 +2659,9 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
 	if (!IS_FWI2_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
+	if (unlikely(pci_channel_offline(vha->hw->pdev)))
+		return QLA_FUNCTION_FAILED;
+
 	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -3643,3 +3666,36 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
 
 	return rval;
 }
+
+int
+qla2x00_get_data_rate(scsi_qla_host_t *vha)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_FWI2_CAPABLE(ha))
+		return QLA_FUNCTION_FAILED;
+
+	DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no));
+
+	mcp->mb[0] = MBC_DATA_RATE;
+	mcp->mb[1] = 0;
+	mcp->out_mb = MBX_1|MBX_0;
+	mcp->in_mb = MBX_2|MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n",
+		    __func__, vha->host_no, rval, mcp->mb[0]));
+	} else {
+		DEBUG11(printk(KERN_INFO
+		    "%s(%ld): done.\n", __func__, vha->host_no));
+		if (mcp->mb[1] != 0x7)
+			ha->link_data_rate = mcp->mb[1];
+	}
+
+	return rval;
+}
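The "Waited %d sec" trace above leans on standard jiffies arithmetic: assuming wait_time was set to jiffies + mcp->tov * HZ when the mailbox command started (the usual pattern for such deadlines), subtracting tov * HZ back out recovers the start time, so the whole expression is elapsed seconds. A standalone sketch of the same arithmetic with made-up numbers:

#include <stdio.h>

#define HZ 250					/* example tick rate */

int main(void)
{
	unsigned long tov = 30;			/* timeout, in seconds */
	unsigned long start = 100000;		/* jiffies when the command began */
	unsigned long wait_time = start + tov * HZ;	/* the deadline */
	unsigned long jiffies = start + 5 * HZ;	/* pretend 5 seconds passed */

	/* (wait_time - tov * HZ) == start, so this prints "Waited 5 sec" */
	printf("Waited %u sec\n",
	       (unsigned)((jiffies - (wait_time - tov * HZ)) / HZ));
	return 0;
}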
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 2a4c7f4e7b6..b901aa267e7 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -639,8 +639,10 @@ static void qla_do_work(struct work_struct *work)
 	struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
 	struct scsi_qla_host *vha;
 
+	spin_lock_irq(&rsp->hw->hardware_lock);
 	vha = qla25xx_get_host(rsp);
 	qla24xx_process_response_queue(vha, rsp);
+	spin_unlock_irq(&rsp->hw->hardware_lock);
 }
 
 /* create response queue */
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2f873d23732..209f50e788a 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -475,11 +475,11 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 	srb_t *sp;
 	int rval;
 
-	if (unlikely(pci_channel_offline(ha->pdev))) {
-		if (ha->pdev->error_state == pci_channel_io_frozen)
-			cmd->result = DID_REQUEUE << 16;
-		else
+	if (ha->flags.eeh_busy) {
+		if (ha->flags.pci_channel_io_perm_failure)
 			cmd->result = DID_NO_CONNECT << 16;
+		else
+			cmd->result = DID_REQUEUE << 16;
 		goto qc24_fail_command;
 	}
 
@@ -552,8 +552,15 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
 #define ABORT_POLLING_PERIOD	1000
 #define ABORT_WAIT_ITER		((10 * 1000) / (ABORT_POLLING_PERIOD))
 	unsigned long wait_iter = ABORT_WAIT_ITER;
+	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+	struct qla_hw_data *ha = vha->hw;
 	int ret = QLA_SUCCESS;
 
+	if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
+		DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n"));
+		return ret;
+	}
+
 	while (CMD_SP(cmd) && wait_iter--) {
 		msleep(ABORT_POLLING_PERIOD);
 	}
@@ -1810,6 +1817,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	/* Set ISP-type information. */
 	qla2x00_set_isp_flags(ha);
+
+	/* Set EEH reset type to fundamental if required by the HBA. */
+	if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
+		pdev->needs_freset = 1;
+		pci_save_state(pdev);
+	}
+
 	/* Configure PCI I/O space */
 	ret = qla2x00_iospace_config(ha);
 	if (ret)
@@ -2174,6 +2188,24 @@ qla2x00_free_device(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
 
+	qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+
+	/* Disable timer */
+	if (vha->timer_active)
+		qla2x00_stop_timer(vha);
+
+	/* Kill the kernel thread for this host */
+	if (ha->dpc_thread) {
+		struct task_struct *t = ha->dpc_thread;
+
+		/*
+		 * qla2xxx_wake_dpc checks for ->dpc_thread
+		 * so we need to zero it out.
+		 */
+		ha->dpc_thread = NULL;
+		kthread_stop(t);
+	}
+
 	qla25xx_delete_queues(vha);
 
 	if (ha->flags.fce_enabled)
@@ -2185,6 +2217,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
 	/* Stop currently executing firmware. */
 	qla2x00_try_to_stop_firmware(vha);
 
+	vha->flags.online = 0;
+
 	/* turn-off interrupts on the card */
 	if (ha->interrupts_on)
 		ha->isp_ops->disable_intrs(ha);
@@ -2859,6 +2893,13 @@ qla2x00_do_dpc(void *data)
 		if (!base_vha->flags.init_done)
 			continue;
 
+		if (ha->flags.eeh_busy) {
+			DEBUG17(qla_printk(KERN_WARNING, ha,
+			    "qla2x00_do_dpc: dpc_flags: %lx\n",
+			    base_vha->dpc_flags));
+			continue;
+		}
+
 		DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
 
 		ha->dpc_active = 1;
@@ -3049,8 +3090,13 @@ qla2x00_timer(scsi_qla_host_t *vha)
 	int		index;
 	srb_t		*sp;
 	int		t;
+	uint16_t	w;
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req;
+
+	/* Hardware read to raise pending EEH errors during mailbox waits. */
+	if (!pci_channel_offline(ha->pdev))
+		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
 
 	/*
 	 * Ports - Port down timer.
 	 *
@@ -3252,16 +3298,23 @@ qla2x00_release_firmware(void)
 static pci_ers_result_t
 qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 {
-	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+	scsi_qla_host_t *vha = pci_get_drvdata(pdev);
+	struct qla_hw_data *ha = vha->hw;
+
+	DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n",
+	    state));
 
 	switch (state) {
 	case pci_channel_io_normal:
+		ha->flags.eeh_busy = 0;
 		return PCI_ERS_RESULT_CAN_RECOVER;
 	case pci_channel_io_frozen:
+		ha->flags.eeh_busy = 1;
 		pci_disable_device(pdev);
 		return PCI_ERS_RESULT_NEED_RESET;
 	case pci_channel_io_perm_failure:
-		qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
+		ha->flags.pci_channel_io_perm_failure = 1;
+		qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 	return PCI_ERS_RESULT_NEED_RESET;
@@ -3312,6 +3365,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
 	struct qla_hw_data *ha = base_vha->hw;
 	int rc;
 
+	DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
+
 	if (ha->mem_only)
 		rc = pci_enable_device_mem(pdev);
 	else
@@ -3320,19 +3375,33 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
 	if (rc) {
 		qla_printk(KERN_WARNING, ha,
 		    "Can't re-enable PCI device after reset.\n");
-
 		return ret;
 	}
-	pci_set_master(pdev);
 
 	if (ha->isp_ops->pci_config(base_vha))
 		return ret;
 
+#ifdef QL_DEBUG_LEVEL_17
+	{
+		uint8_t b;
+		uint32_t i;
+
+		printk("slot_reset_1: ");
+		for (i = 0; i < 256; i++) {
+			pci_read_config_byte(ha->pdev, i, &b);
+			printk("%s%02x", (i%16) ? " " : "\n", b);
+		}
+		printk("\n");
+	}
+#endif
 	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
 	if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
 		ret = PCI_ERS_RESULT_RECOVERED;
 	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
 
+	DEBUG17(qla_printk(KERN_WARNING, ha,
+	    "slot_reset-return:ret=%x\n", ret));
+
 	return ret;
 }
@@ -3343,12 +3412,17 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
 	struct qla_hw_data *ha = base_vha->hw;
 	int ret;
 
+	DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n"));
+
 	ret = qla2x00_wait_for_hba_online(base_vha);
 	if (ret != QLA_SUCCESS) {
 		qla_printk(KERN_ERR, ha, "the device failed to resume I/O "
 		    "from slot/link_reset");
 	}
+
+	ha->flags.eeh_busy = 0;
+
 	pci_cleanup_aer_uncorrect_error_status(pdev);
 }
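The three qla_os.c callbacks touched above (error_detected, slot_reset, resume) are the standard PCI error-recovery entry points a driver exposes to the EEH/AER core. A minimal sketch of how a driver wires them up (the foo_* functions are hypothetical placeholders; struct pci_error_handlers and its fields are the real kernel API, referenced from the driver's struct pci_driver as .err_handler):

#include <linux/pci.h>

static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
					   pci_channel_state_t state)
{
	/* quiesce I/O; ask the core for a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
{
	/* re-enable the device and reinitialize the hardware here */
	return PCI_ERS_RESULT_RECOVERED;
}

static void foo_resume(struct pci_dev *pdev)
{
	/* recovery finished: resume normal I/O */
}

static struct pci_error_handlers foo_err_handler = {
	.error_detected	= foo_error_detected,
	.slot_reset	= foo_slot_reset,
	.resume		= foo_resume,
};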
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index c482220f7ee..a65dd95507c 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION	"8.03.01-k8"
+#define QLA2XXX_VERSION	"8.03.01-k9"
 
 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	3
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 3058bb1aff9..fd7b15be764 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -623,6 +623,11 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
 		}
 		break;
 	case INQUIRY:
+		if (lun >= host->max_lun) {
+			cmd->result = DID_NO_CONNECT << 16;
+			done(cmd);
+			return 0;
+		}
 		if (id != host->max_id - 1)
 			break;
 		if (!lun && !cmd->device->channel &&
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 7dc85997e96..c57d9ce5ff7 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -171,6 +171,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
 #ifdef ELF_FDPIC_PLAT_INIT
 	unsigned long dynaddr;
 #endif
+#ifndef CONFIG_MMU
+	unsigned long stack_prot;
+#endif
 	struct file *interpreter = NULL; /* to shut gcc up */
 	char *interpreter_name = NULL;
 	int executable_stack;
@@ -316,6 +319,8 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
 	 * defunct, deceased, etc. after this point we have to exit via
 	 * error_kill */
 	set_personality(PER_LINUX_FDPIC);
+	if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
+		current->personality |= READ_IMPLIES_EXEC;
 	set_binfmt(&elf_fdpic_format);
 
 	current->mm->start_code = 0;
@@ -377,9 +382,13 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
 		if (stack_size < PAGE_SIZE * 2)
 			stack_size = PAGE_SIZE * 2;
 
+		stack_prot = PROT_READ | PROT_WRITE;
+		if (executable_stack == EXSTACK_ENABLE_X ||
+		    (executable_stack == EXSTACK_DEFAULT && VM_STACK_FLAGS & VM_EXEC))
+			stack_prot |= PROT_EXEC;
+
 		down_write(&current->mm->mmap_sem);
-		current->mm->start_brk = do_mmap(NULL, 0, stack_size,
-						 PROT_READ | PROT_WRITE | PROT_EXEC,
+		current->mm->start_brk = do_mmap(NULL, 0, stack_size, stack_prot,
 						 MAP_PRIVATE | MAP_ANONYMOUS |
 						 MAP_UNINITIALIZED | MAP_GROWSDOWN,
 						 0);
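The binfmt_elf_fdpic change above stops hard-wiring PROT_EXEC on the nommu stack region and instead derives it from the binary's PT_GNU_STACK marking. A standalone sketch of that decision, under the assumption that the three EXSTACK_* modes carry the kernel's usual meanings (the enum values here are local stand-ins for the kernel's definitions; PROT_* come from <sys/mman.h>):

#include <sys/mman.h>

enum { EXSTACK_DEFAULT, EXSTACK_DISABLE_X, EXSTACK_ENABLE_X };

static unsigned long stack_prot_for(int executable_stack, int arch_stack_is_exec)
{
	unsigned long prot = PROT_READ | PROT_WRITE;

	/* PT_GNU_STACK explicitly requested an executable stack, or the
	 * binary left the choice to an architecture whose default stack
	 * flags include exec permission */
	if (executable_stack == EXSTACK_ENABLE_X ||
	    (executable_stack == EXSTACK_DEFAULT && arch_stack_is_exec))
		prot |= PROT_EXEC;

	return prot;
}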
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 2c5ace4f00a..3c7f03b669f 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1615,6 +1615,7 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 				goto out;
 
 			new_dentry = dentry;
+			rehash = NULL;
 			new_inode = NULL;
 		}
 	}
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 7c2e337d05a..c194793b642 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -780,12 +780,9 @@ static inline int nfsd_dosync(struct file *filp, struct dentry *dp,
 	int (*fsync) (struct file *, struct dentry *, int);
 	int err;
 
-	err = filemap_fdatawrite(inode->i_mapping);
+	err = filemap_write_and_wait(inode->i_mapping);
 	if (err == 0 && fop && (fsync = fop->fsync))
 		err = fsync(filp, dp, 0);
-	if (err == 0)
-		err = filemap_fdatawait(inode->i_mapping);
-
 	return err;
 }
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 71dafb69cfe..ffac157fb5b 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1408,7 +1408,7 @@ extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
 				   struct drm_ati_pcigart_info * gart_info);
 
 extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
-				       size_t align, dma_addr_t maxaddr);
+				       size_t align);
 extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
 extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index ec3f5e80a5d..b64a8d7cdf6 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -188,6 +188,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_GEM_MADVISE	0x26
 #define DRM_I915_OVERLAY_PUT_IMAGE	0x27
 #define DRM_I915_OVERLAY_ATTRS	0x28
+#define DRM_I915_GEM_EXECBUFFER2	0x29
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -207,6 +208,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
 #define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
 #define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
 #define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
 #define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
 #define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
@@ -272,6 +274,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_NUM_FENCES_AVAIL	6
 #define I915_PARAM_HAS_OVERLAY		7
 #define I915_PARAM_HAS_PAGEFLIPPING	8
+#define I915_PARAM_HAS_EXECBUF2		9
 
 typedef struct drm_i915_getparam {
 	int param;
@@ -567,6 +570,57 @@ struct drm_i915_gem_execbuffer {
 	__u64 cliprects_ptr;
 };
 
+struct drm_i915_gem_exec_object2 {
+	/**
+	 * User's handle for a buffer to be bound into the GTT for this
+	 * operation.
+	 */
+	__u32 handle;
+
+	/** Number of relocations to be performed on this buffer */
+	__u32 relocation_count;
+	/**
+	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
+	 * the relocations to be performed in this buffer.
+	 */
+	__u64 relocs_ptr;
+
+	/** Required alignment in graphics aperture */
+	__u64 alignment;
+
+	/**
+	 * Returned value of the updated offset of the object, for future
+	 * presumed_offset writes.
+	 */
+	__u64 offset;
+
+#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
+	__u64 flags;
+	__u64 rsvd1;
+	__u64 rsvd2;
+};
+
+struct drm_i915_gem_execbuffer2 {
+	/**
+	 * List of gem_exec_object2 structs
+	 */
+	__u64 buffers_ptr;
+	__u32 buffer_count;
+
+	/** Offset in the batchbuffer to start execution from. */
+	__u32 batch_start_offset;
+	/** Bytes used in batchbuffer from batch_start_offset */
+	__u32 batch_len;
+	__u32 DR1;
+	__u32 DR4;
+	__u32 num_cliprects;
+	/** This is a struct drm_clip_rect *cliprects */
+	__u64 cliprects_ptr;
+	__u64 flags; /* currently unused */
+	__u64 rsvd1;
+	__u64 rsvd2;
+};
+
 struct drm_i915_gem_pin {
 	/** Handle of the buffer to be pinned. */
 	__u32 handle;
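The new uapi above is consumed from userspace by filling an array of drm_i915_gem_exec_object2 entries and pointing a drm_i915_gem_execbuffer2 at it. A minimal, relocation-free sketch of a submission (the fd, handle and length are placeholders; real callers such as libdrm also probe I915_PARAM_HAS_EXECBUF2 first and fill in relocations):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int submit_batch(int drm_fd, __u32 batch_handle, __u32 batch_len)
{
	struct drm_i915_gem_exec_object2 obj;
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&obj, 0, sizeof(obj));
	obj.handle = batch_handle;	/* GEM handle of the batch buffer */
	obj.relocation_count = 0;	/* no relocations in this sketch */

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (__u64)(uintptr_t)&obj;	/* one-object "list" */
	execbuf.buffer_count = 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = batch_len;

	/* returns 0 on success, -1 with errno set on failure */
	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}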
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 84a524afb3d..84d020bed08 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -123,6 +123,8 @@ struct vm_region {
 	struct file	*vm_file;	/* the backing file or NULL */
 
 	atomic_t	vm_usage;	/* region usage count */
+	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
+						* this region */
 };
 
 /*
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 5da0690d9ce..174e5392e51 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -243,6 +243,7 @@ struct pci_dev {
 	unsigned int	d2_support:1;	/* Low power state D2 is supported */
 	unsigned int	no_d1d2:1;	/* Only allow D0 and D3 */
 	unsigned int	wakeup_prepared:1;
+	unsigned int	d3_delay;	/* D3->D0 transition time in ms */
 
 #ifdef CONFIG_PCIEASPM
 	struct pcie_link_state	*link_state;	/* ASPM link state. */
diff --git a/mm/nommu.c b/mm/nommu.c
index 6f9248f89bd..17773862619 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -432,6 +432,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	/*
 	 * Ok, looks good - let it rip.
 	 */
+	flush_icache_range(mm->brk, brk);
 	return mm->brk = brk;
 }
 
@@ -1353,10 +1354,14 @@ unsigned long do_mmap_pgoff(struct file *file,
 share:
 	add_vma_to_mm(current->mm, vma);
 
-	up_write(&nommu_region_sem);
+	/* we flush the region from the icache only when the first executable
+	 * mapping of it is made */
+	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
+		flush_icache_range(region->vm_start, region->vm_end);
+		region->vm_icache_flushed = true;
+	}
 
-	if (prot & PROT_EXEC)
-		flush_icache_range(result, result + len);
+	up_write(&nommu_region_sem);
 
 	kleave(" = %lx", result);
 	return result;
@@ -1916,9 +1921,11 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
 		/* only read or write mappings where it is permitted */
 		if (write && vma->vm_flags & VM_MAYWRITE)
-			len -= copy_to_user((void *) addr, buf, len);
+			copy_to_user_page(vma, NULL, addr,
+					  (void *) addr, buf, len);
 		else if (!write && vma->vm_flags & VM_MAYREAD)
-			len -= copy_from_user(buf, (void *) addr, len);
+			copy_from_user_page(vma, NULL, addr,
+					    buf, (void *) addr, len);
 		else
 			len = 0;
 	} else {
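The nommu change above replaces a flush on every executable mmap() with a flush-once scheme: a one-bit flag on the shared region records that the instruction cache has already been brought in sync, so later mappings skip the (potentially expensive) flush. A standalone sketch of the pattern (the region struct, flag names and arch_flush_icache() are illustrative stand-ins for vm_region, vm_icache_flushed and flush_icache_range()):

struct region {
	unsigned long	start, end;
	unsigned int	icache_flushed : 1;	/* set after the first flush */
};

#define MAP_EXECUTABLE	0x1			/* stand-in for VM_EXEC */

/* hypothetical arch hook; real kernels use flush_icache_range() */
extern void arch_flush_icache(unsigned long start, unsigned long end);

static void map_region(struct region *r, unsigned long flags)
{
	if ((flags & MAP_EXECUTABLE) && !r->icache_flushed) {
		arch_flush_icache(r->start, r->end);
		r->icache_flushed = 1;	/* subsequent mappings skip this */
	}
	/* ... install the mapping ... */
}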
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 3c3c50f38a1..f7a7f8380e3 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -644,7 +644,22 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
 	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
 	if (IS_ERR(p)) {
 		err = PTR_ERR(p);
-		gss_msg->msg.errno = (err == -EAGAIN) ? -EAGAIN : -EACCES;
+		switch (err) {
+		case -EACCES:
+			gss_msg->msg.errno = err;
+			err = mlen;
+			break;
+		case -EFAULT:
+		case -ENOMEM:
+		case -EINVAL:
+		case -ENOSYS:
+			gss_msg->msg.errno = -EAGAIN;
+			break;
+		default:
+			printk(KERN_CRIT "%s: bad return from "
+				"gss_fill_context: %zd\n", __func__, err);
+			BUG();
+		}
 		goto err_release_msg;
 	}
 	gss_msg->ctx = gss_get_ctx(ctx);
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index ef45eba2248..2deb0ed72ff 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -131,8 +131,10 @@ gss_import_sec_context_kerberos(const void *p,
 	struct	krb5_ctx *ctx;
 	int tmp;
 
-	if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS)))
+	if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) {
+		p = ERR_PTR(-ENOMEM);
 		goto out_err;
+	}
 
 	p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
 	if (IS_ERR(p))
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 6efbb0cd3c7..76e4c6f4ac3 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -252,7 +252,7 @@ gss_import_sec_context(const void *input_token, size_t bufsize,
 		       struct gss_ctx		**ctx_id)
 {
 	if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL)))
-		return GSS_S_FAILURE;
+		return -ENOMEM;
 	(*ctx_id)->mech_type = gss_mech_get(mech);
 
 	return mech->gm_ops
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 1c924ee0a1e..7d1f9e928f6 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -699,7 +699,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 	spin_unlock_bh(&pool->sp_lock);
 
 	len = 0;
-	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
+	if (test_bit(XPT_LISTENER, &xprt->xpt_flags) &&
+	    !test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
 		struct svc_xprt *newxpt;
 		newxpt = xprt->xpt_ops->xpo_accept(xprt);
 		if (newxpt) {
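The auth_gss.c hunk above replaces a lossy two-way errno mapping with an explicit classification of gss_fill_context() failures: a definitive server denial is reported as -EACCES, while transient or local problems are turned into -EAGAIN so the RPC layer retries, and anything unexpected is treated as a kernel bug. A userspace-style sketch of the same classification (the helper name is hypothetical; the kernel code BUG()s where this returns -EIO):

#include <errno.h>

static int classify_ctx_err(int err)
{
	switch (err) {
	case -EACCES:		/* server definitively refused the context */
		return -EACCES;
	case -EFAULT:
	case -ENOMEM:
	case -EINVAL:
	case -ENOSYS:		/* transient/local failure: caller should retry */
		return -EAGAIN;
	default:
		return -EIO;	/* unexpected: a bug, not a protocol outcome */
	}
}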