Diffstat (limited to 'drivers')
83 files changed, 2661 insertions(+), 453 deletions(-)
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 652fd5ce303..cab13f2fc28 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -164,15 +164,24 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
 	if (dev_desc->clk_required) {
 		ret = register_device_clock(adev, pdata);
 		if (ret) {
-			/*
-			 * Skip the device, but don't terminate the namespace
-			 * scan.
-			 */
-			kfree(pdata);
-			return 0;
+			/* Skip the device, but continue the namespace scan. */
+			ret = 0;
+			goto err_out;
 		}
 	}
 
+	/*
+	 * This works around a known issue in ACPI tables where LPSS devices
+	 * have _PS0 and _PS3 without _PSC (and no power resources), so
+	 * acpi_bus_init_power() will assume that the BIOS has put them into D0.
+	 */
+	ret = acpi_device_fix_up_power(adev);
+	if (ret) {
+		/* Skip the device, but continue the namespace scan. */
+		ret = 0;
+		goto err_out;
+	}
+
 	adev->driver_data = pdata;
 	ret = acpi_create_platform_device(adev, id);
 	if (ret > 0)
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 318fa32a141..31c217a4283 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -290,6 +290,26 @@ int acpi_bus_init_power(struct acpi_device *device)
 	return 0;
 }
 
+/**
+ * acpi_device_fix_up_power - Force device with missing _PSC into D0.
+ * @device: Device object whose power state is to be fixed up.
+ *
+ * Devices without power resources and _PSC, but having _PS0 and _PS3 defined,
+ * are assumed to be put into D0 by the BIOS.  However, in some cases that may
+ * not be the case and this function should be used then.
+ */
+int acpi_device_fix_up_power(struct acpi_device *device)
+{
+	int ret = 0;
+
+	if (!device->power.flags.power_resources
+	    && !device->power.flags.explicit_get
+	    && device->power.state == ACPI_STATE_D0)
+		ret = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0);
+
+	return ret;
+}
+
 int acpi_bus_update_power(acpi_handle handle, int *state_p)
 {
 	struct acpi_device *device;
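The new acpi_device_fix_up_power() helper is meant to be called during enumeration, before a driver touches the device. A minimal sketch of a caller, mirroring the acpi_lpss.c error handling above (the function name example_enumerate is hypothetical, not part of this patch):

	/* Hypothetical enumeration helper: force a device that declares
	 * _PS0/_PS3 but no _PSC into D0 before using it.  As in the
	 * acpi_lpss.c hunk, a failure skips the device without aborting
	 * the namespace scan.
	 */
	static int example_enumerate(struct acpi_device *adev)
	{
		if (acpi_device_fix_up_power(adev))
			return 0;	/* skip device, keep scanning */
		/* ... go on to create the platform device ... */
		return 0;
	}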
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 4fdea381ef2..ec117c6c996 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -868,8 +868,10 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
 	if (!count)
 		return -EINVAL;
 
+	acpi_scan_lock_acquire();
 	begin_undock(dock_station);
 	ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST);
+	acpi_scan_lock_release();
 	return ret ? ret: count;
 }
 static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index f962047c6c8..288bb270f8e 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -885,6 +885,7 @@ int acpi_add_power_resource(acpi_handle handle)
 				ACPI_STA_DEFAULT);
 	mutex_init(&resource->resource_lock);
 	INIT_LIST_HEAD(&resource->dependent);
+	INIT_LIST_HEAD(&resource->list_node);
 	resource->name = device->pnp.bus_id;
 	strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
 	strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index a3868f6c222..3322b47ab7c 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -304,7 +304,8 @@ static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi)
 }
 
 static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
-				     u8 triggering, u8 polarity, u8 shareable)
+				     u8 triggering, u8 polarity, u8 shareable,
+				     bool legacy)
 {
 	int irq, p, t;
 
@@ -317,14 +318,19 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
 	 * In IO-APIC mode, use overrided attribute. Two reasons:
 	 * 1. BIOS bug in DSDT
 	 * 2. BIOS uses IO-APIC mode Interrupt Source Override
+	 *
+	 * We do this only if we are dealing with IRQ() or IRQNoFlags()
+	 * resource (the legacy ISA resources). With modern ACPI 5 devices
+	 * using extended IRQ descriptors we take the IRQ configuration
+	 * from _CRS directly.
 	 */
-	if (!acpi_get_override_irq(gsi, &t, &p)) {
+	if (legacy && !acpi_get_override_irq(gsi, &t, &p)) {
 		u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
 		u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
 
 		if (triggering != trig || polarity != pol) {
 			pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi,
-				   t ? "edge" : "level", p ? "low" : "high");
+				   t ? "level" : "edge", p ? "low" : "high");
 			triggering = trig;
 			polarity = pol;
 		}
@@ -373,7 +379,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
 		}
 		acpi_dev_get_irqresource(res, irq->interrupts[index],
 					 irq->triggering, irq->polarity,
-					 irq->sharable);
+					 irq->sharable, true);
 		break;
 	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
 		ext_irq = &ares->data.extended_irq;
@@ -383,7 +389,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
 		}
 		acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
 					 ext_irq->triggering, ext_irq->polarity,
-					 ext_irq->sharable);
+					 ext_irq->sharable, false);
 		break;
 	default:
 		return false;
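The new bool makes the IO-APIC override apply only to the legacy IRQ()/IRQNoFlags() descriptors; ACPI 5 extended IRQ descriptors trust _CRS. The pr_warning() change also corrects the message: a nonzero t means the override is level-sensitive, so the log now names the trigger mode actually applied. Restated from the two call sites above (illustration only):

	/* Legacy ISA descriptor: trigger/polarity may be overridden by an
	 * IO-APIC Interrupt Source Override.
	 */
	acpi_dev_get_irqresource(res, irq->interrupts[index],
				 irq->triggering, irq->polarity,
				 irq->sharable, true);

	/* ACPI 5 extended descriptor: take the configuration from _CRS. */
	acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
				 ext_irq->triggering, ext_irq->polarity,
				 ext_irq->sharable, false);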
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 4b1f9265887..01e21037d8f 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -450,8 +450,18 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
 {
 	struct firmware_buf *buf = fw_priv->buf;
 
+	/*
+	 * There is a small window in which user can write to 'loading'
+	 * between loading done and disappearance of 'loading'
+	 */
+	if (test_bit(FW_STATUS_DONE, &buf->status))
+		return;
+
 	set_bit(FW_STATUS_ABORT, &buf->status);
 	complete_all(&buf->completion);
+
+	/* avoid user action after loading abort */
+	fw_priv->buf = NULL;
 }
 
 #define is_fw_load_aborted(buf)	\
@@ -528,7 +538,12 @@ static ssize_t firmware_loading_show(struct device *dev,
 				     struct device_attribute *attr, char *buf)
 {
 	struct firmware_priv *fw_priv = to_firmware_priv(dev);
-	int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+	int loading = 0;
+
+	mutex_lock(&fw_lock);
+	if (fw_priv->buf)
+		loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+	mutex_unlock(&fw_lock);
 
 	return sprintf(buf, "%d\n", loading);
 }
@@ -570,12 +585,12 @@ static ssize_t firmware_loading_store(struct device *dev,
 				      const char *buf, size_t count)
 {
 	struct firmware_priv *fw_priv = to_firmware_priv(dev);
-	struct firmware_buf *fw_buf = fw_priv->buf;
+	struct firmware_buf *fw_buf;
 	int loading = simple_strtol(buf, NULL, 10);
 	int i;
 
 	mutex_lock(&fw_lock);
-
+	fw_buf = fw_priv->buf;
 	if (!fw_buf)
 		goto out;
 
@@ -777,10 +792,6 @@ static void firmware_class_timeout_work(struct work_struct *work)
 			struct firmware_priv, timeout_work.work);
 
 	mutex_lock(&fw_lock);
-	if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) {
-		mutex_unlock(&fw_lock);
-		return;
-	}
 	fw_load_abort(fw_priv);
 	mutex_unlock(&fw_lock);
 }
@@ -861,8 +872,6 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
 
 	cancel_delayed_work_sync(&fw_priv->timeout_work);
 
-	fw_priv->buf = NULL;
-
 	device_remove_file(f_dev, &dev_attr_loading);
 err_del_bin_attr:
 	device_remove_bin_file(f_dev, &firmware_attr_data);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3063452e55d..49394e3f31b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1036,12 +1036,16 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
 	char *name;
 	u64 segment;
 	int ret;
+	char *name_format;
 
 	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
 	if (!name)
 		return NULL;
 	segment = offset >> rbd_dev->header.obj_order;
-	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
+	name_format = "%s.%012llx";
+	if (rbd_dev->image_format == 2)
+		name_format = "%s.%016llx";
+	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
 			rbd_dev->header.object_prefix, segment);
 	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
 		pr_err("error formatting segment name for #%llu (%d)\n",
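Format 2 rbd images use a 16-digit hexadecimal segment suffix where format 1 used 12. A worked example with hypothetical values (prefix and numbers invented for illustration):

	/* offset 22 MiB with obj_order 22 (4 MiB objects) -> segment 5:
	 *   format 1: "%s.%012llx" -> "prefix.000000000005"
	 *   format 2: "%s.%016llx" -> "prefix.0000000000000005"
	 */
	u64 segment = offset >> obj_order;
	const char *fmt = (image_format == 2) ? "%s.%016llx" : "%s.%012llx";
	snprintf(name, MAX_OBJ_NAME_SIZE + 1, fmt, object_prefix, segment);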
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 934cfd18f72..1144e8c7579 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1955,6 +1955,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
 		/* XXX the notifier code should handle this better */
 		if (!cn->notifier_head.head) {
 			srcu_cleanup_notifier_head(&cn->notifier_head);
+			list_del(&cn->node);
 			kfree(cn);
 		}
 
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 5c97e75924a..22d7699e7ce 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -155,7 +155,7 @@ static __initdata unsigned long exynos5250_clk_regs[] = {
 
 /* list of all parent clock list */
 PNAME(mout_apll_p)	= { "fin_pll", "fout_apll", };
-PNAME(mout_cpu_p)	= { "mout_apll", "mout_mpll", };
+PNAME(mout_cpu_p)	= { "mout_apll", "sclk_mpll", };
 PNAME(mout_mpll_fout_p)	= { "fout_mplldiv2", "fout_mpll" };
 PNAME(mout_mpll_p)	= { "fin_pll", "mout_mpll_fout" };
 PNAME(mout_bpll_fout_p)	= { "fout_bplldiv2", "fout_bpll" };
@@ -208,10 +208,10 @@ struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = {
 };
 
 struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
-	MUX(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1),
-	MUX(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1),
+	MUX_A(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1, "mout_apll"),
+	MUX_A(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1, "mout_cpu"),
 	MUX(none, "mout_mpll_fout", mout_mpll_fout_p, PLL_DIV2_SEL, 4, 1),
-	MUX(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1),
+	MUX_A(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1, "mout_mpll"),
 	MUX(none, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1),
 	MUX(none, "sclk_bpll", mout_bpll_p, SRC_CDREX, 0, 1),
 	MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
@@ -378,7 +378,7 @@ struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
 	GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
 	GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
 	GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0),
-	GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, 0, 0),
+	GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0),
 	GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
 	GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
 	GATE(tzpc2, "tzpc2", "aclk66", GATE_IP_PERIS, 8, 0, 0),
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 89135f6be11..362f12dcd94 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -111,7 +111,8 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
 				unsigned long parent_rate)
 {
 	struct samsung_clk_pll36xx *pll = to_clk_pll36xx(hw);
-	u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1;
+	u32 mdiv, pdiv, sdiv, pll_con0, pll_con1;
+	s16 kdiv;
 	u64 fvco = parent_rate;
 
 	pll_con0 = __raw_readl(pll->con_reg);
@@ -119,7 +120,7 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
 	mdiv = (pll_con0 >> PLL36XX_MDIV_SHIFT) & PLL36XX_MDIV_MASK;
 	pdiv = (pll_con0 >> PLL36XX_PDIV_SHIFT) & PLL36XX_PDIV_MASK;
 	sdiv = (pll_con0 >> PLL36XX_SDIV_SHIFT) & PLL36XX_SDIV_MASK;
-	kdiv = pll_con1 & PLL36XX_KDIV_MASK;
+	kdiv = (s16)(pll_con1 & PLL36XX_KDIV_MASK);
 
 	fvco *= (mdiv << 16) + kdiv;
 	do_div(fvco, (pdiv << sdiv));
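The K divider read from PLL_CON1 is a signed 16-bit two's-complement field; keeping it in a u32 turned negative K values into large positive rate offsets. Sign-extending through s16 fixes that. A sketch (register value hypothetical, assuming the 16-bit mask that PLL36XX_KDIV_MASK implies):

	u32 pll_con1 = 0xff00;			/* K field = -256 */
	u32 wrong = pll_con1 & 0xffff;		/* 65280: inflates fvco */
	s16 kdiv  = (s16)(pll_con1 & 0xffff);	/* -256: what the PLL means */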
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index f9ec43fd132..080c3c5e33f 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -369,7 +369,7 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
 	clk_register_clkdev(clk, NULL, "60100000.serial");
 }
 #else
-static inline void spear320_clk_init(void) { }
+static inline void spear320_clk_init(void __iomem *soc_config_base) { }
 #endif
 
 void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base)
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index c6921f538e2..ba99e384410 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -1598,6 +1598,12 @@ static void __init tegra30_periph_clk_init(void)
 	clk_register_clkdev(clk, "afi", "tegra-pcie");
 	clks[afi] = clk;
 
+	/* pciex */
+	clk = tegra_clk_register_periph_gate("pciex", "pll_e", 0, clk_base, 0,
+				    74, &periph_u_regs, periph_clk_enb_refcnt);
+	clk_register_clkdev(clk, "pciex", "tegra-pcie");
+	clks[pciex] = clk;
+
 	/* kfuse */
 	clk = tegra_clk_register_periph_gate("kfuse", "clk_m",
 				    TEGRA_PERIPH_ON_APB,
@@ -1716,11 +1722,6 @@ static void __init tegra30_fixed_clk_init(void)
 				1, 0, &cml_lock);
 	clk_register_clkdev(clk, "cml1", NULL);
 	clks[cml1] = clk;
-
-	/* pciex */
-	clk = clk_register_fixed_rate(NULL, "pciex", "pll_e", 0, 100000000);
-	clk_register_clkdev(clk, "pciex", NULL);
-	clks[pciex] = clk;
 }
 
 static void __init tegra30_osc_clk_init(void)
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index dcde35231e2..5b7b9110254 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -190,8 +190,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
 		if (ret)
 			return ERR_PTR(ret);
 	}
-	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
-			      0600);
+	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
 }
 EXPORT_SYMBOL(drm_gem_prime_export);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 0e534169592..6948eb88c2b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2687,6 +2687,9 @@ void r600_uvd_rbc_stop(struct radeon_device *rdev)
 int r600_uvd_init(struct radeon_device *rdev)
 {
 	int i, j, r;
+	/* disable byte swapping */
+	u32 lmi_swap_cntl = 0;
+	u32 mp_swap_cntl = 0;
 
 	/* raise clocks while booting up the VCPU */
 	radeon_set_uvd_clocks(rdev, 53300, 40000);
@@ -2711,9 +2714,13 @@ int r600_uvd_init(struct radeon_device *rdev)
 	WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
 			     (1 << 21) | (1 << 9) | (1 << 20));
 
-	/* disable byte swapping */
-	WREG32(UVD_LMI_SWAP_CNTL, 0);
-	WREG32(UVD_MP_SWAP_CNTL, 0);
+#ifdef __BIG_ENDIAN
+	/* swap (8 in 32) RB and IB */
+	lmi_swap_cntl = 0xa;
+	mp_swap_cntl = 0;
+#endif
+	WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+	WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
 
 	WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
 	WREG32(UVD_MPC_SET_MUXA1, 0x0);
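r600_uvd_init() now computes the swap-control values once and writes them unconditionally; only big-endian builds select non-zero swapping. Condensed restatement (illustrative only):

	u32 lmi_swap_cntl = 0, mp_swap_cntl = 0;	/* LE: no swapping */
#ifdef __BIG_ENDIAN
	lmi_swap_cntl = 0xa;	/* swap (8 in 32) RB and IB */
	mp_swap_cntl = 0;
#endif
	WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);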
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 189973836cf..b0dc0b6cb4e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -244,16 +244,6 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
  */
 void radeon_wb_disable(struct radeon_device *rdev)
 {
-	int r;
-
-	if (rdev->wb.wb_obj) {
-		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-		if (unlikely(r != 0))
-			return;
-		radeon_bo_kunmap(rdev->wb.wb_obj);
-		radeon_bo_unpin(rdev->wb.wb_obj);
-		radeon_bo_unreserve(rdev->wb.wb_obj);
-	}
 	rdev->wb.enabled = false;
 }
 
@@ -269,6 +259,11 @@ void radeon_wb_fini(struct radeon_device *rdev)
 {
 	radeon_wb_disable(rdev);
 	if (rdev->wb.wb_obj) {
+		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
+			radeon_bo_kunmap(rdev->wb.wb_obj);
+			radeon_bo_unpin(rdev->wb.wb_obj);
+			radeon_bo_unreserve(rdev->wb.wb_obj);
+		}
 		radeon_bo_unref(&rdev->wb.wb_obj);
 		rdev->wb.wb = NULL;
 		rdev->wb.wb_obj = NULL;
@@ -295,26 +290,26 @@ int radeon_wb_init(struct radeon_device *rdev)
 			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
 			return r;
 		}
-	}
-	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-	if (unlikely(r != 0)) {
-		radeon_wb_fini(rdev);
-		return r;
-	}
-	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
-			  &rdev->wb.gpu_addr);
-	if (r) {
+		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+		if (unlikely(r != 0)) {
+			radeon_wb_fini(rdev);
+			return r;
+		}
+		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+				&rdev->wb.gpu_addr);
+		if (r) {
+			radeon_bo_unreserve(rdev->wb.wb_obj);
+			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
+			radeon_wb_fini(rdev);
+			return r;
+		}
+		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
 		radeon_bo_unreserve(rdev->wb.wb_obj);
-		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
-		radeon_wb_fini(rdev);
-		return r;
-	}
-	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
-	radeon_bo_unreserve(rdev->wb.wb_obj);
-	if (r) {
-		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
-		radeon_wb_fini(rdev);
-		return r;
+		if (r) {
+			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
+			radeon_wb_fini(rdev);
+			return r;
+		}
 	}
 
 	/* clear wb memory */
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 5b937dfe6f6..ddb8f8e04eb 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -63,7 +63,9 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
 {
 	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
 	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-		*drv->cpu_addr = cpu_to_le32(seq);
+		if (drv->cpu_addr) {
+			*drv->cpu_addr = cpu_to_le32(seq);
+		}
 	} else {
 		WREG32(drv->scratch_reg, seq);
 	}
@@ -84,7 +86,11 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
 	u32 seq = 0;
 
 	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-		seq = le32_to_cpu(*drv->cpu_addr);
+		if (drv->cpu_addr) {
+			seq = le32_to_cpu(*drv->cpu_addr);
+		} else {
+			seq = lower_32_bits(atomic64_read(&drv->last_seq));
+		}
 	} else {
 		seq = RREG32(drv->scratch_reg);
 	}
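radeon_wb_fini() now performs the unmap/unpin only under a successful reserve, immediately before dropping the last reference; a failed reserve skips straight to unref rather than leaking the BO. The ordering matters because kunmap and unpin require the reservation. Minimal pattern, condensed from the hunk above (illustrative):

	/* Teardown ordering for a pinned and mapped radeon BO:
	 * reserve -> kunmap -> unpin -> unreserve -> unref.
	 */
	if (!radeon_bo_reserve(bo, false)) {
		radeon_bo_kunmap(bo);
		radeon_bo_unpin(bo);
		radeon_bo_unreserve(bo);
	}
	radeon_bo_unref(&bo);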
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 2c1341f63dc..43ec4a401f0 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -1197,11 +1197,13 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 int radeon_vm_bo_rmv(struct radeon_device *rdev,
 		     struct radeon_bo_va *bo_va)
 {
-	int r;
+	int r = 0;
 
 	mutex_lock(&rdev->vm_manager.lock);
 	mutex_lock(&bo_va->vm->mutex);
-	r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+	if (bo_va->soffset) {
+		r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+	}
 	mutex_unlock(&rdev->vm_manager.lock);
 	list_del(&bo_va->vm_list);
 	mutex_unlock(&bo_va->vm->mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index e17faa7cf73..82434018cbe 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -402,6 +402,13 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
 		return -ENOMEM;
 	/* Align requested size with padding so unlock_commit can
 	 * pad safely */
+	radeon_ring_free_size(rdev, ring);
+	if (ring->ring_free_dw == (ring->ring_size / 4)) {
+		/* This is an empty ring update lockup info to avoid
+		 * false positive.
+		 */
+		radeon_ring_lockup_update(ring);
+	}
 	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
 	while (ndw > (ring->ring_free_dw - 1)) {
 		radeon_ring_free_size(rdev, ring);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 906e5c0ca3b..cad735dd02c 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -159,7 +159,17 @@ int radeon_uvd_suspend(struct radeon_device *rdev)
 	if (!r) {
 		radeon_bo_kunmap(rdev->uvd.vcpu_bo);
 		radeon_bo_unpin(rdev->uvd.vcpu_bo);
+		rdev->uvd.cpu_addr = NULL;
+		if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
+			radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
+		}
 		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+
+		if (rdev->uvd.cpu_addr) {
+			radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+		} else {
+			rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
+		}
 	}
 	return r;
 }
@@ -178,6 +188,10 @@ int radeon_uvd_resume(struct radeon_device *rdev)
 		return r;
 	}
 
+	/* Have been pin in cpu unmap unpin */
+	radeon_bo_kunmap(rdev->uvd.vcpu_bo);
+	radeon_bo_unpin(rdev->uvd.vcpu_bo);
+
 	r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
 			  &rdev->uvd.gpu_addr);
 	if (r) {
@@ -613,19 +627,19 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
 	}
 
 	/* stitch together an UVD create msg */
-	msg[0] = 0x00000de4;
-	msg[1] = 0x00000000;
-	msg[2] = handle;
-	msg[3] = 0x00000000;
-	msg[4] = 0x00000000;
-	msg[5] = 0x00000000;
-	msg[6] = 0x00000000;
-	msg[7] = 0x00000780;
-	msg[8] = 0x00000440;
-	msg[9] = 0x00000000;
-	msg[10] = 0x01b37000;
+	msg[0] = cpu_to_le32(0x00000de4);
+	msg[1] = cpu_to_le32(0x00000000);
+	msg[2] = cpu_to_le32(handle);
+	msg[3] = cpu_to_le32(0x00000000);
+	msg[4] = cpu_to_le32(0x00000000);
+	msg[5] = cpu_to_le32(0x00000000);
+	msg[6] = cpu_to_le32(0x00000000);
+	msg[7] = cpu_to_le32(0x00000780);
+	msg[8] = cpu_to_le32(0x00000440);
+	msg[9] = cpu_to_le32(0x00000000);
+	msg[10] = cpu_to_le32(0x01b37000);
 	for (i = 11; i < 1024; ++i)
-		msg[i] = 0x0;
+		msg[i] = cpu_to_le32(0x0);
 
 	radeon_bo_kunmap(bo);
 	radeon_bo_unreserve(bo);
@@ -659,12 +673,12 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
 	}
 
 	/* stitch together an UVD destroy msg */
-	msg[0] = 0x00000de4;
-	msg[1] = 0x00000002;
-	msg[2] = handle;
-	msg[3] = 0x00000000;
+	msg[0] = cpu_to_le32(0x00000de4);
+	msg[1] = cpu_to_le32(0x00000002);
+	msg[2] = cpu_to_le32(handle);
+	msg[3] = cpu_to_le32(0x00000000);
 	for (i = 4; i < 1024; ++i)
-		msg[i] = 0x0;
+		msg[i] = cpu_to_le32(0x0);
 
 	radeon_bo_kunmap(bo);
 	radeon_bo_unreserve(bo);
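UVD parses its message buffers as little-endian words, so the CPU side must store them with cpu_to_le32(); on little-endian machines the conversion compiles to a plain store, while big-endian hosts get the required byte swap. Pattern, condensed from the create-msg hunk above (illustration only):

	/* Every word of the UVD message goes through cpu_to_le32() so the
	 * firmware sees the same layout regardless of host endianness.
	 */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[2] = cpu_to_le32(handle);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);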
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index c332fb98480..957cfd4f806 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -261,4 +261,17 @@ config SHMOBILE_IOMMU_L1SIZE
 	default 256 if SHMOBILE_IOMMU_ADDRSIZE_64MB
 	default 128 if SHMOBILE_IOMMU_ADDRSIZE_32MB
 
+config ARM_SMMU
+	bool "ARM Ltd. System MMU (SMMU) Support"
+	depends on ARM64 || (ARM_LPAE && OF)
+	select IOMMU_API
+	select ARM_DMA_USE_IOMMU if ARM
+	help
+	  Support for implementations of the ARM System MMU architecture
+	  versions 1 and 2. The driver supports both v7l and v8l table
+	  formats with 4k and 64k page sizes.
+
+	  Say Y here if your SoC includes an IOMMU device implementing
+	  the ARM SMMU architecture.
+
 endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index ef0e5207ad6..bbe7041212d 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_OF_IOMMU) += of_iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
 obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 21d02b0d907..6dc659426a5 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -287,14 +287,27 @@ static struct pci_dev *get_isolation_root(struct pci_dev *pdev)
 
 	/*
 	 * If it's a multifunction device that does not support our
-	 * required ACS flags, add to the same group as function 0.
+	 * required ACS flags, add to the same group as lowest numbered
+	 * function that also does not suport the required ACS flags.
 	 */
 	if (dma_pdev->multifunction &&
-	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
-		swap_pci_ref(&dma_pdev,
-			     pci_get_slot(dma_pdev->bus,
-					  PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
-					  0)));
+	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
+		u8 i, slot = PCI_SLOT(dma_pdev->devfn);
+
+		for (i = 0; i < 8; i++) {
+			struct pci_dev *tmp;
+
+			tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
+			if (!tmp)
+				continue;
+
+			if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
+				swap_pci_ref(&dma_pdev, tmp);
+				break;
+			}
+			pci_dev_put(tmp);
+		}
+	}
 
 	/*
 	 * Devices on the root bus go through the iommu. If that's not us,
@@ -1484,6 +1497,10 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
 
 			/* Large PTE found which maps this address */
 			unmap_size = PTE_PAGE_SIZE(*pte);
+
+			/* Only unmap from the first pte in the page */
+			if ((unmap_size - 1) & bus_addr)
+				break;
 			count      = PAGE_SIZE_PTE_COUNT(unmap_size);
 			for (i = 0; i < count; i++)
 				pte[i] = 0ULL;
@@ -1493,7 +1510,7 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
 		unmapped += unmap_size;
 	}
 
-	BUG_ON(!is_power_of_2(unmapped));
+	BUG_ON(unmapped && !is_power_of_2(unmapped));
 
 	return unmapped;
 }
@@ -1893,34 +1910,59 @@ static void domain_id_free(int id)
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 }
 
+#define DEFINE_FREE_PT_FN(LVL, FN)				\
+static void free_pt_##LVL (unsigned long __pt)			\
+{								\
+	unsigned long p;					\
+	u64 *pt;						\
+	int i;							\
+								\
+	pt = (u64 *)__pt;					\
+								\
+	for (i = 0; i < 512; ++i) {				\
+		if (!IOMMU_PTE_PRESENT(pt[i]))			\
+			continue;				\
+								\
+		p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);	\
+		FN(p);						\
+	}							\
+	free_page((unsigned long)pt);				\
+}
+
+DEFINE_FREE_PT_FN(l2, free_page)
+DEFINE_FREE_PT_FN(l3, free_pt_l2)
+DEFINE_FREE_PT_FN(l4, free_pt_l3)
+DEFINE_FREE_PT_FN(l5, free_pt_l4)
+DEFINE_FREE_PT_FN(l6, free_pt_l5)
+
 static void free_pagetable(struct protection_domain *domain)
 {
-	int i, j;
-	u64 *p1, *p2, *p3;
-
-	p1 = domain->pt_root;
-
-	if (!p1)
-		return;
-
-	for (i = 0; i < 512; ++i) {
-		if (!IOMMU_PTE_PRESENT(p1[i]))
-			continue;
-
-		p2 = IOMMU_PTE_PAGE(p1[i]);
-		for (j = 0; j < 512; ++j) {
-			if (!IOMMU_PTE_PRESENT(p2[j]))
-				continue;
-			p3 = IOMMU_PTE_PAGE(p2[j]);
-			free_page((unsigned long)p3);
-		}
+	unsigned long root = (unsigned long)domain->pt_root;
 
-		free_page((unsigned long)p2);
+	switch (domain->mode) {
+	case PAGE_MODE_NONE:
+		break;
+	case PAGE_MODE_1_LEVEL:
+		free_page(root);
+		break;
+	case PAGE_MODE_2_LEVEL:
+		free_pt_l2(root);
+		break;
+	case PAGE_MODE_3_LEVEL:
+		free_pt_l3(root);
+		break;
+	case PAGE_MODE_4_LEVEL:
+		free_pt_l4(root);
+		break;
+	case PAGE_MODE_5_LEVEL:
+		free_pt_l5(root);
+		break;
+	case PAGE_MODE_6_LEVEL:
+		free_pt_l6(root);
+		break;
+	default:
+		BUG();
 	}
-
-	free_page((unsigned long)p1);
-
-	domain->pt_root = NULL;
 }
 
 static void free_gcr3_tbl_level1(u64 *tbl)
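DEFINE_FREE_PT_FN generates one level of the page-table teardown: each generated function frees everything one level down via FN, then frees its own table page. Expanded by hand for level 2, equivalent to what the macro emits:

	/* Hand expansion of DEFINE_FREE_PT_FN(l2, free_page): walk the 512
	 * entries of a level-2 table, free each present level-1 page, then
	 * free the table page itself.
	 */
	static void free_pt_l2(unsigned long __pt)
	{
		unsigned long p;
		u64 *pt = (u64 *)__pt;
		int i;

		for (i = 0; i < 512; ++i) {
			if (!IOMMU_PTE_PRESENT(pt[i]))
				continue;
			p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);
			free_page(p);
		}
		free_page((unsigned long)pt);
	}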
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
new file mode 100644
index 00000000000..ebd0a4cff04
--- /dev/null
+++ b/drivers/iommu/arm-smmu.c
@@ -0,0 +1,1969 @@
+/*
+ * IOMMU API for ARM architected SMMU implementations.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2013 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ *
+ * This driver currently supports:
+ *	- SMMUv1 and v2 implementations
+ *	- Stream-matching and stream-indexing
+ *	- v7/v8 long-descriptor format
+ *	- Non-secure access to the SMMU
+ *	- 4k and 64k pages, with contiguous pte hints.
+ *	- Up to 39-bit addressing
+ *	- Context fault reporting
+ */
+
+#define pr_fmt(fmt) "arm-smmu: " fmt
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <linux/amba/bus.h>
+
+#include <asm/pgalloc.h>
+
+/* Maximum number of stream IDs assigned to a single device */
+#define MAX_MASTER_STREAMIDS		8
+
+/* Maximum number of context banks per SMMU */
+#define ARM_SMMU_MAX_CBS		128
+
+/* Maximum number of mapping groups per SMMU */
+#define ARM_SMMU_MAX_SMRS		128
+
+/* Number of VMIDs per SMMU */
+#define ARM_SMMU_NUM_VMIDS		256
+
+/* SMMU global address space */
+#define ARM_SMMU_GR0(smmu)		((smmu)->base)
+#define ARM_SMMU_GR1(smmu)		((smmu)->base + (smmu)->pagesize)
+
+/* Page table bits */
+#define ARM_SMMU_PTE_PAGE		(((pteval_t)3) << 0)
+#define ARM_SMMU_PTE_CONT		(((pteval_t)1) << 52)
+#define ARM_SMMU_PTE_AF			(((pteval_t)1) << 10)
+#define ARM_SMMU_PTE_SH_NS		(((pteval_t)0) << 8)
+#define ARM_SMMU_PTE_SH_OS		(((pteval_t)2) << 8)
+#define ARM_SMMU_PTE_SH_IS		(((pteval_t)3) << 8)
+
+#if PAGE_SIZE == SZ_4K
+#define ARM_SMMU_PTE_CONT_ENTRIES	16
+#elif PAGE_SIZE == SZ_64K
+#define ARM_SMMU_PTE_CONT_ENTRIES	32
+#else
+#define ARM_SMMU_PTE_CONT_ENTRIES	1
+#endif
+
+#define ARM_SMMU_PTE_CONT_SIZE		(PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
+#define ARM_SMMU_PTE_CONT_MASK		(~(ARM_SMMU_PTE_CONT_SIZE - 1))
+#define ARM_SMMU_PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(pte_t))
+
+/* Stage-1 PTE */
+#define ARM_SMMU_PTE_AP_UNPRIV		(((pteval_t)1) << 6)
+#define ARM_SMMU_PTE_AP_RDONLY		(((pteval_t)2) << 6)
+#define ARM_SMMU_PTE_ATTRINDX_SHIFT	2
+
+/* Stage-2 PTE */
+#define ARM_SMMU_PTE_HAP_FAULT		(((pteval_t)0) << 6)
+#define ARM_SMMU_PTE_HAP_READ		(((pteval_t)1) << 6)
+#define ARM_SMMU_PTE_HAP_WRITE		(((pteval_t)2) << 6)
+#define ARM_SMMU_PTE_MEMATTR_OIWB	(((pteval_t)0xf) << 2)
+#define ARM_SMMU_PTE_MEMATTR_NC		(((pteval_t)0x5) << 2)
+#define ARM_SMMU_PTE_MEMATTR_DEV	(((pteval_t)0x1) << 2)
+
+/* Configuration registers */
+#define ARM_SMMU_GR0_sCR0		0x0
+#define sCR0_CLIENTPD			(1 << 0)
+#define sCR0_GFRE			(1 << 1)
+#define sCR0_GFIE			(1 << 2)
+#define sCR0_GCFGFRE			(1 << 4)
+#define sCR0_GCFGFIE			(1 << 5)
+#define sCR0_USFCFG			(1 << 10)
+#define sCR0_VMIDPNE			(1 << 11)
+#define sCR0_PTM			(1 << 12)
+#define sCR0_FB				(1 << 13)
+#define sCR0_BSU_SHIFT			14
+#define sCR0_BSU_MASK			0x3
+
+/* Identification registers */
+#define ARM_SMMU_GR0_ID0		0x20
+#define ARM_SMMU_GR0_ID1		0x24
+#define ARM_SMMU_GR0_ID2		0x28
+#define ARM_SMMU_GR0_ID3		0x2c
+#define ARM_SMMU_GR0_ID4		0x30
+#define ARM_SMMU_GR0_ID5		0x34
+#define ARM_SMMU_GR0_ID6		0x38
+#define ARM_SMMU_GR0_ID7		0x3c
+#define ARM_SMMU_GR0_sGFSR		0x48
+#define ARM_SMMU_GR0_sGFSYNR0		0x50
+#define ARM_SMMU_GR0_sGFSYNR1		0x54
+#define ARM_SMMU_GR0_sGFSYNR2		0x58
+#define ARM_SMMU_GR0_PIDR0		0xfe0
+#define ARM_SMMU_GR0_PIDR1		0xfe4
+#define ARM_SMMU_GR0_PIDR2		0xfe8
+
+#define ID0_S1TS			(1 << 30)
+#define ID0_S2TS			(1 << 29)
+#define ID0_NTS				(1 << 28)
+#define ID0_SMS				(1 << 27)
+#define ID0_PTFS_SHIFT			24
+#define ID0_PTFS_MASK			0x2
+#define ID0_PTFS_V8_ONLY		0x2
+#define ID0_CTTW			(1 << 14)
+#define ID0_NUMIRPT_SHIFT		16
+#define ID0_NUMIRPT_MASK		0xff
+#define ID0_NUMSMRG_SHIFT		0
+#define ID0_NUMSMRG_MASK		0xff
+
+#define ID1_PAGESIZE			(1 << 31)
+#define ID1_NUMPAGENDXB_SHIFT		28
+#define ID1_NUMPAGENDXB_MASK		7
+#define ID1_NUMS2CB_SHIFT		16
+#define ID1_NUMS2CB_MASK		0xff
+#define ID1_NUMCB_SHIFT			0
+#define ID1_NUMCB_MASK			0xff
+
+#define ID2_OAS_SHIFT			4
+#define ID2_OAS_MASK			0xf
+#define ID2_IAS_SHIFT			0
+#define ID2_IAS_MASK			0xf
+#define ID2_UBS_SHIFT			8
+#define ID2_UBS_MASK			0xf
+#define ID2_PTFS_4K			(1 << 12)
+#define ID2_PTFS_16K			(1 << 13)
+#define ID2_PTFS_64K			(1 << 14)
+
+#define PIDR2_ARCH_SHIFT		4
+#define PIDR2_ARCH_MASK			0xf
+
+/* Global TLB invalidation */
+#define ARM_SMMU_GR0_STLBIALL		0x60
+#define ARM_SMMU_GR0_TLBIVMID		0x64
+#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
+#define ARM_SMMU_GR0_TLBIALLH		0x6c
+#define ARM_SMMU_GR0_sTLBGSYNC		0x70
+#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
+#define sTLBGSTATUS_GSACTIVE		(1 << 0)
+#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
+
+/* Stream mapping registers */
+#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
+#define SMR_VALID			(1 << 31)
+#define SMR_MASK_SHIFT			16
+#define SMR_MASK_MASK			0x7fff
+#define SMR_ID_SHIFT			0
+#define SMR_ID_MASK			0x7fff
+
+#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
+#define S2CR_CBNDX_SHIFT		0
+#define S2CR_CBNDX_MASK			0xff
+#define S2CR_TYPE_SHIFT			16
+#define S2CR_TYPE_MASK			0x3
+#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
+#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
+#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)
+
+/* Context bank attribute registers */
+#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
+#define CBAR_VMID_SHIFT			0
+#define CBAR_VMID_MASK			0xff
+#define CBAR_S1_MEMATTR_SHIFT		12
+#define CBAR_S1_MEMATTR_MASK		0xf
+#define CBAR_S1_MEMATTR_WB		0xf
+#define CBAR_TYPE_SHIFT			16
+#define CBAR_TYPE_MASK			0x3
+#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
+#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
+#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
+#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
+#define CBAR_IRPTNDX_SHIFT		24
+#define CBAR_IRPTNDX_MASK		0xff
+
+#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
+#define CBA2R_RW64_32BIT		(0 << 0)
+#define CBA2R_RW64_64BIT		(1 << 0)
+
+/* Translation context bank */
+#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
+#define ARM_SMMU_CB(smmu, n)		((n) * (smmu)->pagesize)
+
+#define ARM_SMMU_CB_SCTLR		0x0
+#define ARM_SMMU_CB_RESUME		0x8
+#define ARM_SMMU_CB_TTBCR2		0x10
+#define ARM_SMMU_CB_TTBR0_LO		0x20
+#define ARM_SMMU_CB_TTBR0_HI		0x24
+#define ARM_SMMU_CB_TTBCR		0x30
+#define ARM_SMMU_CB_S1_MAIR0		0x38
+#define ARM_SMMU_CB_FSR			0x58
+#define ARM_SMMU_CB_FAR_LO		0x60
+#define ARM_SMMU_CB_FAR_HI		0x64
+#define ARM_SMMU_CB_FSYNR0		0x68
+
+#define SCTLR_S1_ASIDPNE		(1 << 12)
+#define SCTLR_CFCFG			(1 << 7)
+#define SCTLR_CFIE			(1 << 6)
+#define SCTLR_CFRE			(1 << 5)
+#define SCTLR_E				(1 << 4)
+#define SCTLR_AFE			(1 << 2)
+#define SCTLR_TRE			(1 << 1)
+#define SCTLR_M				(1 << 0)
+#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)
+
+#define RESUME_RETRY			(0 << 0)
+#define RESUME_TERMINATE		(1 << 0)
+
+#define TTBCR_EAE			(1 << 31)
+
+#define TTBCR_PASIZE_SHIFT		16
+#define TTBCR_PASIZE_MASK		0x7
+
+#define TTBCR_TG0_4K			(0 << 14)
+#define TTBCR_TG0_64K			(1 << 14)
+
+#define TTBCR_SH0_SHIFT			12
+#define TTBCR_SH0_MASK			0x3
+#define TTBCR_SH_NS			0
+#define TTBCR_SH_OS			2
+#define TTBCR_SH_IS			3
+
+#define TTBCR_ORGN0_SHIFT		10
+#define TTBCR_IRGN0_SHIFT		8
+#define TTBCR_RGN_MASK			0x3
+#define TTBCR_RGN_NC			0
+#define TTBCR_RGN_WBWA			1
+#define TTBCR_RGN_WT			2
+#define TTBCR_RGN_WB			3
+
+#define TTBCR_SL0_SHIFT			6
+#define TTBCR_SL0_MASK			0x3
+#define TTBCR_SL0_LVL_2			0
+#define TTBCR_SL0_LVL_1			1
+
+#define TTBCR_T1SZ_SHIFT		16
+#define TTBCR_T0SZ_SHIFT		0
+#define TTBCR_SZ_MASK			0xf
+
+#define TTBCR2_SEP_SHIFT		15
+#define TTBCR2_SEP_MASK			0x7
+
+#define TTBCR2_PASIZE_SHIFT		0
+#define TTBCR2_PASIZE_MASK		0x7
+
+/* Common definitions for PASize and SEP fields */
+#define TTBCR2_ADDR_32			0
+#define TTBCR2_ADDR_36			1
+#define TTBCR2_ADDR_40			2
+#define TTBCR2_ADDR_42			3
+#define TTBCR2_ADDR_44			4
+#define TTBCR2_ADDR_48			5
+
+#define MAIR_ATTR_SHIFT(n)		((n) << 3)
+#define MAIR_ATTR_MASK			0xff
+#define MAIR_ATTR_DEVICE		0x04
+#define MAIR_ATTR_NC			0x44
+#define MAIR_ATTR_WBRWA			0xff
+#define MAIR_ATTR_IDX_NC		0
+#define MAIR_ATTR_IDX_CACHE		1
+#define MAIR_ATTR_IDX_DEV		2
+
+#define FSR_MULTI			(1 << 31)
+#define FSR_SS				(1 << 30)
+#define FSR_UUT				(1 << 8)
+#define FSR_ASF				(1 << 7)
+#define FSR_TLBLKF			(1 << 6)
+#define FSR_TLBMCF			(1 << 5)
+#define FSR_EF				(1 << 4)
+#define FSR_PF				(1 << 3)
+#define FSR_AFF				(1 << 2)
+#define FSR_TF				(1 << 1)
+
+#define FSR_IGN				(FSR_AFF | FSR_ASF | FSR_TLBMCF |	\
+					 FSR_TLBLKF)
+#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT |		\
+					 FSR_EF | FSR_PF | FSR_TF)
+
+#define FSYNR0_WNR			(1 << 4)
+
+struct arm_smmu_smr {
+	u8				idx;
+	u16				mask;
+	u16				id;
+};
+
+struct arm_smmu_master {
+	struct device_node		*of_node;
+
+	/*
+	 * The following is specific to the master's position in the
+	 * SMMU chain.
+	 */
+	struct rb_node			node;
+	int				num_streamids;
+	u16				streamids[MAX_MASTER_STREAMIDS];
+
+	/*
+	 * We only need to allocate these on the root SMMU, as we
+	 * configure unmatched streams to bypass translation.
+	 */
+	struct arm_smmu_smr		*smrs;
+};
+
+struct arm_smmu_device {
+	struct device			*dev;
+	struct device_node		*parent_of_node;
+
+	void __iomem			*base;
+	unsigned long			size;
+	unsigned long			pagesize;
+
+#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
+#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
+#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
+#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
+#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
+	u32				features;
+	int				version;
+
+	u32				num_context_banks;
+	u32				num_s2_context_banks;
+	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
+	atomic_t			irptndx;
+
+	u32				num_mapping_groups;
+	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
+
+	unsigned long			input_size;
+	unsigned long			s1_output_size;
+	unsigned long			s2_output_size;
+
+	u32				num_global_irqs;
+	u32				num_context_irqs;
+	unsigned int			*irqs;
+
+	DECLARE_BITMAP(vmid_map, ARM_SMMU_NUM_VMIDS);
+
+	struct list_head		list;
+	struct rb_root			masters;
+};
+
+struct arm_smmu_cfg {
+	struct arm_smmu_device		*smmu;
+	u8				vmid;
+	u8				cbndx;
+	u8				irptndx;
+	u32				cbar;
+	pgd_t				*pgd;
+};
+
+struct arm_smmu_domain {
+	/*
+	 * A domain can span across multiple, chained SMMUs and requires
+	 * all devices within the domain to follow the same translation
+	 * path.
+	 */
+	struct arm_smmu_device		*leaf_smmu;
+	struct arm_smmu_cfg		root_cfg;
+	phys_addr_t			output_mask;
+
+	spinlock_t			lock;
+};
+
+static DEFINE_SPINLOCK(arm_smmu_devices_lock);
+static LIST_HEAD(arm_smmu_devices);
+
+static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
+						struct device_node *dev_node)
+{
+	struct rb_node *node = smmu->masters.rb_node;
+
+	while (node) {
+		struct arm_smmu_master *master;
+		master = container_of(node, struct arm_smmu_master, node);
+
+		if (dev_node < master->of_node)
+			node = node->rb_left;
+		else if (dev_node > master->of_node)
+			node = node->rb_right;
+		else
+			return master;
+	}
+
+	return NULL;
+}
+
+static int insert_smmu_master(struct arm_smmu_device *smmu,
+			      struct arm_smmu_master *master)
+{
+	struct rb_node **new, *parent;
+
+	new = &smmu->masters.rb_node;
+	parent = NULL;
+	while (*new) {
+		struct arm_smmu_master *this;
+		this = container_of(*new, struct arm_smmu_master, node);
+
+		parent = *new;
+		if (master->of_node < this->of_node)
+			new = &((*new)->rb_left);
+		else if (master->of_node > this->of_node)
+			new = &((*new)->rb_right);
+		else
+			return -EEXIST;
+	}
+
+	rb_link_node(&master->node, parent, new);
+	rb_insert_color(&master->node, &smmu->masters);
+	return 0;
+}
+
+static int register_smmu_master(struct arm_smmu_device *smmu,
+				struct device *dev,
+				struct of_phandle_args *masterspec)
+{
+	int i;
+	struct arm_smmu_master *master;
+
+	master = find_smmu_master(smmu, masterspec->np);
+	if (master) {
+		dev_err(dev,
+			"rejecting multiple registrations for master device %s\n",
+			masterspec->np->name);
+		return -EBUSY;
+	}
+
+	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
+		dev_err(dev,
+			"reached maximum number (%d) of stream IDs for master device %s\n",
+			MAX_MASTER_STREAMIDS, masterspec->np->name);
+		return -ENOSPC;
+	}
+
+	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
+	if (!master)
+		return -ENOMEM;
+
+	master->of_node = masterspec->np;
+	master->num_streamids = masterspec->args_count;
+
+	for (i = 0; i < master->num_streamids; ++i)
+		master->streamids[i] = masterspec->args[i];
+
+	return insert_smmu_master(smmu, master);
+}
+
+static struct arm_smmu_device *find_parent_smmu(struct arm_smmu_device *smmu)
+{
+	struct arm_smmu_device *parent;
+
+	if (!smmu->parent_of_node)
+		return NULL;
+
+	spin_lock(&arm_smmu_devices_lock);
+	list_for_each_entry(parent, &arm_smmu_devices, list)
+		if (parent->dev->of_node == smmu->parent_of_node)
+			goto out_unlock;
+
+	parent = NULL;
+	dev_warn(smmu->dev,
+		 "Failed to find SMMU parent despite parent in DT\n");
+out_unlock:
+	spin_unlock(&arm_smmu_devices_lock);
+	return parent;
+}
+
+static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
+{
+	int idx;
+
+	do {
+		idx = find_next_zero_bit(map, end, start);
+		if (idx == end)
+			return -ENOSPC;
+	} while (test_and_set_bit(idx, map));
+
+	return idx;
+}
+
+static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
+{
+	clear_bit(idx, map);
+}
+
+/* Wait for any pending TLB invalidations to complete */
+static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
+{
+	int count = 0;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
+	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
+	       & sTLBGSTATUS_GSACTIVE) {
+		cpu_relax();
+		if (++count == TLB_LOOP_TIMEOUT) {
+			dev_err_ratelimited(smmu->dev,
+			"TLB sync timed out -- SMMU may be deadlocked\n");
+			return;
+		}
+		udelay(1);
+	}
+}
+
+static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
+{
+	int flags, ret;
+	u32 fsr, far, fsynr, resume;
+	unsigned long iova;
+	struct iommu_domain *domain = dev;
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+	struct arm_smmu_device *smmu = root_cfg->smmu;
+	void __iomem *cb_base;
+
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
+	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+
+	if (!(fsr & FSR_FAULT))
+		return IRQ_NONE;
+
+	if (fsr & FSR_IGN)
+		dev_err_ratelimited(smmu->dev,
+				    "Unexpected context fault (fsr 0x%u)\n",
+				    fsr);
+
+	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
+	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
+
+	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
+	iova = far;
+#ifdef CONFIG_64BIT
+	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
+	iova |= ((unsigned long)far << 32);
+#endif
+
+	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
+		ret = IRQ_HANDLED;
+		resume = RESUME_RETRY;
+	} else {
+		ret = IRQ_NONE;
+		resume = RESUME_TERMINATE;
+	}
+
+	/* Clear the faulting FSR */
+	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
+
+	/* Retry or terminate any stalled transactions */
+	if (fsr & FSR_SS)
+		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
+
+	return ret;
+}
+
+static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
+{
+	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
+	struct arm_smmu_device *smmu = dev;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
+	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
+	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
+
+	dev_err_ratelimited(smmu->dev,
+		"Unexpected global fault, this could be serious\n");
+	dev_err_ratelimited(smmu->dev,
+		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
+		gfsr, gfsynr0, gfsynr1, gfsynr2);
+
+	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
+	return IRQ_NONE;
+}
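On 64-bit builds the faulting address is split across the FAR_LO/FAR_HI context-bank registers and reassembled with a shift-or, exactly as in arm_smmu_context_fault() above. A compact restatement (illustrative helper, not part of the patch):

	/* Reassemble the 64-bit fault address from the two 32-bit FAR
	 * halves; 32-bit builds only read the low word.
	 */
	static unsigned long example_read_far(void __iomem *cb_base)
	{
		unsigned long iova = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
	#ifdef CONFIG_64BIT
		u32 hi = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
		iova |= (unsigned long)hi << 32;
	#endif
		return iova;
	}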
+
+static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
+{
+	u32 reg;
+	bool stage1;
+	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+	struct arm_smmu_device *smmu = root_cfg->smmu;
+	void __iomem *cb_base, *gr0_base, *gr1_base;
+
+	gr0_base = ARM_SMMU_GR0(smmu);
+	gr1_base = ARM_SMMU_GR1(smmu);
+	stage1 = root_cfg->cbar != CBAR_TYPE_S2_TRANS;
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
+
+	/* CBAR */
+	reg = root_cfg->cbar |
+	      (root_cfg->vmid << CBAR_VMID_SHIFT);
+	if (smmu->version == 1)
+		reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;
+
+	/* Use the weakest memory type, so it is overridden by the pte */
+	if (stage1)
+		reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
+	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));
+
+	if (smmu->version > 1) {
+		/* CBA2R */
+#ifdef CONFIG_64BIT
+		reg = CBA2R_RW64_64BIT;
+#else
+		reg = CBA2R_RW64_32BIT;
+#endif
+		writel_relaxed(reg,
+			       gr1_base + ARM_SMMU_GR1_CBA2R(root_cfg->cbndx));
+
+		/* TTBCR2 */
+		switch (smmu->input_size) {
+		case 32:
+			reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
+			break;
+		case 36:
+			reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
+			break;
+		case 39:
+			reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
+			break;
+		case 42:
+			reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
+			break;
+		case 44:
+			reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
+			break;
+		case 48:
+			reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
+			break;
+		}
+
+		switch (smmu->s1_output_size) {
+		case 32:
+			reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT);
+			break;
+		case 36:
+			reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT);
+			break;
+		case 39:
+			reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT);
+			break;
+		case 42:
+			reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT);
+			break;
+		case 44:
+			reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT);
+			break;
+		case 48:
+			reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT);
+			break;
+		}
+
+		if (stage1)
+			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
+	}
+
+	/* TTBR0 */
+	reg = __pa(root_cfg->pgd);
+#ifndef __BIG_ENDIAN
+	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+	reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
+	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
+#else
+	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
+	reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
+	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+#endif
+
+	/*
+	 * TTBCR
+	 * We use long descriptor, with inner-shareable WBWA tables in TTBR0.
+	 */
+	if (smmu->version > 1) {
+		if (PAGE_SIZE == SZ_4K)
+			reg = TTBCR_TG0_4K;
+		else
+			reg = TTBCR_TG0_64K;
+
+		if (!stage1) {
+			switch (smmu->s2_output_size) {
+			case 32:
+				reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT);
+				break;
+			case 36:
+				reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT);
+				break;
+			case 40:
+				reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT);
+				break;
+			case 42:
+				reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT);
+				break;
+			case 44:
+				reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT);
+				break;
+			case 48:
+				reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT);
+				break;
+			}
+		} else {
+			reg |= (64 - smmu->s1_output_size) << TTBCR_T0SZ_SHIFT;
+		}
+	} else {
+		reg = 0;
+	}
+
+	reg |= TTBCR_EAE |
+	      (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
+	      (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
+	      (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) |
+	      (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
+	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
+
+	/* MAIR0 (stage-1 only) */
+	if (stage1) {
+		reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) |
+		      (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) |
+		      (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV));
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
+	}
+
+	/* Nuke the TLB */
+	writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID);
+	arm_smmu_tlb_sync(smmu);
+
+	/* SCTLR */
+	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
+	if (stage1)
+		reg |= SCTLR_S1_ASIDPNE;
+#ifdef __BIG_ENDIAN
+	reg |= SCTLR_E;
+#endif
+	writel(reg, cb_base + ARM_SMMU_CB_SCTLR);
+}
+
+static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+					struct device *dev)
+{
+	int irq, ret, start;
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+	struct arm_smmu_device *smmu, *parent;
+
+	/*
+	 * Walk the SMMU chain to find the root device for this chain.
+	 * We assume that no masters have translations which terminate
+	 * early, and therefore check that the root SMMU does indeed have
+	 * a StreamID for the master in question.
+	 */
+	parent = dev->archdata.iommu;
+	smmu_domain->output_mask = -1;
+	do {
+		smmu = parent;
+		smmu_domain->output_mask &= (1ULL << smmu->s2_output_size) - 1;
+	} while ((parent = find_parent_smmu(smmu)));
+
+	if (!find_smmu_master(smmu, dev->of_node)) {
+		dev_err(dev, "unable to find root SMMU for device\n");
+		return -ENODEV;
+	}
+
+	ret = __arm_smmu_alloc_bitmap(smmu->vmid_map, 0, ARM_SMMU_NUM_VMIDS);
+	if (IS_ERR_VALUE(ret))
+		return ret;
+
+	root_cfg->vmid = ret;
+	if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
+		/*
+		 * We will likely want to change this if/when KVM gets
+		 * involved.
+		 */
+		root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+		start = smmu->num_s2_context_banks;
+	} else if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) {
+		root_cfg->cbar = CBAR_TYPE_S2_TRANS;
+		start = 0;
+	} else {
+		root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+		start = smmu->num_s2_context_banks;
+	}
+
+	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
+				      smmu->num_context_banks);
+	if (IS_ERR_VALUE(ret))
+		goto out_free_vmid;
+
+	root_cfg->cbndx = ret;
+
+	if (smmu->version == 1) {
+		root_cfg->irptndx = atomic_inc_return(&smmu->irptndx);
+		root_cfg->irptndx %= smmu->num_context_irqs;
+	} else {
+		root_cfg->irptndx = root_cfg->cbndx;
+	}
+
+	irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
+	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
+			  "arm-smmu-context-fault", domain);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
+			root_cfg->irptndx, irq);
+		root_cfg->irptndx = -1;
+		goto out_free_context;
+	}
+
+	root_cfg->smmu = smmu;
+	arm_smmu_init_context_bank(smmu_domain);
+	return ret;
+
+out_free_context:
+	__arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
+out_free_vmid:
+	__arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid);
+	return ret;
+}
+
+static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+	struct arm_smmu_device *smmu = root_cfg->smmu;
+	int irq;
+
+	if (!smmu)
+		return;
+
+	if (root_cfg->irptndx != -1) {
+		irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
+		free_irq(irq, domain);
+	}
+
+	__arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid);
+	__arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
+}
+
+static int arm_smmu_domain_init(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain;
+	pgd_t *pgd;
+
+	/*
+	 * Allocate the domain and initialise some of its data structures.
+	 * We can't really do anything meaningful until we've added a
+	 * master.
+	 */
+	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
+	if (!smmu_domain)
+		return -ENOMEM;
+
+	pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+	if (!pgd)
+		goto out_free_domain;
+	smmu_domain->root_cfg.pgd = pgd;
+
+	spin_lock_init(&smmu_domain->lock);
+	domain->priv = smmu_domain;
+	return 0;
+
+out_free_domain:
+	kfree(smmu_domain);
+	return -ENOMEM;
+}
+
+static void arm_smmu_free_ptes(pmd_t *pmd)
+{
+	pgtable_t table = pmd_pgtable(*pmd);
+	pgtable_page_dtor(table);
+	__free_page(table);
+}
+
+static void arm_smmu_free_pmds(pud_t *pud)
+{
+	int i;
+	pmd_t *pmd, *pmd_base = pmd_offset(pud, 0);
+
+	pmd = pmd_base;
+	for (i = 0; i < PTRS_PER_PMD; ++i) {
+		if (pmd_none(*pmd))
+			continue;
+
+		arm_smmu_free_ptes(pmd);
+		pmd++;
+	}
+
+	pmd_free(NULL, pmd_base);
+}
+
+static void arm_smmu_free_puds(pgd_t *pgd)
+{
+	int i;
+	pud_t *pud, *pud_base = pud_offset(pgd, 0);
+
+	pud = pud_base;
+	for (i = 0; i < PTRS_PER_PUD; ++i) {
+		if (pud_none(*pud))
+			continue;
+
+		arm_smmu_free_pmds(pud);
+		pud++;
+	}
+
+	pud_free(NULL, pud_base);
+}
+
+static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
+{
+	int i;
+	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+	pgd_t *pgd, *pgd_base = root_cfg->pgd;
+
+	/*
+	 * Recursively free the page tables for this domain. We don't
+	 * care about speculative TLB filling, because the TLB will be
+	 * nuked next time this context bank is re-allocated and no devices
+	 * currently map to these tables.
+	 */
+	pgd = pgd_base;
+	for (i = 0; i < PTRS_PER_PGD; ++i) {
+		if (pgd_none(*pgd))
+			continue;
+		arm_smmu_free_puds(pgd);
+		pgd++;
+	}
+
+	kfree(pgd_base);
+}
+
+static void arm_smmu_domain_destroy(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	arm_smmu_destroy_domain_context(domain);
+	arm_smmu_free_pgtables(smmu_domain);
+	kfree(smmu_domain);
+}
+
+static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
+					  struct arm_smmu_master *master)
+{
+	int i;
+	struct arm_smmu_smr *smrs;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
+		return 0;
+
+	if (master->smrs)
+		return -EEXIST;
+
+	smrs = kmalloc(sizeof(*smrs) * master->num_streamids, GFP_KERNEL);
+	if (!smrs) {
+		dev_err(smmu->dev, "failed to allocate %d SMRs for master %s\n",
+			master->num_streamids, master->of_node->name);
+		return -ENOMEM;
+	}
+
+	/* Allocate the SMRs on the root SMMU */
+	for (i = 0; i < master->num_streamids; ++i) {
+		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
+						  smmu->num_mapping_groups);
+		if (IS_ERR_VALUE(idx)) {
+			dev_err(smmu->dev, "failed to allocate free SMR\n");
+			goto err_free_smrs;
+		}
+
+		smrs[i] = (struct arm_smmu_smr) {
+			.idx	= idx,
+			.mask	= 0, /* We don't currently share SMRs */
+			.id	= master->streamids[i],
+		};
+	}
+
+	/* It worked! Now, poke the actual hardware */
+	for (i = 0; i < master->num_streamids; ++i) {
+		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
+			  smrs[i].mask << SMR_MASK_SHIFT;
+		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
+	}
+
+	master->smrs = smrs;
+	return 0;
+
+err_free_smrs:
+	while (--i >= 0)
+		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
+	kfree(smrs);
+	return -ENOSPC;
+}
+
+static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
+				      struct arm_smmu_master *master)
+{
+	int i;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+	struct arm_smmu_smr *smrs = master->smrs;
+
+	/* Invalidate the SMRs before freeing back to the allocator */
+	for (i = 0; i < master->num_streamids; ++i) {
+		u8 idx = smrs[i].idx;
+		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
+		__arm_smmu_free_bitmap(smmu->smr_map, idx);
+	}
+
+	master->smrs = NULL;
+	kfree(smrs);
+}
+
+static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu,
+					   struct arm_smmu_master *master)
+{
+	int i;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+	for (i = 0; i < master->num_streamids; ++i) {
+		u16 sid = master->streamids[i];
+		writel_relaxed(S2CR_TYPE_BYPASS,
+			       gr0_base + ARM_SMMU_GR0_S2CR(sid));
+	}
+}
+
+static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
+				      struct arm_smmu_master *master)
+{
+	int i, ret;
+	struct arm_smmu_device *parent, *smmu = smmu_domain->root_cfg.smmu;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+	ret = arm_smmu_master_configure_smrs(smmu, master);
+	if (ret)
+		return ret;
+
+	/* Bypass the leaves */
+	smmu = smmu_domain->leaf_smmu;
+	while ((parent = find_parent_smmu(smmu))) {
+		/*
+		 * We won't have a StreamID match for anything but the root
+		 * smmu, so we only need to worry about StreamID indexing,
+		 * where we must install bypass entries in the S2CRs.
+		 */
+		if (smmu->features & ARM_SMMU_FEAT_STREAM_MATCH)
+			continue;
+
+		arm_smmu_bypass_stream_mapping(smmu, master);
+		smmu = parent;
+	}
+
+	/* Now we're at the root, time to point at our context bank */
+	for (i = 0; i < master->num_streamids; ++i) {
+		u32 idx, s2cr;
+		idx = master->smrs ? master->smrs[i].idx : master->streamids[i];
+		s2cr = (S2CR_TYPE_TRANS << S2CR_TYPE_SHIFT) |
+		       (smmu_domain->root_cfg.cbndx << S2CR_CBNDX_SHIFT);
+		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
+	}
+
+	return 0;
+}
+
+static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
+					  struct arm_smmu_master *master)
+{
+	struct arm_smmu_device *smmu = smmu_domain->root_cfg.smmu;
+
+	/*
+	 * We *must* clear the S2CR first, because freeing the SMR means
+	 * that it can be re-allocated immediately.
+	 */
+	arm_smmu_bypass_stream_mapping(smmu, master);
+	arm_smmu_master_free_smrs(smmu, master);
+}
+
+static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+	int ret = -EINVAL;
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_device *device_smmu = dev->archdata.iommu;
+	struct arm_smmu_master *master;
+
+	if (!device_smmu) {
+		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
+		return -ENXIO;
+	}
+
+	/*
+	 * Sanity check the domain. We don't currently support domains
+	 * that cross between different SMMU chains.
+	 */
+	spin_lock(&smmu_domain->lock);
+	if (!smmu_domain->leaf_smmu) {
+		/* Now that we have a master, we can finalise the domain */
+		ret = arm_smmu_init_domain_context(domain, dev);
+		if (IS_ERR_VALUE(ret))
+			goto err_unlock;
+
+		smmu_domain->leaf_smmu = device_smmu;
+	} else if (smmu_domain->leaf_smmu != device_smmu) {
+		dev_err(dev,
+			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
+			dev_name(smmu_domain->leaf_smmu->dev),
+			dev_name(device_smmu->dev));
+		goto err_unlock;
+	}
+	spin_unlock(&smmu_domain->lock);
+
+	/* Looks ok, so add the device to the domain */
+	master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
+	if (!master)
+		return -ENODEV;
+
+	return arm_smmu_domain_add_master(smmu_domain, master);
+
+err_unlock:
+	spin_unlock(&smmu_domain->lock);
+	return ret;
+}
+
+static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
+{
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_master *master;
+
+	master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
+	if (master)
+		arm_smmu_domain_remove_master(smmu_domain, master);
+}
+
+static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
+				   size_t size)
+{
+	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+
+	/*
+	 * If the SMMU can't walk tables in the CPU caches, treat them
+	 * like non-coherent DMA since we need to flush the new entries
+	 * all the way out to memory. There's no possibility of recursion
+	 * here as the SMMU table walker will not be wired through another
+	 * SMMU.
+	 */
+	if (!(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK))
+		dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
+			     DMA_TO_DEVICE);
+}
+
+static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
+					     unsigned long end)
+{
+	return !(addr & ~ARM_SMMU_PTE_CONT_MASK) &&
+		(addr + ARM_SMMU_PTE_CONT_SIZE <= end);
+}
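A run of ptes qualifies for the contiguous hint only when it starts on an ARM_SMMU_PTE_CONT_SIZE boundary and a full run of ARM_SMMU_PTE_CONT_ENTRIES still fits before end; with 4k pages that is 16 entries, i.e. 64k alignment. A worked check using the helper above (values chosen for illustration):

	static void example_cont_check(void)
	{
		/* 0x10000 is 64k-aligned and a full 16-entry run fits */
		bool ok  = arm_smmu_pte_is_contiguous_range(0x10000, 0x20000); /* true  */
		/* 0x11000 is not 64k-aligned: single-page ptes instead  */
		bool bad = arm_smmu_pte_is_contiguous_range(0x11000, 0x20000); /* false */
		(void)ok; (void)bad;
	}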
The contiguous hint indicates a series
+	 * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically
+	 * contiguous region with the following constraints:
+	 *
+	 *   - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE
+	 *   - Each pte in the region has the contiguous hint bit set
+	 *
+	 * This complicates unmapping (also handled by this code, when
+	 * neither IOMMU_READ nor IOMMU_WRITE is set) because it is
+	 * possible, yet highly unlikely, that a client may unmap only
+	 * part of a contiguous range. This requires clearing of the
+	 * contiguous hint bits in the range before installing the new
+	 * faulting entries.
+	 *
+	 * Note that re-mapping an address range without first unmapping
+	 * it is not supported, so TLB invalidation is not required here
+	 * and is instead performed at unmap and domain-init time.
+	 */
+	do {
+		int i = 1;
+		pteval &= ~ARM_SMMU_PTE_CONT;
+
+		if (arm_smmu_pte_is_contiguous_range(addr, end)) {
+			i = ARM_SMMU_PTE_CONT_ENTRIES;
+			pteval |= ARM_SMMU_PTE_CONT;
+		} else if (pte_val(*pte) &
+			   (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) {
+			int j;
+			pte_t *cont_start;
+			unsigned long idx = pte_index(addr);
+
+			idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1);
+			cont_start = pmd_page_vaddr(*pmd) + idx;
+			for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j)
+				pte_val(*(cont_start + j)) &= ~ARM_SMMU_PTE_CONT;
+
+			arm_smmu_flush_pgtable(smmu, cont_start,
+					       sizeof(*pte) *
+					       ARM_SMMU_PTE_CONT_ENTRIES);
+		}
+
+		do {
+			*pte = pfn_pte(pfn, __pgprot(pteval));
+		} while (pte++, pfn++, addr += PAGE_SIZE, --i);
+	} while (addr != end);
+
+	arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start));
+	return 0;
+}
+
+static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
+				   unsigned long addr, unsigned long end,
+				   phys_addr_t phys, int flags, int stage)
+{
+	int ret;
+	pmd_t *pmd;
+	unsigned long next, pfn = __phys_to_pfn(phys);
+
+#ifndef __PAGETABLE_PMD_FOLDED
+	if (pud_none(*pud)) {
+		pmd = pmd_alloc_one(NULL, addr);
+		if (!pmd)
+			return -ENOMEM;
+	} else
+#endif
+		pmd = pmd_offset(pud, addr);
+
+	do {
+		next = pmd_addr_end(addr, end);
+		ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
+					      flags, stage);
+		pud_populate(NULL, pud, pmd);
+		arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
+		phys += next - addr;
+	} while (pmd++, addr = next, addr < end);
+
+	return ret;
+}
+
+static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
+				   unsigned long addr, unsigned long end,
+				   phys_addr_t phys, int flags, int stage)
+{
+	int ret = 0;
+	pud_t *pud;
+	unsigned long next;
+
+#ifndef __PAGETABLE_PUD_FOLDED
+	if (pgd_none(*pgd)) {
+		pud = pud_alloc_one(NULL, addr);
+		if (!pud)
+			return -ENOMEM;
+	} else
+#endif
+		pud = pud_offset(pgd, addr);
+
+	do {
+		next = pud_addr_end(addr, end);
+		ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
+					      flags, stage);
+		pgd_populate(NULL, pgd, pud);
+		arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
+		phys += next - addr;
+	} while (pud++, addr = next, addr < end);
+
+	return ret;
+}
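For readers tracing the contiguous-hint logic above, the following standalone sketch (not part of the patch) models the eligibility test. It assumes a 4 KiB page and a 16-entry contiguous run, which is what ARM_SMMU_PTE_CONT_ENTRIES works out to for the 4 KiB granule; the EX_* macro names and ex_* function are local stand-ins, not the driver's.

/* Illustration only: when does a mapping qualify for the contiguous hint? */
#include <stdbool.h>
#include <stdio.h>

#define EX_PAGE_SIZE	4096UL
#define EX_CONT_ENTRIES	16UL					/* ptes per contiguous run */
#define EX_CONT_SIZE	(EX_CONT_ENTRIES * EX_PAGE_SIZE)	/* 64 KiB */
#define EX_CONT_MASK	(~(EX_CONT_SIZE - 1))			/* 64 KiB alignment mask */

static bool ex_pte_is_contiguous_range(unsigned long addr, unsigned long end)
{
	/* start must be 64 KiB aligned and a full run must fit before 'end' */
	return !(addr & ~EX_CONT_MASK) && (addr + EX_CONT_SIZE <= end);
}

int main(void)
{
	printf("%d\n", ex_pte_is_contiguous_range(0x10000, 0x30000)); /* 1 */
	printf("%d\n", ex_pte_is_contiguous_range(0x11000, 0x30000)); /* 0: unaligned start */
	printf("%d\n", ex_pte_is_contiguous_range(0x20000, 0x28000)); /* 0: run overruns end */
	return 0;
}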
+static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
+				   unsigned long iova, phys_addr_t paddr,
+				   size_t size, int flags)
+{
+	int ret, stage;
+	unsigned long end;
+	phys_addr_t input_mask, output_mask;
+	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+	pgd_t *pgd = root_cfg->pgd;
+	struct arm_smmu_device *smmu = root_cfg->smmu;
+
+	if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) {
+		stage = 2;
+		output_mask = (1ULL << smmu->s2_output_size) - 1;
+	} else {
+		stage = 1;
+		output_mask = (1ULL << smmu->s1_output_size) - 1;
+	}
+
+	if (!pgd)
+		return -EINVAL;
+
+	if (size & ~PAGE_MASK)
+		return -EINVAL;
+
+	input_mask = (1ULL << smmu->input_size) - 1;
+	if ((phys_addr_t)iova & ~input_mask)
+		return -ERANGE;
+
+	if (paddr & ~output_mask)
+		return -ERANGE;
+
+	spin_lock(&smmu_domain->lock);
+	pgd += pgd_index(iova);
+	end = iova + size;
+	do {
+		unsigned long next = pgd_addr_end(iova, end);
+
+		ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr,
+					      flags, stage);
+		if (ret)
+			goto out_unlock;
+
+		paddr += next - iova;
+		iova = next;
+	} while (pgd++, iova != end);
+
+out_unlock:
+	spin_unlock(&smmu_domain->lock);
+
+	/* Ensure new page tables are visible to the hardware walker */
+	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+		dsb();
+
+	return ret;
+}
+
+static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
+			phys_addr_t paddr, size_t size, int flags)
+{
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_device *smmu;
+
+	/* Check the domain before dereferencing it for the leaf SMMU. */
+	if (!smmu_domain)
+		return -ENODEV;
+
+	smmu = smmu_domain->leaf_smmu;
+	if (!smmu)
+		return -ENODEV;
+
+	/* Check for silent address truncation up the SMMU chain. */
+	if ((phys_addr_t)iova & ~smmu_domain->output_mask)
+		return -ERANGE;
+
+	return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, flags);
+}
+
+static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+			     size_t size)
+{
+	int ret;
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+	struct arm_smmu_device *smmu = root_cfg->smmu;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+	ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
+	writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID);
+	arm_smmu_tlb_sync(smmu);
+	/* Return 0 on failure: a negative errno is not a valid size_t. */
+	return ret ? 0 : size;
+}
+
+static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
+					 dma_addr_t iova)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+	struct arm_smmu_device *smmu = root_cfg->smmu;
+
+	spin_lock(&smmu_domain->lock);
+	pgd = root_cfg->pgd;
+	if (!pgd)
+		goto err_unlock;
+
+	pgd += pgd_index(iova);
+	if (pgd_none_or_clear_bad(pgd))
+		goto err_unlock;
+
+	pud = pud_offset(pgd, iova);
+	if (pud_none_or_clear_bad(pud))
+		goto err_unlock;
+
+	pmd = pmd_offset(pud, iova);
+	if (pmd_none_or_clear_bad(pmd))
+		goto err_unlock;
+
+	pte = pmd_page_vaddr(*pmd) + pte_index(iova);
+	if (pte_none(*pte))
+		goto err_unlock;
+
+	spin_unlock(&smmu_domain->lock);
+	return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK);
+
+err_unlock:
+	spin_unlock(&smmu_domain->lock);
+	dev_warn(smmu->dev,
+		 "invalid (corrupt?) page tables detected for iova 0x%llx\n",
+		 (unsigned long long)iova);
+	return -EINVAL;
+}
+
+static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
+				   unsigned long cap)
+{
+	unsigned long caps = 0;
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+
+	if (smmu_domain->root_cfg.smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+		caps |= IOMMU_CAP_CACHE_COHERENCY;
+
+	return !!(cap & caps);
+}
+
+static int arm_smmu_add_device(struct device *dev)
+{
+	struct arm_smmu_device *child, *parent, *smmu;
+	struct arm_smmu_master *master = NULL;
+
+	spin_lock(&arm_smmu_devices_lock);
+	list_for_each_entry(parent, &arm_smmu_devices, list) {
+		smmu = parent;
+
+		/* Try to find a child of the current SMMU.
*/ + list_for_each_entry(child, &arm_smmu_devices, list) { + if (child->parent_of_node == parent->dev->of_node) { + /* Does the child sit above our master? */ + master = find_smmu_master(child, dev->of_node); + if (master) { + smmu = NULL; + break; + } + } + } + + /* We found some children, so keep searching. */ + if (!smmu) { + master = NULL; + continue; + } + + master = find_smmu_master(smmu, dev->of_node); + if (master) + break; + } + spin_unlock(&arm_smmu_devices_lock); + + if (!master) + return -ENODEV; + + dev->archdata.iommu = smmu; + return 0; +} + +static void arm_smmu_remove_device(struct device *dev) +{ + dev->archdata.iommu = NULL; +} + +static struct iommu_ops arm_smmu_ops = { + .domain_init = arm_smmu_domain_init, + .domain_destroy = arm_smmu_domain_destroy, + .attach_dev = arm_smmu_attach_dev, + .detach_dev = arm_smmu_detach_dev, + .map = arm_smmu_map, + .unmap = arm_smmu_unmap, + .iova_to_phys = arm_smmu_iova_to_phys, + .domain_has_cap = arm_smmu_domain_has_cap, + .add_device = arm_smmu_add_device, + .remove_device = arm_smmu_remove_device, + .pgsize_bitmap = (SECTION_SIZE | + ARM_SMMU_PTE_CONT_SIZE | + PAGE_SIZE), +}; + +static void arm_smmu_device_reset(struct arm_smmu_device *smmu) +{ + void __iomem *gr0_base = ARM_SMMU_GR0(smmu); + int i = 0; + u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0); + + /* Mark all SMRn as invalid and all S2CRn as bypass */ + for (i = 0; i < smmu->num_mapping_groups; ++i) { + writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i)); + writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i)); + } + + /* Invalidate the TLB, just in case */ + writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL); + writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); + writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); + + /* Enable fault reporting */ + scr0 |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE); + + /* Disable TLB broadcasting. 
*/ + scr0 |= (sCR0_VMIDPNE | sCR0_PTM); + + /* Enable client access, but bypass when no mapping is found */ + scr0 &= ~(sCR0_CLIENTPD | sCR0_USFCFG); + + /* Disable forced broadcasting */ + scr0 &= ~sCR0_FB; + + /* Don't upgrade barriers */ + scr0 &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT); + + /* Push the button */ + arm_smmu_tlb_sync(smmu); + writel(scr0, gr0_base + ARM_SMMU_GR0_sCR0); +} + +static int arm_smmu_id_size_to_bits(int size) +{ + switch (size) { + case 0: + return 32; + case 1: + return 36; + case 2: + return 40; + case 3: + return 42; + case 4: + return 44; + case 5: + default: + return 48; + } +} + +static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) +{ + unsigned long size; + void __iomem *gr0_base = ARM_SMMU_GR0(smmu); + u32 id; + + dev_notice(smmu->dev, "probing hardware configuration...\n"); + + /* Primecell ID */ + id = readl_relaxed(gr0_base + ARM_SMMU_GR0_PIDR2); + smmu->version = ((id >> PIDR2_ARCH_SHIFT) & PIDR2_ARCH_MASK) + 1; + dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version); + + /* ID0 */ + id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0); +#ifndef CONFIG_64BIT + if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) { + dev_err(smmu->dev, "\tno v7 descriptor support!\n"); + return -ENODEV; + } +#endif + if (id & ID0_S1TS) { + smmu->features |= ARM_SMMU_FEAT_TRANS_S1; + dev_notice(smmu->dev, "\tstage 1 translation\n"); + } + + if (id & ID0_S2TS) { + smmu->features |= ARM_SMMU_FEAT_TRANS_S2; + dev_notice(smmu->dev, "\tstage 2 translation\n"); + } + + if (id & ID0_NTS) { + smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED; + dev_notice(smmu->dev, "\tnested translation\n"); + } + + if (!(smmu->features & + (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2 | + ARM_SMMU_FEAT_TRANS_NESTED))) { + dev_err(smmu->dev, "\tno translation support!\n"); + return -ENODEV; + } + + if (id & ID0_CTTW) { + smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; + dev_notice(smmu->dev, "\tcoherent table walk\n"); + } + + if (id & ID0_SMS) { + u32 smr, sid, mask; + + smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; + smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) & + ID0_NUMSMRG_MASK; + if (smmu->num_mapping_groups == 0) { + dev_err(smmu->dev, + "stream-matching supported, but no SMRs present!\n"); + return -ENODEV; + } + + smr = SMR_MASK_MASK << SMR_MASK_SHIFT; + smr |= (SMR_ID_MASK << SMR_ID_SHIFT); + writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); + smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); + + mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK; + sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK; + if ((mask & sid) != sid) { + dev_err(smmu->dev, + "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n", + mask, sid); + return -ENODEV; + } + + dev_notice(smmu->dev, + "\tstream matching with %u register groups, mask 0x%x", + smmu->num_mapping_groups, mask); + } + + /* ID1 */ + id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1); + smmu->pagesize = (id & ID1_PAGESIZE) ? 
SZ_64K : SZ_4K; + + /* Check that we ioremapped enough */ + size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); + size *= (smmu->pagesize << 1); + if (smmu->size < size) + dev_warn(smmu->dev, + "device is 0x%lx bytes but only mapped 0x%lx!\n", + size, smmu->size); + + smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & + ID1_NUMS2CB_MASK; + smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK; + if (smmu->num_s2_context_banks > smmu->num_context_banks) { + dev_err(smmu->dev, "impossible number of S2 context banks!\n"); + return -ENODEV; + } + dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n", + smmu->num_context_banks, smmu->num_s2_context_banks); + + /* ID2 */ + id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2); + size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK); + + /* + * Stage-1 output limited by stage-2 input size due to pgd + * allocation (PTRS_PER_PGD). + */ +#ifdef CONFIG_64BIT + /* Current maximum output size of 39 bits */ + smmu->s1_output_size = min(39UL, size); +#else + smmu->s1_output_size = min(32UL, size); +#endif + + /* The stage-2 output mask is also applied for bypass */ + size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); + smmu->s2_output_size = min((unsigned long)PHYS_MASK_SHIFT, size); + + if (smmu->version == 1) { + smmu->input_size = 32; + } else { +#ifdef CONFIG_64BIT + size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK; + size = min(39, arm_smmu_id_size_to_bits(size)); +#else + size = 32; +#endif + smmu->input_size = size; + + if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) || + (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) || + (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) { + dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n", + PAGE_SIZE); + return -ENODEV; + } + } + + dev_notice(smmu->dev, + "\t%lu-bit VA, %lu-bit IPA, %lu-bit PA\n", + smmu->input_size, smmu->s1_output_size, smmu->s2_output_size); + return 0; +} + +static int arm_smmu_device_dt_probe(struct platform_device *pdev) +{ + struct resource *res; + struct arm_smmu_device *smmu; + struct device_node *dev_node; + struct device *dev = &pdev->dev; + struct rb_node *node; + struct of_phandle_args masterspec; + int num_irqs, i, err; + + smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); + if (!smmu) { + dev_err(dev, "failed to allocate arm_smmu_device\n"); + return -ENOMEM; + } + smmu->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "missing base address/size\n"); + return -ENODEV; + } + + smmu->size = resource_size(res); + smmu->base = devm_request_and_ioremap(dev, res); + if (!smmu->base) + return -EADDRNOTAVAIL; + + if (of_property_read_u32(dev->of_node, "#global-interrupts", + &smmu->num_global_irqs)) { + dev_err(dev, "missing #global-interrupts property\n"); + return -ENODEV; + } + + num_irqs = 0; + while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) { + num_irqs++; + if (num_irqs > smmu->num_global_irqs) + smmu->num_context_irqs++; + } + + if (num_irqs < smmu->num_global_irqs) { + dev_warn(dev, "found %d interrupts but expected at least %d\n", + num_irqs, smmu->num_global_irqs); + smmu->num_global_irqs = num_irqs; + } + smmu->num_context_irqs = num_irqs - smmu->num_global_irqs; + + smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs, + GFP_KERNEL); + if (!smmu->irqs) { + dev_err(dev, "failed to allocate %d irqs\n", num_irqs); + return -ENOMEM; + } + + for (i = 0; i < num_irqs; ++i) { + int irq = platform_get_irq(pdev, i); + if (irq < 
0) {
+			dev_err(dev, "failed to get irq index %d\n", i);
+			return -ENODEV;
+		}
+		smmu->irqs[i] = irq;
+	}
+
+	i = 0;
+	smmu->masters = RB_ROOT;
+	while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
+					   "#stream-id-cells", i,
+					   &masterspec)) {
+		err = register_smmu_master(smmu, dev, &masterspec);
+		if (err) {
+			dev_err(dev, "failed to add master %s\n",
+				masterspec.np->name);
+			goto out_put_masters;
+		}
+
+		i++;
+	}
+	dev_notice(dev, "registered %d master devices\n", i);
+
+	if ((dev_node = of_parse_phandle(dev->of_node, "smmu-parent", 0)))
+		smmu->parent_of_node = dev_node;
+
+	err = arm_smmu_device_cfg_probe(smmu);
+	if (err)
+		goto out_put_parent;
+
+	if (smmu->version > 1 &&
+	    smmu->num_context_banks != smmu->num_context_irqs) {
+		dev_err(dev,
+			"found only %d context interrupt(s) but %d required\n",
+			smmu->num_context_irqs, smmu->num_context_banks);
+		err = -ENODEV;
+		goto out_put_parent;
+	}
+
+	arm_smmu_device_reset(smmu);
+
+	for (i = 0; i < smmu->num_global_irqs; ++i) {
+		err = request_irq(smmu->irqs[i],
+				  arm_smmu_global_fault,
+				  IRQF_SHARED,
+				  "arm-smmu global fault",
+				  smmu);
+		if (err) {
+			dev_err(dev, "failed to request global IRQ %d (%u)\n",
+				i, smmu->irqs[i]);
+			goto out_free_irqs;
+		}
+	}
+
+	INIT_LIST_HEAD(&smmu->list);
+	spin_lock(&arm_smmu_devices_lock);
+	list_add(&smmu->list, &arm_smmu_devices);
+	spin_unlock(&arm_smmu_devices_lock);
+	return 0;
+
+out_free_irqs:
+	while (i--)
+		free_irq(smmu->irqs[i], smmu);
+
+out_put_parent:
+	if (smmu->parent_of_node)
+		of_node_put(smmu->parent_of_node);
+
+out_put_masters:
+	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
+		struct arm_smmu_master *master;
+		master = container_of(node, struct arm_smmu_master, node);
+		of_node_put(master->of_node);
+	}
+
+	return err;
+}
+
+static int arm_smmu_device_remove(struct platform_device *pdev)
+{
+	int i;
+	struct device *dev = &pdev->dev;
+	struct arm_smmu_device *curr, *smmu = NULL;
+	struct rb_node *node;
+
+	spin_lock(&arm_smmu_devices_lock);
+	list_for_each_entry(curr, &arm_smmu_devices, list) {
+		if (curr->dev == dev) {
+			smmu = curr;
+			list_del(&smmu->list);
+			break;
+		}
+	}
+	spin_unlock(&arm_smmu_devices_lock);
+
+	if (!smmu)
+		return -ENODEV;
+
+	if (smmu->parent_of_node)
+		of_node_put(smmu->parent_of_node);
+
+	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
+		struct arm_smmu_master *master;
+		master = container_of(node, struct arm_smmu_master, node);
+		of_node_put(master->of_node);
+	}
+
+	if (!bitmap_empty(smmu->vmid_map, ARM_SMMU_NUM_VMIDS))
+		dev_err(dev, "removing device with active domains!\n");
+
+	for (i = 0; i < smmu->num_global_irqs; ++i)
+		free_irq(smmu->irqs[i], smmu);
+
+	/* Turn the thing off */
+	writel(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id arm_smmu_of_match[] = {
+	{ .compatible = "arm,smmu-v1", },
+	{ .compatible = "arm,smmu-v2", },
+	{ .compatible = "arm,mmu-400", },
+	{ .compatible = "arm,mmu-500", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
+#endif
+
+static struct platform_driver arm_smmu_driver = {
+	.driver	= {
+		.owner		= THIS_MODULE,
+		.name		= "arm-smmu",
+		.of_match_table	= of_match_ptr(arm_smmu_of_match),
+	},
+	.probe	= arm_smmu_device_dt_probe,
+	.remove	= arm_smmu_device_remove,
+};
+
+static int __init arm_smmu_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&arm_smmu_driver);
+	if (ret)
+		return ret;
+
+	/* Oh, for a proper bus abstraction */
+	if (!iommu_present(&platform_bus_type))
+		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+
+	if (!iommu_present(&amba_bustype))
+		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+
+	return 0;
+}
+
+static void __exit arm_smmu_exit(void)
+{
+	return platform_driver_unregister(&arm_smmu_driver);
+}
+
+module_init(arm_smmu_init);
+module_exit(arm_smmu_exit);
+
+MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
+MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
+MODULE_LICENSE("GPL v2");
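For context, this is roughly how a consumer would drive an SMMU registered this way through the generic IOMMU API that arm_smmu_ops plugs into. It is a sketch only, not code from this series: example_use_iommu and the IOVA/size values are invented, and error handling is kept minimal.

/* Illustration only: generic IOMMU API usage against a bus with these ops. */
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

static int example_use_iommu(struct device *dev, phys_addr_t pa)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(&platform_bus_type);  /* -> arm_smmu_domain_init */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);           /* -> arm_smmu_attach_dev */
	if (ret)
		goto out_free;

	/* The core splits this into chunks allowed by the driver's pgsize_bitmap. */
	ret = iommu_map(domain, SZ_1G, pa, SZ_64K, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	return 0;

out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}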
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index a7967ceb79e..785675a56a1 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c
@@ -309,6 +309,7 @@ parse_dmar_table(void)
 	struct acpi_table_dmar *dmar;
 	struct acpi_dmar_header *entry_header;
 	int ret = 0;
+	int drhd_count = 0;
 
 	/*
 	 * Do it again, earlier dmar_tbl mapping could be mapped with
@@ -347,6 +348,7 @@ parse_dmar_table(void)
 
 		switch (entry_header->type) {
 		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
+			drhd_count++;
 			ret = dmar_parse_one_drhd(entry_header);
 			break;
 		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
@@ -371,6 +373,8 @@ parse_dmar_table(void)
 		entry_header = ((void *)entry_header + entry_header->length);
 	}
+	if (drhd_count == 0)
+		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
 	return ret;
 }
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index b4f0e28dfa4..eec0d3e04bf 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c
@@ -4182,14 +4182,27 @@ static int intel_iommu_add_device(struct device *dev)
 
 	/*
 	 * If it's a multifunction device that does not support our
-	 * required ACS flags, add to the same group as function 0.
+	 * required ACS flags, add to the same group as the lowest numbered
+	 * function that also does not support the required ACS flags.
 	 */
 	if (dma_pdev->multifunction &&
-	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
-		swap_pci_ref(&dma_pdev,
-			     pci_get_slot(dma_pdev->bus,
-					  PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
-					  0)));
+	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
+		u8 i, slot = PCI_SLOT(dma_pdev->devfn);
+
+		for (i = 0; i < 8; i++) {
+			struct pci_dev *tmp;
+
+			tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
+			if (!tmp)
+				continue;
+
+			if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
+				swap_pci_ref(&dma_pdev, tmp);
+				break;
+			}
+			pci_dev_put(tmp);
+		}
+	}
 
 	/*
 	 * Devices on the root bus go through the iommu.  If that's not us,
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 5b19b2d6ec2..f71673dbb23 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c
@@ -664,8 +664,7 @@ error:
 	 */
 
 	if (x2apic_present)
-		WARN(1, KERN_WARNING
-			"Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
+		pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
 
 	return -1;
 }
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index d8f98b14e2f..fbe9ca734f8 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c
@@ -754,6 +754,38 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
 
+static size_t iommu_pgsize(struct iommu_domain *domain,
+			   unsigned long addr_merge, size_t size)
+{
+	unsigned int pgsize_idx;
+	size_t pgsize;
+
+	/* Max page size that still fits into 'size' */
+	pgsize_idx = __fls(size);
+
+	/* need to consider alignment requirements? */
+	if (likely(addr_merge)) {
+		/* Max page size allowed by address */
+		unsigned int align_pgsize_idx = __ffs(addr_merge);
+		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+	}
+
+	/* build a mask of acceptable page sizes */
+	pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+	/* throw away page sizes not supported by the hardware */
+	pgsize &= domain->ops->pgsize_bitmap;
+
+	/* make sure we're still sane */
+	BUG_ON(!pgsize);
+
+	/* pick the biggest page */
+	pgsize_idx = __fls(pgsize);
+	pgsize = 1UL << pgsize_idx;
+
+	return pgsize;
+}
+
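The arithmetic in iommu_pgsize() is easier to see with concrete numbers. Below is a userspace model (illustration only): GCC builtins stand in for the kernel's __fls()/__ffs(), a 64-bit long is assumed, and the 4 KiB / 2 MiB / 1 GiB page-size bitmap is hypothetical rather than what any driver in this series advertises.

#include <stdio.h>

static unsigned long ex_pgsize(unsigned long bitmap, unsigned long addr_merge,
			       unsigned long size)
{
	/* index of the largest page that still fits into 'size' (__fls) */
	unsigned int idx = 63 - __builtin_clzl(size);

	if (addr_merge) {
		/* largest page the combined iova|paddr alignment allows (__ffs) */
		unsigned int align = __builtin_ctzl(addr_merge);
		if (align < idx)
			idx = align;
	}

	bitmap &= (1UL << (idx + 1)) - 1;		/* keep only sizes that fit */
	return 1UL << (63 - __builtin_clzl(bitmap));	/* pick the biggest one */
}

int main(void)
{
	unsigned long bm = (1UL << 12) | (1UL << 21) | (1UL << 30);

	/* 4 MiB request, 2 MiB aligned: a 2 MiB page is chosen */
	printf("0x%lx\n", ex_pgsize(bm, 0x200000, 0x400000));
	/* same request, only 4 KiB aligned: falls back to 4 KiB */
	printf("0x%lx\n", ex_pgsize(bm, 0x201000, 0x400000));
	return 0;
}

(The kernel version BUG_ON()s when the alignment mask and the bitmap leave no candidate; this model assumes a nonzero result.)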
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	      phys_addr_t paddr, size_t size, int prot)
 {
@@ -775,45 +807,18 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	 * size of the smallest page supported by the hardware
 	 */
 	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
-		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
-			"0x%x\n", iova, (unsigned long)paddr,
-			(unsigned long)size, min_pagesz);
+		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
+		       iova, &paddr, size, min_pagesz);
 		return -EINVAL;
 	}
 
-	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
-		 (unsigned long)paddr, (unsigned long)size);
+	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
 
 	while (size) {
-		unsigned long pgsize, addr_merge = iova | paddr;
-		unsigned int pgsize_idx;
-
-		/* Max page size that still fits into 'size' */
-		pgsize_idx = __fls(size);
-
-		/* need to consider alignment requirements ? */
-		if (likely(addr_merge)) {
-			/* Max page size allowed by both iova and paddr */
-			unsigned int align_pgsize_idx = __ffs(addr_merge);
-
-			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
-		}
-
-		/* build a mask of acceptable page sizes */
-		pgsize = (1UL << (pgsize_idx + 1)) - 1;
-
-		/* throw away page sizes not supported by the hardware */
-		pgsize &= domain->ops->pgsize_bitmap;
-
-		/* make sure we're still sane */
-		BUG_ON(!pgsize);
-
-		/* pick the biggest page */
-		pgsize_idx = __fls(pgsize);
-		pgsize = 1UL << pgsize_idx;
+		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
 
-		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
-			 (unsigned long)paddr, pgsize);
+		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
+			 iova, &paddr, pgsize);
 
 		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
 		if (ret)
@@ -850,27 +855,26 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 	 * by the hardware
 	 */
 	if (!IS_ALIGNED(iova | size, min_pagesz)) {
-		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
-		       iova, (unsigned long)size, min_pagesz);
+		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
+		       iova, size, min_pagesz);
 		return -EINVAL;
 	}
 
-	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
-		 (unsigned long)size);
+	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
 
 	/*
 	 * Keep iterating until we either unmap 'size' bytes (or more)
 	 * or we hit an area that isn't mapped.
*/ while (unmapped < size) { - size_t left = size - unmapped; + size_t pgsize = iommu_pgsize(domain, iova, size - unmapped); - unmapped_page = domain->ops->unmap(domain, iova, left); + unmapped_page = domain->ops->unmap(domain, iova, pgsize); if (!unmapped_page) break; - pr_debug("unmapped: iova 0x%lx size %lx\n", iova, - (unsigned long)unmapped_page); + pr_debug("unmapped: iova 0x%lx size 0x%zx\n", + iova, unmapped_page); iova += unmapped_page; unmapped += unmapped_page; diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 1760ceb68b7..19ceaa60e0f 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -705,7 +705,7 @@ static int gic_irq_domain_xlate(struct irq_domain *d, static int __cpuinit gic_secondary_init(struct notifier_block *nfb, unsigned long action, void *hcpu) { - if (action == CPU_STARTING) + if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) gic_cpu_init(&gic_data[0]); return NOTIFY_OK; } diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index 7f5a7cac6dc..8270388e2a0 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig @@ -136,9 +136,9 @@ config DVB_NET # This Kconfig option is used by both PCI and USB drivers config TTPCI_EEPROM - tristate - depends on I2C - default n + tristate + depends on I2C + default n source "drivers/media/dvb-core/Kconfig" @@ -189,6 +189,12 @@ config MEDIA_SUBDRV_AUTOSELECT If unsure say Y. +config MEDIA_ATTACH + bool + depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT + depends on MODULES + default MODULES + source "drivers/media/i2c/Kconfig" source "drivers/media/tuners/Kconfig" source "drivers/media/dvb-frontends/Kconfig" diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c index cb52438e53a..9eac5310942 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c @@ -956,7 +956,7 @@ static int s5c73m3_oif_enum_frame_interval(struct v4l2_subdev *sd, if (fie->pad != OIF_SOURCE_PAD) return -EINVAL; - if (fie->index > ARRAY_SIZE(s5c73m3_intervals)) + if (fie->index >= ARRAY_SIZE(s5c73m3_intervals)) return -EINVAL; mutex_lock(&state->lock); diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c index 27d62623274..aba5b1c649e 100644 --- a/drivers/media/pci/cx88/cx88-alsa.c +++ b/drivers/media/pci/cx88/cx88-alsa.c @@ -615,7 +615,7 @@ static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol, int changed = 0; u32 old; - if (core->board.audio_chip == V4L2_IDENT_WM8775) + if (core->sd_wm8775) snd_cx88_wm8775_volume_put(kcontrol, value); left = value->value.integer.value[0] & 0x3f; @@ -682,8 +682,7 @@ static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol, vol ^= bit; cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol); /* Pass mute onto any WM8775 */ - if ((core->board.audio_chip == V4L2_IDENT_WM8775) && - ((1<<6) == bit)) + if (core->sd_wm8775 && ((1<<6) == bit)) wm8775_s_ctrl(core, V4L2_CID_AUDIO_MUTE, 0 != (vol & bit)); ret = 1; } @@ -903,7 +902,7 @@ static int cx88_audio_initdev(struct pci_dev *pci, goto error; /* If there's a wm8775 then add a Line-In ALC switch */ - if (core->board.audio_chip == V4L2_IDENT_WM8775) + if (core->sd_wm8775) snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip)); strcpy (card->driver, "CX88x"); diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index 1b00615fd39..c7a9be1065c 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ 
b/drivers/media/pci/cx88/cx88-video.c @@ -385,8 +385,7 @@ int cx88_video_mux(struct cx88_core *core, unsigned int input) /* The wm8775 module has the "2" route hardwired into the initialization. Some boards may use different routes for different inputs. HVR-1300 surely does */ - if (core->board.audio_chip && - core->board.audio_chip == V4L2_IDENT_WM8775) { + if (core->sd_wm8775) { call_all(core, audio, s_routing, INPUT(input).audioroute, 0, 0); } @@ -771,8 +770,7 @@ static int video_open(struct file *file) cx_write(MO_GP1_IO, core->board.radio.gpio1); cx_write(MO_GP2_IO, core->board.radio.gpio2); if (core->board.radio.audioroute) { - if(core->board.audio_chip && - core->board.audio_chip == V4L2_IDENT_WM8775) { + if (core->sd_wm8775) { call_all(core, audio, s_routing, core->board.radio.audioroute, 0, 0); } @@ -959,7 +957,7 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl) u32 value,mask; /* Pass changes onto any WM8775 */ - if (core->board.audio_chip == V4L2_IDENT_WM8775) { + if (core->sd_wm8775) { switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: wm8775_s_ctrl(core, ctrl->id, ctrl->val); diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c index 48b8d7af386..9d1481a60bd 100644 --- a/drivers/media/platform/coda.c +++ b/drivers/media/platform/coda.c @@ -576,6 +576,14 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf); } +static int vidioc_create_bufs(struct file *file, void *priv, + struct v4l2_create_buffers *create) +{ + struct coda_ctx *ctx = fh_to_ctx(priv); + + return v4l2_m2m_create_bufs(file, ctx->m2m_ctx, create); +} + static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { @@ -610,6 +618,7 @@ static const struct v4l2_ioctl_ops coda_ioctl_ops = { .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, + .vidioc_create_bufs = vidioc_create_bufs, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c index 1802f11e939..d0b375cf565 100644 --- a/drivers/media/platform/davinci/vpbe_display.c +++ b/drivers/media/platform/davinci/vpbe_display.c @@ -916,6 +916,21 @@ static int vpbe_display_s_fmt(struct file *file, void *priv, other video window */ layer->pix_fmt = *pixfmt; + if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12) { + struct vpbe_layer *otherlayer; + + otherlayer = _vpbe_display_get_other_win_layer(disp_dev, layer); + /* if other layer is available, only + * claim it, do not configure it + */ + ret = osd_device->ops.request_layer(osd_device, + otherlayer->layer_info.id); + if (ret < 0) { + v4l2_err(&vpbe_dev->v4l2_dev, + "Display Manager failed to allocate layer\n"); + return -EBUSY; + } + } /* Get osd layer config */ osd_device->ops.get_layer_config(osd_device, diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c index 8c50d307486..93609091cb2 100644 --- a/drivers/media/platform/davinci/vpfe_capture.c +++ b/drivers/media/platform/davinci/vpfe_capture.c @@ -1837,7 +1837,7 @@ static int vpfe_probe(struct platform_device *pdev) if (NULL == ccdc_cfg) { v4l2_err(pdev->dev.driver, "Memory allocation failed for ccdc_cfg\n"); - goto probe_free_lock; + goto probe_free_dev_mem; } mutex_lock(&ccdc_lock); @@ -1991,7 +1991,6 @@ probe_out_release_irq: free_irq(vpfe_dev->ccdc_irq0, vpfe_dev); probe_free_ccdc_cfg_mem: kfree(ccdc_cfg); -probe_free_lock: mutex_unlock(&ccdc_lock); 
probe_free_dev_mem: kfree(vpfe_dev); diff --git a/drivers/media/platform/exynos4-is/fimc-is-regs.c b/drivers/media/platform/exynos4-is/fimc-is-regs.c index b0ff67bc1b0..d05eaa2c849 100644 --- a/drivers/media/platform/exynos4-is/fimc-is-regs.c +++ b/drivers/media/platform/exynos4-is/fimc-is-regs.c @@ -174,7 +174,7 @@ int fimc_is_hw_change_mode(struct fimc_is *is) HIC_CAPTURE_STILL, HIC_CAPTURE_VIDEO, }; - if (WARN_ON(is->config_index > ARRAY_SIZE(cmd))) + if (WARN_ON(is->config_index >= ARRAY_SIZE(cmd))) return -EINVAL; mcuctl_write(cmd[is->config_index], is, MCUCTL_REG_ISSR(0)); diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c index 47c6363d04e..0741945b79e 100644 --- a/drivers/media/platform/exynos4-is/fimc-is.c +++ b/drivers/media/platform/exynos4-is/fimc-is.c @@ -48,7 +48,6 @@ static char *fimc_is_clocks[ISS_CLKS_MAX] = { [ISS_CLK_LITE0] = "lite0", [ISS_CLK_LITE1] = "lite1", [ISS_CLK_MPLL] = "mpll", - [ISS_CLK_SYSREG] = "sysreg", [ISS_CLK_ISP] = "isp", [ISS_CLK_DRC] = "drc", [ISS_CLK_FD] = "fd", @@ -71,7 +70,6 @@ static void fimc_is_put_clocks(struct fimc_is *is) for (i = 0; i < ISS_CLKS_MAX; i++) { if (IS_ERR(is->clocks[i])) continue; - clk_unprepare(is->clocks[i]); clk_put(is->clocks[i]); is->clocks[i] = ERR_PTR(-EINVAL); } @@ -90,12 +88,6 @@ static int fimc_is_get_clocks(struct fimc_is *is) ret = PTR_ERR(is->clocks[i]); goto err; } - ret = clk_prepare(is->clocks[i]); - if (ret < 0) { - clk_put(is->clocks[i]); - is->clocks[i] = ERR_PTR(-EINVAL); - goto err; - } } return 0; @@ -103,7 +95,7 @@ err: fimc_is_put_clocks(is); dev_err(&is->pdev->dev, "failed to get clock: %s\n", fimc_is_clocks[i]); - return -ENXIO; + return ret; } static int fimc_is_setup_clocks(struct fimc_is *is) @@ -144,7 +136,7 @@ int fimc_is_enable_clocks(struct fimc_is *is) for (i = 0; i < ISS_GATE_CLKS_MAX; i++) { if (IS_ERR(is->clocks[i])) continue; - ret = clk_enable(is->clocks[i]); + ret = clk_prepare_enable(is->clocks[i]); if (ret < 0) { dev_err(&is->pdev->dev, "clock %s enable failed\n", fimc_is_clocks[i]); @@ -163,7 +155,7 @@ void fimc_is_disable_clocks(struct fimc_is *is) for (i = 0; i < ISS_GATE_CLKS_MAX; i++) { if (!IS_ERR(is->clocks[i])) { - clk_disable(is->clocks[i]); + clk_disable_unprepare(is->clocks[i]); pr_debug("disabled clock: %s\n", fimc_is_clocks[i]); } } @@ -326,6 +318,11 @@ int fimc_is_start_firmware(struct fimc_is *is) struct device *dev = &is->pdev->dev; int ret; + if (is->fw.f_w == NULL) { + dev_err(dev, "firmware is not loaded\n"); + return -EINVAL; + } + memcpy(is->memory.vaddr, is->fw.f_w->data, is->fw.f_w->size); wmb(); @@ -837,23 +834,11 @@ static int fimc_is_probe(struct platform_device *pdev) goto err_clk; } pm_runtime_enable(dev); - /* - * Enable only the ISP power domain, keep FIMC-IS clocks off until - * the whole clock tree is configured. The ISP power domain needs - * be active in order to acces any CMU_ISP clock registers. 
- */ - ret = pm_runtime_get_sync(dev); - if (ret < 0) - goto err_irq; - - ret = fimc_is_setup_clocks(is); - pm_runtime_put_sync(dev); + ret = pm_runtime_get_sync(dev); if (ret < 0) goto err_irq; - is->clk_init = true; - is->alloc_ctx = vb2_dma_contig_init_ctx(dev); if (IS_ERR(is->alloc_ctx)) { ret = PTR_ERR(is->alloc_ctx); @@ -875,6 +860,8 @@ static int fimc_is_probe(struct platform_device *pdev) if (ret < 0) goto err_dfs; + pm_runtime_put_sync(dev); + dev_dbg(dev, "FIMC-IS registered successfully\n"); return 0; @@ -894,9 +881,11 @@ err_clk: static int fimc_is_runtime_resume(struct device *dev) { struct fimc_is *is = dev_get_drvdata(dev); + int ret; - if (!is->clk_init) - return 0; + ret = fimc_is_setup_clocks(is); + if (ret) + return ret; return fimc_is_enable_clocks(is); } @@ -905,9 +894,7 @@ static int fimc_is_runtime_suspend(struct device *dev) { struct fimc_is *is = dev_get_drvdata(dev); - if (is->clk_init) - fimc_is_disable_clocks(is); - + fimc_is_disable_clocks(is); return 0; } @@ -941,7 +928,8 @@ static int fimc_is_remove(struct platform_device *pdev) vb2_dma_contig_cleanup_ctx(is->alloc_ctx); fimc_is_put_clocks(is); fimc_is_debugfs_remove(is); - release_firmware(is->fw.f_w); + if (is->fw.f_w) + release_firmware(is->fw.f_w); fimc_is_free_cpu_memory(is); return 0; diff --git a/drivers/media/platform/exynos4-is/fimc-is.h b/drivers/media/platform/exynos4-is/fimc-is.h index f5275a5b015..d7db133b493 100644 --- a/drivers/media/platform/exynos4-is/fimc-is.h +++ b/drivers/media/platform/exynos4-is/fimc-is.h @@ -73,7 +73,6 @@ enum { ISS_CLK_LITE0, ISS_CLK_LITE1, ISS_CLK_MPLL, - ISS_CLK_SYSREG, ISS_CLK_ISP, ISS_CLK_DRC, ISS_CLK_FD, @@ -265,7 +264,6 @@ struct fimc_is { spinlock_t slock; struct clk *clocks[ISS_CLKS_MAX]; - bool clk_init; void __iomem *regs; void __iomem *pmu_regs; int irq; diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c index d63947f7b30..7ede30b5910 100644 --- a/drivers/media/platform/exynos4-is/fimc-isp.c +++ b/drivers/media/platform/exynos4-is/fimc-isp.c @@ -138,7 +138,7 @@ static int fimc_isp_subdev_get_fmt(struct v4l2_subdev *sd, return 0; } - mf->colorspace = V4L2_COLORSPACE_JPEG; + mf->colorspace = V4L2_COLORSPACE_SRGB; mutex_lock(&isp->subdev_lock); __is_get_frame_size(is, &cur_fmt); @@ -194,7 +194,7 @@ static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd, v4l2_dbg(1, debug, sd, "%s: pad%d: code: 0x%x, %dx%d\n", __func__, fmt->pad, mf->code, mf->width, mf->height); - mf->colorspace = V4L2_COLORSPACE_JPEG; + mf->colorspace = V4L2_COLORSPACE_SRGB; mutex_lock(&isp->subdev_lock); __isp_subdev_try_format(isp, fmt); diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c index a2eda9d5ac8..254d70fe762 100644 --- a/drivers/media/platform/exynos4-is/mipi-csis.c +++ b/drivers/media/platform/exynos4-is/mipi-csis.c @@ -746,7 +746,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev, node = v4l2_of_get_next_endpoint(node, NULL); if (!node) { dev_err(&pdev->dev, "No port node at %s\n", - node->full_name); + pdev->dev.of_node->full_name); return -EINVAL; } /* Get port node and validate MIPI-CSI channel id. 
*/ diff --git a/drivers/media/platform/s3c-camif/camif-core.h b/drivers/media/platform/s3c-camif/camif-core.h index 261134baa65..35d2fcdc003 100644 --- a/drivers/media/platform/s3c-camif/camif-core.h +++ b/drivers/media/platform/s3c-camif/camif-core.h @@ -229,7 +229,7 @@ struct camif_vp { unsigned int state; u16 fmt_flags; u8 id; - u8 rotation; + u16 rotation; u8 hflip; u8 vflip; unsigned int offset; diff --git a/drivers/media/platform/s5p-jpeg/Makefile b/drivers/media/platform/s5p-jpeg/Makefile index ddc2900d88a..d18cb5edd2d 100644 --- a/drivers/media/platform/s5p-jpeg/Makefile +++ b/drivers/media/platform/s5p-jpeg/Makefile @@ -1,2 +1,2 @@ s5p-jpeg-objs := jpeg-core.o -obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) := s5p-jpeg.o +obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg.o diff --git a/drivers/media/platform/s5p-mfc/Makefile b/drivers/media/platform/s5p-mfc/Makefile index 379008c6d09..15f59b324fe 100644 --- a/drivers/media/platform/s5p-mfc/Makefile +++ b/drivers/media/platform/s5p-mfc/Makefile @@ -1,4 +1,4 @@ -obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) := s5p-mfc.o +obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc.o s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_pm.o diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index 01f9ae0dadb..d12faa691af 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -397,7 +397,7 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx, leave_handle_frame: spin_unlock_irqrestore(&dev->irqlock, flags); if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING) - || ctx->dst_queue_cnt < ctx->dpb_count) + || ctx->dst_queue_cnt < ctx->pb_count) clear_work_bit(ctx); s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); wake_up_ctx(ctx, reason, err); @@ -473,7 +473,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx, s5p_mfc_hw_call(dev->mfc_ops, dec_calc_dpb_size, ctx); - ctx->dpb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count, + ctx->pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count, dev); ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count, dev); @@ -562,7 +562,7 @@ static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx, struct s5p_mfc_dev *dev = ctx->dev; struct s5p_mfc_buf *mb_entry; - mfc_debug(2, "Stream completed"); + mfc_debug(2, "Stream completed\n"); s5p_mfc_clear_int_flags(dev); ctx->int_type = reason; @@ -1362,7 +1362,6 @@ static struct s5p_mfc_variant mfc_drvdata_v5 = { .port_num = MFC_NUM_PORTS, .buf_size = &buf_size_v5, .buf_align = &mfc_buf_align_v5, - .mclk_name = "sclk_mfc", .fw_name = "s5p-mfc.fw", }; @@ -1389,7 +1388,6 @@ static struct s5p_mfc_variant mfc_drvdata_v6 = { .port_num = MFC_NUM_PORTS_V6, .buf_size = &buf_size_v6, .buf_align = &mfc_buf_align_v6, - .mclk_name = "aclk_333", .fw_name = "s5p-mfc-v6.fw", }; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h index 202d1d7a37a..ef4074cd531 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h @@ -138,6 +138,7 @@ enum s5p_mfc_inst_state { MFCINST_INIT = 100, MFCINST_GOT_INST, MFCINST_HEAD_PARSED, + MFCINST_HEAD_PRODUCED, MFCINST_BUFS_SET, MFCINST_RUNNING, MFCINST_FINISHING, @@ -231,7 +232,6 @@ struct s5p_mfc_variant { unsigned int port_num; struct s5p_mfc_buf_size *buf_size; struct s5p_mfc_buf_align *buf_align; - char *mclk_name; char *fw_name; }; @@ -438,7 +438,7 @@ 
struct s5p_mfc_enc_params { u32 rc_framerate_num; u32 rc_framerate_denom; - union { + struct { struct s5p_mfc_h264_enc_params h264; struct s5p_mfc_mpeg4_enc_params mpeg4; } codec; @@ -602,7 +602,7 @@ struct s5p_mfc_ctx { int after_packed_pb; int sei_fp_parse; - int dpb_count; + int pb_count; int total_dpb_count; int mv_count; /* Buffers */ diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c index 2e5f30b40de..dc1fc94a488 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c @@ -38,7 +38,7 @@ int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev) dev->fw_virt_addr = dma_alloc_coherent(dev->mem_dev_l, dev->fw_size, &dev->bank1, GFP_KERNEL); - if (IS_ERR(dev->fw_virt_addr)) { + if (IS_ERR_OR_NULL(dev->fw_virt_addr)) { dev->fw_virt_addr = NULL; mfc_err("Allocating bitprocessor buffer failed\n"); return -ENOMEM; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h b/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h index bd5cd4ae993..8e608f5aa0d 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h @@ -30,8 +30,8 @@ extern int debug; #define mfc_debug(level, fmt, args...) #endif -#define mfc_debug_enter() mfc_debug(5, "enter") -#define mfc_debug_leave() mfc_debug(5, "leave") +#define mfc_debug_enter() mfc_debug(5, "enter\n") +#define mfc_debug_leave() mfc_debug(5, "leave\n") #define mfc_err(fmt, args...) \ do { \ diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c index 4af53bd2f18..00b07032f4f 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c @@ -210,11 +210,11 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx) /* Context is to decode a frame */ if (ctx->src_queue_cnt >= 1 && ctx->state == MFCINST_RUNNING && - ctx->dst_queue_cnt >= ctx->dpb_count) + ctx->dst_queue_cnt >= ctx->pb_count) return 1; /* Context is to return last frame */ if (ctx->state == MFCINST_FINISHING && - ctx->dst_queue_cnt >= ctx->dpb_count) + ctx->dst_queue_cnt >= ctx->pb_count) return 1; /* Context is to set buffers */ if (ctx->src_queue_cnt >= 1 && @@ -224,7 +224,7 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx) /* Resolution change */ if ((ctx->state == MFCINST_RES_CHANGE_INIT || ctx->state == MFCINST_RES_CHANGE_FLUSH) && - ctx->dst_queue_cnt >= ctx->dpb_count) + ctx->dst_queue_cnt >= ctx->pb_count) return 1; if (ctx->state == MFCINST_RES_CHANGE_END && ctx->src_queue_cnt >= 1) @@ -537,7 +537,7 @@ static int vidioc_reqbufs(struct file *file, void *priv, mfc_err("vb2_reqbufs on capture failed\n"); return ret; } - if (reqbufs->count < ctx->dpb_count) { + if (reqbufs->count < ctx->pb_count) { mfc_err("Not enough buffers allocated\n"); reqbufs->count = 0; s5p_mfc_clock_on(); @@ -751,7 +751,7 @@ static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl) case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: if (ctx->state >= MFCINST_HEAD_PARSED && ctx->state < MFCINST_ABORT) { - ctrl->val = ctx->dpb_count; + ctrl->val = ctx->pb_count; break; } else if (ctx->state != MFCINST_INIT) { v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n"); @@ -763,7 +763,7 @@ static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl) S5P_MFC_R2H_CMD_SEQ_DONE_RET, 0); if (ctx->state >= MFCINST_HEAD_PARSED && ctx->state < MFCINST_ABORT) { - ctrl->val = ctx->dpb_count; + ctrl->val = ctx->pb_count; } else { v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n"); return -EINVAL; 
@@ -924,10 +924,10 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq, /* Output plane count is 2 - one for Y and one for CbCr */ *plane_count = 2; /* Setup buffer count */ - if (*buf_count < ctx->dpb_count) - *buf_count = ctx->dpb_count; - if (*buf_count > ctx->dpb_count + MFC_MAX_EXTRA_DPB) - *buf_count = ctx->dpb_count + MFC_MAX_EXTRA_DPB; + if (*buf_count < ctx->pb_count) + *buf_count = ctx->pb_count; + if (*buf_count > ctx->pb_count + MFC_MAX_EXTRA_DPB) + *buf_count = ctx->pb_count + MFC_MAX_EXTRA_DPB; if (*buf_count > MFC_MAX_BUFFERS) *buf_count = MFC_MAX_BUFFERS; } else { diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c index 4f6b553c4b2..2549967b2f8 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c @@ -592,7 +592,7 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx) return 1; /* context is ready to encode a frame */ if ((ctx->state == MFCINST_RUNNING || - ctx->state == MFCINST_HEAD_PARSED) && + ctx->state == MFCINST_HEAD_PRODUCED) && ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1) return 1; /* context is ready to encode remaining frames */ @@ -649,6 +649,7 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx) struct s5p_mfc_enc_params *p = &ctx->enc_params; struct s5p_mfc_buf *dst_mb; unsigned long flags; + unsigned int enc_pb_count; if (p->seq_hdr_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) { spin_lock_irqsave(&dev->irqlock, flags); @@ -661,18 +662,19 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx) vb2_buffer_done(dst_mb->b, VB2_BUF_STATE_DONE); spin_unlock_irqrestore(&dev->irqlock, flags); } - if (IS_MFCV6(dev)) { - ctx->state = MFCINST_HEAD_PARSED; /* for INIT_BUFFER cmd */ - } else { + + if (!IS_MFCV6(dev)) { ctx->state = MFCINST_RUNNING; if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); - } - - if (IS_MFCV6(dev)) - ctx->dpb_count = s5p_mfc_hw_call(dev->mfc_ops, + } else { + enc_pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_enc_dpb_count, dev); + if (ctx->pb_count < enc_pb_count) + ctx->pb_count = enc_pb_count; + ctx->state = MFCINST_HEAD_PRODUCED; + } return 0; } @@ -717,9 +719,9 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx) slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev); strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev); - mfc_debug(2, "Encoded slice type: %d", slice_type); - mfc_debug(2, "Encoded stream size: %d", strm_size); - mfc_debug(2, "Display order: %d", + mfc_debug(2, "Encoded slice type: %d\n", slice_type); + mfc_debug(2, "Encoded stream size: %d\n", strm_size); + mfc_debug(2, "Display order: %d\n", mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT)); spin_lock_irqsave(&dev->irqlock, flags); if (slice_type >= 0) { @@ -1055,15 +1057,13 @@ static int vidioc_reqbufs(struct file *file, void *priv, } ctx->capture_state = QUEUE_BUFS_REQUESTED; - if (!IS_MFCV6(dev)) { - ret = s5p_mfc_hw_call(ctx->dev->mfc_ops, - alloc_codec_buffers, ctx); - if (ret) { - mfc_err("Failed to allocate encoding buffers\n"); - reqbufs->count = 0; - ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); - return -ENOMEM; - } + ret = s5p_mfc_hw_call(ctx->dev->mfc_ops, + alloc_codec_buffers, ctx); + if (ret) { + mfc_err("Failed to allocate encoding buffers\n"); + reqbufs->count = 0; + ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); + return -ENOMEM; } } else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { if (ctx->output_state != QUEUE_FREE) { @@ -1071,6 +1071,19 @@ static int 
vidioc_reqbufs(struct file *file, void *priv, ctx->output_state); return -EINVAL; } + + if (IS_MFCV6(dev)) { + /* Check for min encoder buffers */ + if (ctx->pb_count && + (reqbufs->count < ctx->pb_count)) { + reqbufs->count = ctx->pb_count; + mfc_debug(2, "Minimum %d output buffers needed\n", + ctx->pb_count); + } else { + ctx->pb_count = reqbufs->count; + } + } + ret = vb2_reqbufs(&ctx->vq_src, reqbufs); if (ret != 0) { mfc_err("error in vb2_reqbufs() for E(S)\n"); @@ -1533,14 +1546,14 @@ int vidioc_encoder_cmd(struct file *file, void *priv, spin_lock_irqsave(&dev->irqlock, flags); if (list_empty(&ctx->src_queue)) { - mfc_debug(2, "EOS: empty src queue, entering finishing state"); + mfc_debug(2, "EOS: empty src queue, entering finishing state\n"); ctx->state = MFCINST_FINISHING; if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); spin_unlock_irqrestore(&dev->irqlock, flags); s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); } else { - mfc_debug(2, "EOS: marking last buffer of stream"); + mfc_debug(2, "EOS: marking last buffer of stream\n"); buf = list_entry(ctx->src_queue.prev, struct s5p_mfc_buf, list); if (buf->flags & MFC_BUF_FLAG_USED) @@ -1609,9 +1622,9 @@ static int check_vb_with_fmt(struct s5p_mfc_fmt *fmt, struct vb2_buffer *vb) mfc_err("failed to get plane cookie\n"); return -EINVAL; } - mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx", - vb->v4l2_buf.index, i, - vb2_dma_contig_plane_dma_addr(vb, i)); + mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx\n", + vb->v4l2_buf.index, i, + vb2_dma_contig_plane_dma_addr(vb, i)); } return 0; } @@ -1760,11 +1773,27 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count) struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv); struct s5p_mfc_dev *dev = ctx->dev; - v4l2_ctrl_handler_setup(&ctx->ctrl_handler); + if (IS_MFCV6(dev) && (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) { + + if ((ctx->state == MFCINST_GOT_INST) && + (dev->curr_ctx == ctx->num) && dev->hw_lock) { + s5p_mfc_wait_for_done_ctx(ctx, + S5P_MFC_R2H_CMD_SEQ_DONE_RET, + 0); + } + + if (ctx->src_bufs_cnt < ctx->pb_count) { + mfc_err("Need minimum %d OUTPUT buffers\n", + ctx->pb_count); + return -EINVAL; + } + } + /* If context is ready then dev = work->data;schedule it to run */ if (s5p_mfc_ctx_ready(ctx)) set_work_bit_irqsave(ctx); s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); + return 0; } @@ -1920,6 +1949,7 @@ int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx) if (controls[i].is_volatile && ctx->ctrls[i]) ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE; } + v4l2_ctrl_handler_setup(&ctx->ctrl_handler); return 0; } diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c index 0af05a2d1cd..368582b091b 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c @@ -1275,8 +1275,8 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) spin_unlock_irqrestore(&dev->irqlock, flags); dev->curr_ctx = ctx->num; s5p_mfc_clean_ctx_int_flags(ctx); - mfc_debug(2, "encoding buffer with index=%d state=%d", - src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state); + mfc_debug(2, "encoding buffer with index=%d state=%d\n", + src_mb ? 
src_mb->b->v4l2_buf.index : -1, ctx->state); s5p_mfc_encode_one_frame_v5(ctx); return 0; } diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c index 7e76fce2e52..66f0d042357 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c @@ -62,12 +62,6 @@ static void s5p_mfc_release_dec_desc_buffer_v6(struct s5p_mfc_ctx *ctx) /* NOP */ } -static int s5p_mfc_get_dec_status_v6(struct s5p_mfc_dev *dev) -{ - /* NOP */ - return -1; -} - /* Allocate codec buffers */ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx) { @@ -167,7 +161,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx) S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6); ctx->bank1.size = ctx->scratch_buf_size + ctx->tmv_buffer_size + - (ctx->dpb_count * (ctx->luma_dpb_size + + (ctx->pb_count * (ctx->luma_dpb_size + ctx->chroma_dpb_size + ctx->me_buffer_size)); ctx->bank2.size = 0; break; @@ -181,7 +175,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx) S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6); ctx->bank1.size = ctx->scratch_buf_size + ctx->tmv_buffer_size + - (ctx->dpb_count * (ctx->luma_dpb_size + + (ctx->pb_count * (ctx->luma_dpb_size + ctx->chroma_dpb_size + ctx->me_buffer_size)); ctx->bank2.size = 0; break; @@ -198,7 +192,6 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx) } BUG_ON(ctx->bank1.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1)); } - return 0; } @@ -449,8 +442,8 @@ static int s5p_mfc_set_enc_stream_buffer_v6(struct s5p_mfc_ctx *ctx, WRITEL(addr, S5P_FIMV_E_STREAM_BUFFER_ADDR_V6); /* 16B align */ WRITEL(size, S5P_FIMV_E_STREAM_BUFFER_SIZE_V6); - mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%d", - addr, size); + mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%d\n", + addr, size); return 0; } @@ -463,8 +456,8 @@ static void s5p_mfc_set_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx, WRITEL(y_addr, S5P_FIMV_E_SOURCE_LUMA_ADDR_V6); /* 256B align */ WRITEL(c_addr, S5P_FIMV_E_SOURCE_CHROMA_ADDR_V6); - mfc_debug(2, "enc src y buf addr: 0x%08lx", y_addr); - mfc_debug(2, "enc src c buf addr: 0x%08lx", c_addr); + mfc_debug(2, "enc src y buf addr: 0x%08lx\n", y_addr); + mfc_debug(2, "enc src c buf addr: 0x%08lx\n", c_addr); } static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx, @@ -479,8 +472,8 @@ static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx, enc_recon_y_addr = READL(S5P_FIMV_E_RECON_LUMA_DPB_ADDR_V6); enc_recon_c_addr = READL(S5P_FIMV_E_RECON_CHROMA_DPB_ADDR_V6); - mfc_debug(2, "recon y addr: 0x%08lx", enc_recon_y_addr); - mfc_debug(2, "recon c addr: 0x%08lx", enc_recon_c_addr); + mfc_debug(2, "recon y addr: 0x%08lx\n", enc_recon_y_addr); + mfc_debug(2, "recon c addr: 0x%08lx\n", enc_recon_c_addr); } /* Set encoding ref & codec buffer */ @@ -497,7 +490,7 @@ static int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx) mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1); - for (i = 0; i < ctx->dpb_count; i++) { + for (i = 0; i < ctx->pb_count; i++) { WRITEL(buf_addr1, S5P_FIMV_E_LUMA_DPB_V6 + (4 * i)); buf_addr1 += ctx->luma_dpb_size; WRITEL(buf_addr1, S5P_FIMV_E_CHROMA_DPB_V6 + (4 * i)); @@ -520,7 +513,7 @@ static int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx) buf_size1 -= ctx->tmv_buffer_size; mfc_debug(2, "Buf1: %u, buf_size1: %d (ref frames %d)\n", - buf_addr1, buf_size1, ctx->dpb_count); + buf_addr1, buf_size1, ctx->pb_count); if (buf_size1 < 0) { mfc_debug(2, "Not enough memory has been allocated.\n"); 
return -ENOMEM; @@ -1431,8 +1424,8 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0); src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1); - mfc_debug(2, "enc src y addr: 0x%08lx", src_y_addr); - mfc_debug(2, "enc src c addr: 0x%08lx", src_c_addr); + mfc_debug(2, "enc src y addr: 0x%08lx\n", src_y_addr); + mfc_debug(2, "enc src c addr: 0x%08lx\n", src_c_addr); s5p_mfc_set_enc_frame_buffer_v6(ctx, src_y_addr, src_c_addr); @@ -1522,22 +1515,6 @@ static inline int s5p_mfc_run_init_enc_buffers(struct s5p_mfc_ctx *ctx) struct s5p_mfc_dev *dev = ctx->dev; int ret; - ret = s5p_mfc_alloc_codec_buffers_v6(ctx); - if (ret) { - mfc_err("Failed to allocate encoding buffers.\n"); - return -ENOMEM; - } - - /* Header was generated now starting processing - * First set the reference frame buffers - */ - if (ctx->capture_state != QUEUE_BUFS_REQUESTED) { - mfc_err("It seems that destionation buffers were not\n" - "requested.MFC requires that header should be generated\n" - "before allocating codec buffer.\n"); - return -EAGAIN; - } - dev->curr_ctx = ctx->num; s5p_mfc_clean_ctx_int_flags(ctx); ret = s5p_mfc_set_enc_ref_buffer_v6(ctx); @@ -1582,7 +1559,7 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev) mfc_debug(1, "Seting new context to %p\n", ctx); /* Got context to run in ctx */ mfc_debug(1, "ctx->dst_queue_cnt=%d ctx->dpb_count=%d ctx->src_queue_cnt=%d\n", - ctx->dst_queue_cnt, ctx->dpb_count, ctx->src_queue_cnt); + ctx->dst_queue_cnt, ctx->pb_count, ctx->src_queue_cnt); mfc_debug(1, "ctx->state=%d\n", ctx->state); /* Last frame has already been sent to MFC * Now obtaining frames from MFC buffer */ @@ -1647,7 +1624,7 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev) case MFCINST_GOT_INST: s5p_mfc_run_init_enc(ctx); break; - case MFCINST_HEAD_PARSED: /* Only for MFC6.x */ + case MFCINST_HEAD_PRODUCED: ret = s5p_mfc_run_init_enc_buffers(ctx); break; default: @@ -1730,7 +1707,7 @@ static int s5p_mfc_get_dspl_status_v6(struct s5p_mfc_dev *dev) return mfc_read(dev, S5P_FIMV_D_DISPLAY_STATUS_V6); } -static int s5p_mfc_get_decoded_status_v6(struct s5p_mfc_dev *dev) +static int s5p_mfc_get_dec_status_v6(struct s5p_mfc_dev *dev) { return mfc_read(dev, S5P_FIMV_D_DECODED_STATUS_V6); } diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c index 6aa38a56aaf..11d5f1dada3 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c @@ -50,19 +50,6 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev) goto err_p_ip_clk; } - pm->clock = clk_get(&dev->plat_dev->dev, dev->variant->mclk_name); - if (IS_ERR(pm->clock)) { - mfc_err("Failed to get MFC clock\n"); - ret = PTR_ERR(pm->clock); - goto err_g_ip_clk_2; - } - - ret = clk_prepare(pm->clock); - if (ret) { - mfc_err("Failed to prepare MFC clock\n"); - goto err_p_ip_clk_2; - } - atomic_set(&pm->power, 0); #ifdef CONFIG_PM_RUNTIME pm->device = &dev->plat_dev->dev; @@ -72,10 +59,6 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev) atomic_set(&clk_ref, 0); #endif return 0; -err_p_ip_clk_2: - clk_put(pm->clock); -err_g_ip_clk_2: - clk_unprepare(pm->clock_gate); err_p_ip_clk: clk_put(pm->clock_gate); err_g_ip_clk: @@ -86,8 +69,6 @@ void s5p_mfc_final_pm(struct s5p_mfc_dev *dev) { clk_unprepare(pm->clock_gate); clk_put(pm->clock_gate); - clk_unprepare(pm->clock); - clk_put(pm->clock); #ifdef CONFIG_PM_RUNTIME pm_runtime_disable(pm->device); #endif @@ -98,7 +79,7 @@ int s5p_mfc_clock_on(void) int 
ret; #ifdef CLK_DEBUG atomic_inc(&clk_ref); - mfc_debug(3, "+ %d", atomic_read(&clk_ref)); + mfc_debug(3, "+ %d\n", atomic_read(&clk_ref)); #endif ret = clk_enable(pm->clock_gate); return ret; @@ -108,7 +89,7 @@ void s5p_mfc_clock_off(void) { #ifdef CLK_DEBUG atomic_dec(&clk_ref); - mfc_debug(3, "- %d", atomic_read(&clk_ref)); + mfc_debug(3, "- %d\n", atomic_read(&clk_ref)); #endif clk_disable(pm->clock_gate); } diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c index 0b32cc3f6a4..59a9deefb24 100644 --- a/drivers/media/platform/sh_veu.c +++ b/drivers/media/platform/sh_veu.c @@ -905,11 +905,11 @@ static int sh_veu_queue_setup(struct vb2_queue *vq, if (ftmp.fmt.pix.width != pix->width || ftmp.fmt.pix.height != pix->height) return -EINVAL; - size = pix->bytesperline ? pix->bytesperline * pix->height : - pix->width * pix->height * fmt->depth >> 3; + size = pix->bytesperline ? pix->bytesperline * pix->height * fmt->depth / fmt->ydepth : + pix->width * pix->height * fmt->depth / fmt->ydepth; } else { vfmt = sh_veu_get_vfmt(veu, vq->type); - size = vfmt->bytesperline * vfmt->frame.height; + size = vfmt->bytesperline * vfmt->frame.height * vfmt->fmt->depth / vfmt->fmt->ydepth; } if (count < 2) @@ -1033,8 +1033,6 @@ static int sh_veu_release(struct file *file) dev_dbg(veu->dev, "Releasing instance %p\n", veu_file); - pm_runtime_put(veu->dev); - if (veu_file == veu->capture) { veu->capture = NULL; vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE)); @@ -1050,6 +1048,8 @@ static int sh_veu_release(struct file *file) veu->m2m_ctx = NULL; } + pm_runtime_put(veu->dev); + kfree(veu_file); return 0; @@ -1138,10 +1138,7 @@ static irqreturn_t sh_veu_isr(int irq, void *dev_id) veu->xaction++; - if (!veu->aborting) - return IRQ_WAKE_THREAD; - - return IRQ_HANDLED; + return IRQ_WAKE_THREAD; } static int sh_veu_probe(struct platform_device *pdev) diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c index eea832c5fd0..3a4efbdc766 100644 --- a/drivers/media/platform/soc_camera/soc_camera.c +++ b/drivers/media/platform/soc_camera/soc_camera.c @@ -643,9 +643,9 @@ static int soc_camera_close(struct file *file) if (ici->ops->init_videobuf2) vb2_queue_release(&icd->vb2_vidq); - ici->ops->remove(icd); - __soc_camera_power_off(icd); + + ici->ops->remove(icd); } if (icd->streamer == file) diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig index c0beee2fa37..d529ba788f4 100644 --- a/drivers/media/radio/Kconfig +++ b/drivers/media/radio/Kconfig @@ -22,6 +22,7 @@ config RADIO_SI476X tristate "Silicon Laboratories Si476x I2C FM Radio" depends on I2C && VIDEO_V4L2 depends on MFD_SI476X_CORE + depends on SND_SOC select SND_SOC_SI476X ---help--- Choose Y here if you have this FM radio chip. 
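The s5p_mfc_pm.c hunks above lean on the common clock framework's two-stage gating: clk_prepare() handles the part that may sleep and is done once at init time, while clk_enable()/clk_disable() are the fast calls that s5p_mfc_clock_on()/s5p_mfc_clock_off() can safely issue from atomic context. Dropping the second MFC clock is also what lets the err_p_ip_clk_2/err_g_ip_clk_2 unwind labels disappear. A minimal sketch of the pattern follows; the device pointer and the "sclk_gate" clock name are illustrative, not taken from this patch.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *gate_clk;	/* stands in for pm->clock_gate */

static int example_clk_init(struct device *dev)
{
	int ret;

	gate_clk = clk_get(dev, "sclk_gate");	/* hypothetical clock name */
	if (IS_ERR(gate_clk))
		return PTR_ERR(gate_clk);

	ret = clk_prepare(gate_clk);	/* may sleep, so done once up front */
	if (ret)
		clk_put(gate_clk);
	return ret;
}

static int example_clk_on(void)
{
	return clk_enable(gate_clk);	/* fast path, atomic-context safe */
}

static void example_clk_off(void)
{
	clk_disable(gate_clk);
}

static void example_clk_exit(void)
{
	clk_unprepare(gate_clk);
	clk_put(gate_clk);
}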
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c index 9430c6a2993..9dc8bafe648 100644 --- a/drivers/media/radio/radio-si476x.c +++ b/drivers/media/radio/radio-si476x.c @@ -44,7 +44,7 @@ #define FREQ_MUL (10000000 / 625) -#define SI476X_PHDIV_STATUS_LINK_LOCKED(status) (0b10000000 & (status)) +#define SI476X_PHDIV_STATUS_LINK_LOCKED(status) (0x80 & (status)) #define DRIVER_NAME "si476x-radio" #define DRIVER_CARD "SI476x AM/FM Receiver" diff --git a/drivers/media/tuners/Kconfig b/drivers/media/tuners/Kconfig index f6768cad001..15665debc57 100644 --- a/drivers/media/tuners/Kconfig +++ b/drivers/media/tuners/Kconfig @@ -1,23 +1,3 @@ -config MEDIA_ATTACH - bool "Load and attach frontend and tuner driver modules as needed" - depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT - depends on MODULES - default y if !EXPERT - help - Remove the static dependency of DVB card drivers on all - frontend modules for all possible card variants. Instead, - allow the card drivers to only load the frontend modules - they require. - - Also, tuner module will automatically load a tuner driver - when needed, for analog mode. - - This saves several KBytes of memory. - - Note: You will need module-init-tools v3.2 or later for this feature. - - If unsure say Y. - # Analog TV tuners, auto-loaded via tuner.ko config MEDIA_TUNER tristate diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index 22015fe1a0f..2cc8ec70e3b 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c @@ -376,7 +376,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d) struct rtl28xxu_req req_mxl5007t = {0xd9c0, CMD_I2C_RD, 1, buf}; struct rtl28xxu_req req_e4000 = {0x02c8, CMD_I2C_RD, 1, buf}; struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf}; - struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 5, buf}; + struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 1, buf}; dev_dbg(&d->udev->dev, "%s:\n", __func__); @@ -481,9 +481,9 @@ static int rtl2832u_read_config(struct dvb_usb_device *d) goto found; } - /* check R820T by reading tuner stats at I2C addr 0x1a */ + /* check R820T ID register; reg=00 val=69 */ ret = rtl28xxu_ctrl_msg(d, &req_r820t); - if (ret == 0) { + if (ret == 0 && buf[0] == 0x69) { priv->tuner = TUNER_RTL2832_R820T; priv->tuner_name = "R820T"; goto found; diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c index 3fe207e038c..d7ff3b9687c 100644 --- a/drivers/media/usb/gspca/sonixb.c +++ b/drivers/media/usb/gspca/sonixb.c @@ -1159,6 +1159,13 @@ static int sd_start(struct gspca_dev *gspca_dev) regs[0x01] = 0x44; /* Select 24 Mhz clock */ regs[0x12] = 0x02; /* Set hstart to 2 */ } + break; + case SENSOR_PAS202: + /* For some unknown reason we need to increase hstart by 1 on + the sn9c103, otherwise we get wrong colors (bayer shift). */ + if (sd->bridge == BRIDGE_103) + regs[0x12] += 1; + break; } /* Disable compression when the raw bayer format has been selected */ if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_RAW) diff --git a/drivers/media/usb/pwc/pwc.h b/drivers/media/usb/pwc/pwc.h index 7a6a0d39c2c..81b017a554b 100644 --- a/drivers/media/usb/pwc/pwc.h +++ b/drivers/media/usb/pwc/pwc.h @@ -226,7 +226,7 @@ struct pwc_device struct list_head queued_bufs; spinlock_t queued_bufs_lock; /* Protects queued_bufs */ - /* Note if taking both locks v4l2_lock must always be locked first! 
*/ + /* If taking both locks, vb_queue_lock must always be locked first! */ struct mutex v4l2_lock; /* Protects everything else */ struct mutex vb_queue_lock; /* Protects vb_queue and capt_file */ diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index ebb8e48619a..fccd08b66d1 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -1835,6 +1835,8 @@ bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl) { if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_TX) return true; + if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_RX) + return true; switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: case V4L2_CID_AUDIO_VOLUME: diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index f81bda1a48e..7658586fe5f 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -243,7 +243,6 @@ static void v4l_print_format(const void *arg, bool write_only) const struct v4l2_vbi_format *vbi; const struct v4l2_sliced_vbi_format *sliced; const struct v4l2_window *win; - const struct v4l2_clip *clip; unsigned i; pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); @@ -253,7 +252,7 @@ static void v4l_print_format(const void *arg, bool write_only) pix = &p->fmt.pix; pr_cont(", width=%u, height=%u, " "pixelformat=%c%c%c%c, field=%s, " - "bytesperline=%u sizeimage=%u, colorspace=%d\n", + "bytesperline=%u, sizeimage=%u, colorspace=%d\n", pix->width, pix->height, (pix->pixelformat & 0xff), (pix->pixelformat >> 8) & 0xff, @@ -284,20 +283,14 @@ static void v4l_print_format(const void *arg, bool write_only) case V4L2_BUF_TYPE_VIDEO_OVERLAY: case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: win = &p->fmt.win; - pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, " "chromakey=0x%08x, bitmap=%p, " "global_alpha=0x%02x\n", - win->w.width, win->w.height, - win->w.left, win->w.top, + /* Note: we can't print the clip list here since the clips + * pointer is a userspace pointer, not a kernelspace + * pointer. 
*/ + pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, chromakey=0x%08x, clipcount=%u, clips=%p, bitmap=%p, global_alpha=0x%02x\n", + win->w.width, win->w.height, win->w.left, win->w.top, prt_names(win->field, v4l2_field_names), - win->chromakey, win->bitmap, win->global_alpha); - clip = win->clips; - for (i = 0; i < win->clipcount; i++) { - printk(KERN_DEBUG "clip %u: wxh=%dx%d, x,y=%d,%d\n", - i, clip->c.width, clip->c.height, - clip->c.left, clip->c.top); - clip = clip->next; - } + win->chromakey, win->clipcount, win->clips, + win->bitmap, win->global_alpha); break; case V4L2_BUF_TYPE_VBI_CAPTURE: case V4L2_BUF_TYPE_VBI_OUTPUT: @@ -332,7 +325,7 @@ static void v4l_print_framebuffer(const void *arg, bool write_only) pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, " "height=%u, pixelformat=%c%c%c%c, " - "bytesperline=%u sizeimage=%u, colorspace=%d\n", + "bytesperline=%u, sizeimage=%u, colorspace=%d\n", p->capability, p->flags, p->base, p->fmt.width, p->fmt.height, (p->fmt.pixelformat & 0xff), @@ -353,7 +346,7 @@ static void v4l_print_modulator(const void *arg, bool write_only) const struct v4l2_modulator *p = arg; if (write_only) - pr_cont("index=%u, txsubchans=0x%x", p->index, p->txsubchans); + pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans); else pr_cont("index=%u, name=%.*s, capability=0x%x, " "rangelow=%u, rangehigh=%u, txsubchans=0x%x\n", @@ -445,13 +438,13 @@ static void v4l_print_buffer(const void *arg, bool write_only) for (i = 0; i < p->length; ++i) { plane = &p->m.planes[i]; printk(KERN_DEBUG - "plane %d: bytesused=%d, data_offset=0x%08x " + "plane %d: bytesused=%d, data_offset=0x%08x, " "offset/userptr=0x%lx, length=%d\n", i, plane->bytesused, plane->data_offset, plane->m.userptr, plane->length); } } else { - pr_cont("bytesused=%d, offset/userptr=0x%lx, length=%d\n", + pr_cont(", bytesused=%d, offset/userptr=0x%lx, length=%d\n", p->bytesused, p->m.userptr, p->length); } @@ -504,6 +497,8 @@ static void v4l_print_streamparm(const void *arg, bool write_only) c->capability, c->outputmode, c->timeperframe.numerator, c->timeperframe.denominator, c->extendedmode, c->writebuffers); + } else { + pr_cont("\n"); } } @@ -734,11 +729,11 @@ static void v4l_print_frmsizeenum(const void *arg, bool write_only) p->type); switch (p->type) { case V4L2_FRMSIZE_TYPE_DISCRETE: - pr_cont(" wxh=%ux%u\n", + pr_cont(", wxh=%ux%u\n", p->discrete.width, p->discrete.height); break; case V4L2_FRMSIZE_TYPE_STEPWISE: - pr_cont(" min=%ux%u, max=%ux%u, step=%ux%u\n", + pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n", p->stepwise.min_width, p->stepwise.min_height, p->stepwise.step_width, p->stepwise.step_height, p->stepwise.max_width, p->stepwise.max_height); @@ -764,12 +759,12 @@ static void v4l_print_frmivalenum(const void *arg, bool write_only) p->width, p->height, p->type); switch (p->type) { case V4L2_FRMIVAL_TYPE_DISCRETE: - pr_cont(" fps=%d/%d\n", + pr_cont(", fps=%d/%d\n", p->discrete.numerator, p->discrete.denominator); break; case V4L2_FRMIVAL_TYPE_STEPWISE: - pr_cont(" min=%d/%d, max=%d/%d, step=%d/%d\n", + pr_cont(", min=%d/%d, max=%d/%d, step=%d/%d\n", p->stepwise.min.numerator, p->stepwise.min.denominator, p->stepwise.max.numerator, @@ -807,8 +802,8 @@ static void v4l_print_event(const void *arg, bool write_only) pr_cont("value64=%lld, ", c->value64); else pr_cont("value=%d, ", c->value); - pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d," - " default_value=%d\n", + pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, " + "default_value=%d\n", c->flags, c->minimum, 
c->maximum, c->step, c->default_value); break; @@ -845,7 +840,7 @@ static void v4l_print_freq_band(const void *arg, bool write_only) const struct v4l2_frequency_band *p = arg; pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, " - "rangelow=%u, rangehigh=%u, modulation=0x%x\n", + "rangelow=%u, rangehigh=%u, modulation=0x%x\n", p->tuner, p->type, p->index, p->capability, p->rangelow, p->rangehigh, p->modulation); diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c index 66f599fcb82..e96497f7c3e 100644 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -205,7 +205,7 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev) static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) { struct v4l2_m2m_dev *m2m_dev; - unsigned long flags_job, flags; + unsigned long flags_job, flags_out, flags_cap; m2m_dev = m2m_ctx->m2m_dev; dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx); @@ -223,23 +223,26 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) return; } - spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out); if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) { - spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, + flags_out); spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); dprintk("No input buffers available\n"); return; } - spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); + spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap); if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) { - spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); - spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, + flags_cap); + spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, + flags_out); spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); dprintk("No output buffers available\n"); return; } - spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); - spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap); + spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out); if (m2m_dev->m2m_ops->job_ready && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) { @@ -372,6 +375,20 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf); /** + * v4l2_m2m_create_bufs() - create a source or destination buffer, depending + * on the type + */ +int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_create_buffers *create) +{ + struct vb2_queue *vq; + + vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type); + return vb2_create_bufs(vq, create); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs); + +/** * v4l2_m2m_expbuf() - export a source or destination buffer, depending on * the type */ @@ -486,8 +503,10 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, if (m2m_ctx->m2m_dev->m2m_ops->unlock) m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv); - poll_wait(file, &src_q->done_wq, wait); - poll_wait(file, &dst_q->done_wq, wait); + if (list_empty(&src_q->done_list)) + poll_wait(file, &src_q->done_wq, wait); + if (list_empty(&dst_q->done_list)) + poll_wait(file, &dst_q->done_wq, wait); if (m2m_ctx->m2m_dev->m2m_ops->lock) 
m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv); diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index 7d833eefaf4..e3bdc3be91e 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -2014,7 +2014,8 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) if (list_empty(&q->queued_list)) return res | POLLERR; - poll_wait(file, &q->done_wq, wait); + if (list_empty(&q->done_list)) + poll_wait(file, &q->done_wq, wait); /* * Take first buffer available for dequeuing. diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c index 6e8bc9d88c4..94d957d203a 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c @@ -244,7 +244,7 @@ bnad_debugfs_lseek(struct file *file, loff_t offset, int orig) file->f_pos += offset; break; case 2: - file->f_pos = debug->buffer_len - offset; + file->f_pos = debug->buffer_len + offset; break; default: return -EINVAL; diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index 9544cdc0d1a..e79e006eb9a 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c @@ -811,6 +811,70 @@ int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev) return pcidev->irq; } +static struct iosapic_info *first_isi = NULL; + +#ifdef CONFIG_64BIT +int iosapic_serial_irq(int num) +{ + struct iosapic_info *isi = first_isi; + struct irt_entry *irte = NULL; /* only used if PAT PDC */ + struct vector_info *vi; + int isi_line; /* line used by device */ + + /* lookup IRT entry for isi/slot/pin set */ + irte = &irt_cell[num]; + + DBG_IRT("iosapic_serial_irq(): irte %p %x %x %x %x %x %x %x %x\n", + irte, + irte->entry_type, + irte->entry_length, + irte->polarity_trigger, + irte->src_bus_irq_devno, + irte->src_bus_id, + irte->src_seg_id, + irte->dest_iosapic_intin, + (u32) irte->dest_iosapic_addr); + isi_line = irte->dest_iosapic_intin; + + /* get vector info for this input line */ + vi = isi->isi_vector + isi_line; + DBG_IRT("iosapic_serial_irq: line %d vi 0x%p\n", isi_line, vi); + + /* If this IRQ line has already been setup, skip it */ + if (vi->irte) + goto out; + + vi->irte = irte; + + /* + * Allocate processor IRQ + * + * XXX/FIXME The txn_alloc_irq() code and related code should be + * moved to enable_irq(). That way we only allocate processor IRQ + * bits for devices that actually have drivers claiming them. + * Right now we assign an IRQ to every PCI device present, + * regardless of whether it's used or not. 
+ */ + vi->txn_irq = txn_alloc_irq(8); + + if (vi->txn_irq < 0) + panic("I/O sapic: couldn't get TXN IRQ\n"); + + /* enable_irq() will use txn_* to program IRdT */ + vi->txn_addr = txn_alloc_addr(vi->txn_irq); + vi->txn_data = txn_alloc_data(vi->txn_irq); + + vi->eoi_addr = isi->addr + IOSAPIC_REG_EOI; + vi->eoi_data = cpu_to_le32(vi->txn_data); + + cpu_claim_irq(vi->txn_irq, &iosapic_interrupt_type, vi); + + out: + + return vi->txn_irq; +} +#endif + /* ** squirrel away the I/O Sapic Version @@ -877,6 +941,8 @@ void *iosapic_register(unsigned long hpa) vip->irqline = (unsigned char) cnt; vip->iosapic = isi; } + if (!first_isi) + first_isi = isi; return isi; } diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c index 439c012be76..b63d534192e 100644 --- a/drivers/scsi/bfa/bfad_debugfs.c +++ b/drivers/scsi/bfa/bfad_debugfs.c @@ -186,7 +186,7 @@ bfad_debugfs_lseek(struct file *file, loff_t offset, int orig) file->f_pos += offset; break; case 2: - file->f_pos = debug->buffer_len - offset; + file->f_pos = debug->buffer_len + offset; break; default: return -EINVAL; diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c index adc1f7f471f..85e1ffd0e5c 100644 --- a/drivers/scsi/fnic/fnic_debugfs.c +++ b/drivers/scsi/fnic/fnic_debugfs.c @@ -174,7 +174,7 @@ static loff_t fnic_trace_debugfs_lseek(struct file *file, pos = file->f_pos + offset; break; case 2: - pos = fnic_dbg_prt->buffer_len - offset; + pos = fnic_dbg_prt->buffer_len + offset; } return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ? -EINVAL : (file->f_pos = pos); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index f63f5ff7f27..f525ecb7a9c 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1178,7 +1178,7 @@ lpfc_debugfs_lseek(struct file *file, loff_t off, int whence) pos = file->f_pos + off; break; case 2: - pos = debug->len - off; + pos = debug->len + off; } return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos); } diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 7a3870f385f..66b0b26a138 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -688,8 +688,12 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen * for qla_tgt_xmit_response LLD code */ + if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { + se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT; + se_cmd->residual_count = 0; + } se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; - se_cmd->residual_count = se_cmd->data_length; + se_cmd->residual_count += se_cmd->data_length; cmd->bufflen = 0; } diff --git a/drivers/staging/media/davinci_vpfe/Kconfig b/drivers/staging/media/davinci_vpfe/Kconfig index 2e4a28b018e..12f321dd239 100644 --- a/drivers/staging/media/davinci_vpfe/Kconfig +++ b/drivers/staging/media/davinci_vpfe/Kconfig @@ -1,6 +1,6 @@ config VIDEO_DM365_VPFE tristate "DM365 VPFE Media Controller Capture Driver" - depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_VPFE_CAPTURE + depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF select VIDEOBUF2_DMA_CONTIG help Support for DM365 VPFE based Media Controller Capture driver. 
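The four debugfs lseek fixes above (bnad, bfad, fnic, lpfc) correct the same sign error: for whence == 2 (SEEK_END) the user-supplied offset is signed and must be added to the end-of-buffer position, so seeking 10 bytes back from the end means offset == -10. Subtracting it inverted the direction and made valid negative offsets land past the end. A sketch of the corrected convention, assuming a driver-private buffer length (the names here are illustrative, not from any of these drivers):

#include <linux/fs.h>

static loff_t example_debugfs_lseek(struct file *file, loff_t offset,
				    int whence)
{
	loff_t buf_len = 4096;	/* illustrative buffer length */
	loff_t pos;

	switch (whence) {
	case 0:		/* SEEK_SET: absolute position */
		pos = offset;
		break;
	case 1:		/* SEEK_CUR: relative to current position */
		pos = file->f_pos + offset;
		break;
	case 2:		/* SEEK_END: add the (typically negative) offset */
		pos = buf_len + offset;
		break;
	default:
		return -EINVAL;
	}
	if (pos < 0 || pos > buf_len)
		return -EINVAL;
	file->f_pos = pos;
	return pos;
}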
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c index b88e1ddce22..d8ce20d2fbd 100644 --- a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c +++ b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c @@ -639,7 +639,8 @@ static int vpfe_probe(struct platform_device *pdev) if (ret) goto probe_free_dev_mem; - if (vpfe_initialize_modules(vpfe_dev, pdev)) + ret = vpfe_initialize_modules(vpfe_dev, pdev); + if (ret) goto probe_disable_clock; vpfe_dev->media_dev.dev = vpfe_dev->pdev; @@ -663,7 +664,8 @@ static int vpfe_probe(struct platform_device *pdev) /* set the driver data in platform device */ platform_set_drvdata(pdev, vpfe_dev); /* register subdevs/entities */ - if (vpfe_register_entities(vpfe_dev)) + ret = vpfe_register_entities(vpfe_dev); + if (ret) goto probe_out_v4l2_unregister; ret = vpfe_attach_irq(vpfe_dev); diff --git a/drivers/staging/media/solo6x10/Kconfig b/drivers/staging/media/solo6x10/Kconfig index df6569b997b..34f3b6d02d2 100644 --- a/drivers/staging/media/solo6x10/Kconfig +++ b/drivers/staging/media/solo6x10/Kconfig @@ -5,6 +5,7 @@ config SOLO6X10 select VIDEOBUF2_DMA_SG select VIDEOBUF2_DMA_CONTIG select SND_PCM + select FONT_8x16 ---help--- This driver supports the Softlogic based MPEG-4 and h.264 codec cards. diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 13e9e715ad2..8d8b3ff6849 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -155,7 +155,7 @@ static ssize_t lio_target_np_store_iser( struct iscsi_tpg_np *tpg_np_iser = NULL; char *endptr; u32 op; - int rc; + int rc = 0; op = simple_strtoul(page, &endptr, 0); if ((op != 1) && (op != 0)) { @@ -174,31 +174,32 @@ static ssize_t lio_target_np_store_iser( return -EINVAL; if (op) { - int rc = request_module("ib_isert"); - if (rc != 0) + rc = request_module("ib_isert"); + if (rc != 0) { pr_warn("Unable to request_module for ib_isert\n"); + rc = 0; + } tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr, np->np_ip, tpg_np, ISCSI_INFINIBAND); - if (!tpg_np_iser || IS_ERR(tpg_np_iser)) + if (IS_ERR(tpg_np_iser)) { + rc = PTR_ERR(tpg_np_iser); goto out; + } } else { tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND); - if (!tpg_np_iser) - goto out; - - rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser); - if (rc < 0) - goto out; + if (tpg_np_iser) { + rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser); + if (rc < 0) + goto out; + } } - printk("lio_target_np_store_iser() done, op: %d\n", op); - iscsit_put_tpg(tpg); return count; out: iscsit_put_tpg(tpg); - return -EINVAL; + return rc; } TF_NP_BASE_ATTR(lio_target, iser, S_IRUGO | S_IWUSR); diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 8e6298cc883..dcb199da06b 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c @@ -842,11 +842,11 @@ int iscsit_stop_time2retain_timer(struct iscsi_session *sess) return 0; sess->time2retain_timer_flags |= ISCSI_TF_STOP; - spin_unlock_bh(&se_tpg->session_lock); + spin_unlock(&se_tpg->session_lock); del_timer_sync(&sess->time2retain_timer); - spin_lock_bh(&se_tpg->session_lock); + spin_lock(&se_tpg->session_lock); sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING; pr_debug("Stopped Time2Retain Timer for SID: %u\n", sess->sid); diff --git a/drivers/target/iscsi/iscsi_target_login.c 
b/drivers/target/iscsi/iscsi_target_login.c index bb5d5c5bce6..3402241be87 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -984,8 +984,6 @@ int iscsi_target_setup_login_socket( } np->np_transport = t; - printk("Set np->np_transport to %p -> %s\n", np->np_transport, - np->np_transport->name); return 0; } @@ -1002,7 +1000,6 @@ int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) conn->sock = new_sock; conn->login_family = np->np_sockaddr.ss_family; - printk("iSCSI/TCP: Setup conn->sock from new_sock: %p\n", new_sock); if (np->np_sockaddr.ss_family == AF_INET6) { memset(&sock_in6, 0, sizeof(struct sockaddr_in6)); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 7ad912060e2..cd5018ff9cd 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -721,9 +721,6 @@ int iscsi_target_locate_portal( start += strlen(key) + strlen(value) + 2; } - - printk("i_buf: %s, s_buf: %s, t_buf: %s\n", i_buf, s_buf, t_buf); - /* * See 5.3. Login Phase. */ diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 59bfaecc4e1..abfd9908978 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -244,14 +244,9 @@ static void pty_flush_buffer(struct tty_struct *tty) static int pty_open(struct tty_struct *tty, struct file *filp) { - int retval = -ENODEV; - if (!tty || !tty->link) - goto out; - - set_bit(TTY_IO_ERROR, &tty->flags); + return -ENODEV; - retval = -EIO; if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) goto out; if (test_bit(TTY_PTY_LOCK, &tty->link->flags)) @@ -262,9 +257,11 @@ static int pty_open(struct tty_struct *tty, struct file *filp) clear_bit(TTY_IO_ERROR, &tty->flags); clear_bit(TTY_OTHER_CLOSED, &tty->link->flags); set_bit(TTY_THROTTLED, &tty->flags); - retval = 0; + return 0; + out: - return retval; + set_bit(TTY_IO_ERROR, &tty->flags); + return -EIO; } static void pty_set_termios(struct tty_struct *tty, diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c index 097dff9c08a..bb91b4713eb 100644 --- a/drivers/tty/serial/8250/8250_gsc.c +++ b/drivers/tty/serial/8250/8250_gsc.c @@ -30,6 +30,12 @@ static int __init serial_init_chip(struct parisc_device *dev) unsigned long address; int err; +#ifdef CONFIG_64BIT + extern int iosapic_serial_irq(int cellnum); + if (!dev->irq && (dev->id.sversion == 0xad)) + dev->irq = iosapic_serial_irq(dev->mod_index-1); +#endif + if (!dev->irq) { /* We find some unattached serial ports by walking native * busses. These should be silently ignored. Otherwise, @@ -51,7 +57,8 @@ static int __init serial_init_chip(struct parisc_device *dev) memset(&uart, 0, sizeof(uart)); uart.port.iotype = UPIO_MEM; /* 7.272727MHz on Lasi. Assumed the same for Dino, Wax and Timi. */ - uart.port.uartclk = 7272727; + uart.port.uartclk = (dev->id.sversion != 0xad) ? 
+ 7272727 : 1843200; uart.port.mapbase = address; uart.port.membase = ioremap_nocache(address, 16); uart.port.irq = dev->irq; @@ -73,6 +80,7 @@ static struct parisc_device_id serial_tbl[] = { { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00075 }, { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008c }, { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008d }, + { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x000ad }, { 0 } }; diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index fc2c06c66e8..2bd78e2ac8e 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -289,13 +289,10 @@ static int vt_disallocate(unsigned int vc_num) struct vc_data *vc = NULL; int ret = 0; - if (!vc_num) - return 0; - console_lock(); if (VT_BUSY(vc_num)) ret = -EBUSY; - else + else if (vc_num) vc = vc_deallocate(vc_num); console_unlock(); diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index 7ef3eb8617a..2311b1e4e43 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -4,11 +4,17 @@ menuconfig USB_PHY bool "USB Physical Layer drivers" help - USB controllers (those which are host, device or DRD) need a - device to handle the physical layer signalling, commonly called - a PHY. + Most USB controllers have the physical layer signalling part + (commonly called a PHY) built in. However, dual-role devices + (a.k.a. USB on-the-go) which support being USB master or slave + with the same connector often use an external PHY. - The following drivers add support for such PHY devices. + The drivers in this submenu add support for such PHY devices. + They are not needed for standard master-only (or the vast + majority of slave-only) USB interfaces. + + If you're not sure if this applies to you, it probably doesn't; + say N here. if USB_PHY diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index c92c5ed4e58..e581c2549a5 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -172,7 +172,8 @@ static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = { { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, - { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, + { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) }, + { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, }; diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h index b353e7e3d48..4a2423e84d5 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.h +++ b/drivers/usb/serial/ti_usb_3410_5052.h @@ -52,7 +52,9 @@ /* Abbott Diabetics vendor and product ids */ #define ABBOTT_VENDOR_ID 0x1a61 -#define ABBOTT_PRODUCT_ID 0x3410 +#define ABBOTT_STEREO_PLUG_ID 0x3410 +#define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID +#define ABBOTT_STRIP_PORT_ID 0x3420 /* Commands */ #define TI_GET_VERSION 0x01 |
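One more pattern worth pulling out of this series: the v4l2_m2m_try_schedule() change earlier replaces a single flags variable with flags_out and flags_cap because spin_lock_irqsave() stores the pre-lock IRQ state in its flags argument. With nested sections sharing one variable, the inner save overwrites the outer one, so the outermost spin_unlock_irqrestore() can restore the wrong interrupt state. A standalone sketch with illustrative locks:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

static void nested_sections(void)
{
	unsigned long flags_a, flags_b;	/* one variable per save */

	spin_lock_irqsave(&lock_a, flags_a);	/* saves IRQ state in flags_a */
	spin_lock_irqsave(&lock_b, flags_b);	/* must not reuse flags_a */

	/* ... critical section holding both locks ... */

	spin_unlock_irqrestore(&lock_b, flags_b);
	spin_unlock_irqrestore(&lock_a, flags_a);	/* restores original state */
}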