Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig                          2
-rw-r--r--  drivers/Makefile                         3
-rw-r--r--  drivers/bus/Kconfig                      8
-rw-r--r--  drivers/bus/Makefile                     1
-rw-r--r--  drivers/bus/brcmstb_gisb.c             289
-rw-r--r--  drivers/bus/omap_l3_noc.c              406
-rw-r--r--  drivers/bus/omap_l3_noc.h              545
-rw-r--r--  drivers/clk/samsung/clk-exynos5250.c    42
-rw-r--r--  drivers/clocksource/exynos_mct.c         8
-rw-r--r--  drivers/cpufreq/Kconfig.arm              6
-rw-r--r--  drivers/cpufreq/exynos-cpufreq.c         2
-rw-r--r--  drivers/cpufreq/exynos-cpufreq.h        30
-rw-r--r--  drivers/cpufreq/exynos4210-cpufreq.c    39
-rw-r--r--  drivers/cpufreq/exynos4x12-cpufreq.c    40
-rw-r--r--  drivers/cpufreq/exynos5250-cpufreq.c    43
-rw-r--r--  drivers/cpuidle/Kconfig.arm              6
-rw-r--r--  drivers/cpuidle/Makefile                 1
-rw-r--r--  drivers/cpuidle/cpuidle-exynos.c        99
-rw-r--r--  drivers/dma/edma.c                     335
-rw-r--r--  drivers/memory/mvebu-devbus.c          229
-rw-r--r--  drivers/power/reset/Kconfig             15
-rw-r--r--  drivers/power/reset/Makefile             2
-rw-r--r--  drivers/power/reset/keystone-reset.c   166
-rw-r--r--  drivers/power/reset/sun6i-reboot.c      85
-rw-r--r--  drivers/reset/Makefile                   1
-rw-r--r--  drivers/reset/reset-socfpga.c          146
-rw-r--r--  drivers/reset/reset-sunxi.c             21
-rw-r--r--  drivers/soc/Kconfig                      5
-rw-r--r--  drivers/soc/Makefile                     5
-rw-r--r--  drivers/soc/qcom/Kconfig                11
-rw-r--r--  drivers/soc/qcom/Makefile                1
-rw-r--r--  drivers/soc/qcom/qcom_gsbi.c            85
-rw-r--r--  drivers/tty/serial/msm_serial.c         48
-rw-r--r--  drivers/tty/serial/msm_serial.h          5
34 files changed, 2185 insertions, 545 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 0a0a90f52d2..0e87a34b647 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -132,6 +132,8 @@ source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
+source "drivers/soc/Kconfig"
+
source "drivers/clk/Kconfig"
source "drivers/hwspinlock/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 7183b6af5da..1a1790e4de6 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -33,6 +33,9 @@ obj-y += amba/
# really early.
obj-$(CONFIG_DMADEVICES) += dma/
+# SOC specific infrastructure drivers.
+obj-y += soc/
+
obj-$(CONFIG_VIRTIO) += virtio/
obj-$(CONFIG_XEN) += xen/
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 28634277888..a118ec1650f 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -4,6 +4,14 @@
menu "Bus devices"
+config BRCMSTB_GISB_ARB
+ bool "Broadcom STB GISB bus arbiter"
+ depends on ARM
+ help
+ Driver for the Broadcom Set Top Box System-on-a-chip internal bus
+ arbiter. This driver provides timeout and target abort error handling
+ and internal bus master decoding.
+
config IMX_WEIM
bool "Freescale EIM DRIVER"
depends on ARCH_MXC
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index f095aa771de..6a4ea7e4af1 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -2,6 +2,7 @@
# Makefile for the bus drivers.
#
+obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o
obj-$(CONFIG_IMX_WEIM) += imx-weim.o
obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o
obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
new file mode 100644
index 00000000000..6159b7752a6
--- /dev/null
+++ b/drivers/bus/brcmstb_gisb.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/sysfs.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/bitops.h>
+
+#include <asm/bug.h>
+#include <asm/signal.h>
+
+#define ARB_TIMER 0x008
+#define ARB_ERR_CAP_CLR 0x7e4
+#define ARB_ERR_CAP_CLEAR (1 << 0)
+#define ARB_ERR_CAP_HI_ADDR 0x7e8
+#define ARB_ERR_CAP_ADDR 0x7ec
+#define ARB_ERR_CAP_DATA 0x7f0
+#define ARB_ERR_CAP_STATUS 0x7f4
+#define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12)
+#define ARB_ERR_CAP_STATUS_TEA (1 << 11)
+#define ARB_ERR_CAP_STATUS_BS_SHIFT (1 << 2)
+#define ARB_ERR_CAP_STATUS_BS_MASK 0x3c
+#define ARB_ERR_CAP_STATUS_WRITE (1 << 1)
+#define ARB_ERR_CAP_STATUS_VALID (1 << 0)
+#define ARB_ERR_CAP_MASTER 0x7f8
+
+struct brcmstb_gisb_arb_device {
+ void __iomem *base;
+ struct mutex lock;
+ struct list_head next;
+ u32 valid_mask;
+ const char *master_names[sizeof(u32) * BITS_PER_BYTE];
+};
+
+static LIST_HEAD(brcmstb_gisb_arb_device_list);
+
+static ssize_t gisb_arb_get_timeout(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
+ u32 timeout;
+
+ mutex_lock(&gdev->lock);
+ timeout = ioread32(gdev->base + ARB_TIMER);
+ mutex_unlock(&gdev->lock);
+
+ return sprintf(buf, "%d", timeout);
+}
+
+static ssize_t gisb_arb_set_timeout(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
+ int val, ret;
+
+ ret = kstrtoint(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val == 0 || val >= 0xffffffff)
+ return -EINVAL;
+
+ mutex_lock(&gdev->lock);
+ iowrite32(val, gdev->base + ARB_TIMER);
+ mutex_unlock(&gdev->lock);
+
+ return count;
+}
+
+static const char *
+brcmstb_gisb_master_to_str(struct brcmstb_gisb_arb_device *gdev,
+ u32 masters)
+{
+ u32 mask = gdev->valid_mask & masters;
+
+ if (hweight_long(mask) != 1)
+ return NULL;
+
+ return gdev->master_names[ffs(mask) - 1];
+}
+
+static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
+ const char *reason)
+{
+ u32 cap_status;
+ unsigned long arb_addr;
+ u32 master;
+ const char *m_name;
+ char m_fmt[11];
+
+ cap_status = ioread32(gdev->base + ARB_ERR_CAP_STATUS);
+
+ /* Invalid captured address, bail out */
+ if (!(cap_status & ARB_ERR_CAP_STATUS_VALID))
+ return 1;
+
+ /* Read the address and master */
+ arb_addr = ioread32(gdev->base + ARB_ERR_CAP_ADDR) & 0xffffffff;
+#if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
+ arb_addr |= (u64)ioread32(gdev->base + ARB_ERR_CAP_HI_ADDR) << 32;
+#endif
+ master = ioread32(gdev->base + ARB_ERR_CAP_MASTER);
+
+ m_name = brcmstb_gisb_master_to_str(gdev, master);
+ if (!m_name) {
+ snprintf(m_fmt, sizeof(m_fmt), "0x%08x", master);
+ m_name = m_fmt;
+ }
+
+ pr_crit("%s: %s at 0x%lx [%c %s], core: %s\n",
+ __func__, reason, arb_addr,
+ cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R',
+ cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "",
+ m_name);
+
+ /* clear the GISB error */
+ iowrite32(ARB_ERR_CAP_CLEAR, gdev->base + ARB_ERR_CAP_CLR);
+
+ return 0;
+}
+
+static int brcmstb_bus_error_handler(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ int ret = 0;
+ struct brcmstb_gisb_arb_device *gdev;
+
+ /* iterate over each registered GISB arbiter and decode its error */
+ list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next)
+ ret |= brcmstb_gisb_arb_decode_addr(gdev, "bus error");
+ /*
+ * If it was an imprecise abort, then we need to correct the
+ * return address to be _after_ the instruction.
+ */
+ if (fsr & (1 << 10))
+ regs->ARM_pc += 4;
+
+ return ret;
+}
+
+void __init brcmstb_hook_fault_code(void)
+{
+ hook_fault_code(22, brcmstb_bus_error_handler, SIGBUS, 0,
+ "imprecise external abort");
+}
+
+static irqreturn_t brcmstb_gisb_timeout_handler(int irq, void *dev_id)
+{
+ brcmstb_gisb_arb_decode_addr(dev_id, "timeout");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t brcmstb_gisb_tea_handler(int irq, void *dev_id)
+{
+ brcmstb_gisb_arb_decode_addr(dev_id, "target abort");
+
+ return IRQ_HANDLED;
+}
+
+static DEVICE_ATTR(gisb_arb_timeout, S_IWUSR | S_IRUGO,
+ gisb_arb_get_timeout, gisb_arb_set_timeout);
+
+static struct attribute *gisb_arb_sysfs_attrs[] = {
+ &dev_attr_gisb_arb_timeout.attr,
+ NULL,
+};
+
+static struct attribute_group gisb_arb_sysfs_attr_group = {
+ .attrs = gisb_arb_sysfs_attrs,
+};
+
+static int brcmstb_gisb_arb_probe(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ struct brcmstb_gisb_arb_device *gdev;
+ struct resource *r;
+ int err, timeout_irq, tea_irq;
+ unsigned int num_masters, j = 0;
+ int i, first, last;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ timeout_irq = platform_get_irq(pdev, 0);
+ tea_irq = platform_get_irq(pdev, 1);
+
+ gdev = devm_kzalloc(&pdev->dev, sizeof(*gdev), GFP_KERNEL);
+ if (!gdev)
+ return -ENOMEM;
+
+ mutex_init(&gdev->lock);
+ INIT_LIST_HEAD(&gdev->next);
+
+ gdev->base = devm_request_and_ioremap(&pdev->dev, r);
+ if (!gdev->base)
+ return -ENOMEM;
+
+ err = devm_request_irq(&pdev->dev, timeout_irq,
+ brcmstb_gisb_timeout_handler, 0, pdev->name,
+ gdev);
+ if (err < 0)
+ return err;
+
+ err = devm_request_irq(&pdev->dev, tea_irq,
+ brcmstb_gisb_tea_handler, 0, pdev->name,
+ gdev);
+ if (err < 0)
+ return err;
+
+ /* If we do not have a valid mask, assume all masters are enabled */
+ if (of_property_read_u32(dn, "brcm,gisb-arb-master-mask",
+ &gdev->valid_mask))
+ gdev->valid_mask = 0xffffffff;
+
+ /* Proceed with reading the literal names if we agree on the
+ * number of masters
+ */
+ num_masters = of_property_count_strings(dn,
+ "brcm,gisb-arb-master-names");
+ if (hweight_long(gdev->valid_mask) == num_masters) {
+ first = ffs(gdev->valid_mask) - 1;
+ last = fls(gdev->valid_mask) - 1;
+
+ for (i = first; i < last; i++) {
+ if (!(gdev->valid_mask & BIT(i)))
+ continue;
+
+ of_property_read_string_index(dn,
+ "brcm,gisb-arb-master-names", j,
+ &gdev->master_names[i]);
+ j++;
+ }
+ }
+
+ err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, gdev);
+
+ list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
+
+ dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n",
+ gdev->base, timeout_irq, tea_irq);
+
+ return 0;
+}
+
+static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
+ { .compatible = "brcm,gisb-arb" },
+ { },
+};
+
+static struct platform_driver brcmstb_gisb_arb_driver = {
+ .probe = brcmstb_gisb_arb_probe,
+ .driver = {
+ .name = "brcm-gisb-arb",
+ .owner = THIS_MODULE,
+ .of_match_table = brcmstb_gisb_arb_of_match,
+ },
+};
+
+static int __init brcm_gisb_driver_init(void)
+{
+ return platform_driver_register(&brcmstb_gisb_arb_driver);
+}
+
+module_init(brcm_gisb_driver_init);
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
index feeecae623f..531ae591783 100644
--- a/drivers/bus/omap_l3_noc.c
+++ b/drivers/bus/omap_l3_noc.c
@@ -1,43 +1,45 @@
/*
- * OMAP4XXX L3 Interconnect error handling driver
+ * OMAP L3 Interconnect error handling driver
*
- * Copyright (C) 2011 Texas Corporation
+ * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* Sricharan <r.sricharan@ti.com>
*
* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
*/
-#include <linux/module.h>
#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include "omap_l3_noc.h"
-/*
- * Interrupt Handler for L3 error detection.
- * 1) Identify the L3 clockdomain partition to which the error belongs to.
- * 2) Identify the slave where the error information is logged
- * 3) Print the logged information.
- * 4) Add dump stack to provide kernel trace.
+/**
+ * l3_handle_target() - Handle Target specific parse and reporting
+ * @l3: pointer to l3 struct
+ * @base: base address of clkdm
+ * @flag_mux: flagmux corresponding to the event
+ * @err_src: error source index of the slave (target)
*
- * Two Types of errors :
+ * This does the second part of the error interrupt handling:
+ * 3) Parse in the slave information
+ * 4) Print the logged information.
+ * 5) Add dump stack to provide kernel trace.
+ * 6) Clear the source if known.
+ *
+ * This handles two types of errors:
* 1) Custom errors in L3 :
* Target like DMM/FW/EMIF generates SRESP=ERR error
* 2) Standard L3 error:
@@ -53,214 +55,264 @@
* can be trapped as well. But the trapping is implemented as part
* secure software and hence need not be implemented here.
*/
-static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
+static int l3_handle_target(struct omap_l3 *l3, void __iomem *base,
+ struct l3_flagmux_data *flag_mux, int err_src)
{
+ int k;
+ u32 std_err_main, clear, masterid;
+ u8 op_code, m_req_info;
+ void __iomem *l3_targ_base;
+ void __iomem *l3_targ_stderr, *l3_targ_slvofslsb, *l3_targ_mstaddr;
+ void __iomem *l3_targ_hdr, *l3_targ_info;
+ struct l3_target_data *l3_targ_inst;
+ struct l3_masters_data *master;
+ char *target_name, *master_name = "UN IDENTIFIED";
+ char *err_description;
+ char err_string[30] = { 0 };
+ char info_string[60] = { 0 };
+
+ /* We DO NOT expect err_src to go out of bounds */
+ BUG_ON(err_src > MAX_CLKDM_TARGETS);
+
+ if (err_src < flag_mux->num_targ_data) {
+ l3_targ_inst = &flag_mux->l3_targ[err_src];
+ target_name = l3_targ_inst->name;
+ l3_targ_base = base + l3_targ_inst->offset;
+ } else {
+ target_name = L3_TARGET_NOT_SUPPORTED;
+ }
- struct omap4_l3 *l3 = _l3;
- int inttype, i, k;
+ if (target_name == L3_TARGET_NOT_SUPPORTED)
+ return -ENODEV;
+
+ /* Read the stderrlog_main_source from clk domain */
+ l3_targ_stderr = l3_targ_base + L3_TARG_STDERRLOG_MAIN;
+ l3_targ_slvofslsb = l3_targ_base + L3_TARG_STDERRLOG_SLVOFSLSB;
+
+ std_err_main = readl_relaxed(l3_targ_stderr);
+
+ switch (std_err_main & CUSTOM_ERROR) {
+ case STANDARD_ERROR:
+ err_description = "Standard";
+ snprintf(err_string, sizeof(err_string),
+ ": At Address: 0x%08X ",
+ readl_relaxed(l3_targ_slvofslsb));
+
+ l3_targ_mstaddr = l3_targ_base + L3_TARG_STDERRLOG_MSTADDR;
+ l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_HDR;
+ l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_INFO;
+ break;
+
+ case CUSTOM_ERROR:
+ err_description = "Custom";
+
+ l3_targ_mstaddr = l3_targ_base +
+ L3_TARG_STDERRLOG_CINFO_MSTADDR;
+ l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_CINFO_OPCODE;
+ l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_CINFO_INFO;
+ break;
+
+ default:
+ /* Nothing to be handled here as of now */
+ return 0;
+ }
+
+ /* STDERRLOG_MSTADDR Stores the NTTP master address. */
+ masterid = (readl_relaxed(l3_targ_mstaddr) &
+ l3->mst_addr_mask) >> __ffs(l3->mst_addr_mask);
+
+ for (k = 0, master = l3->l3_masters; k < l3->num_masters;
+ k++, master++) {
+ if (masterid == master->id) {
+ master_name = master->name;
+ break;
+ }
+ }
+
+ op_code = readl_relaxed(l3_targ_hdr) & 0x7;
+
+ m_req_info = readl_relaxed(l3_targ_info) & 0xF;
+ snprintf(info_string, sizeof(info_string),
+ ": %s in %s mode during %s access",
+ (m_req_info & BIT(0)) ? "Opcode Fetch" : "Data Access",
+ (m_req_info & BIT(1)) ? "Supervisor" : "User",
+ (m_req_info & BIT(3)) ? "Debug" : "Functional");
+
+ WARN(true,
+ "%s:L3 %s Error: MASTER %s TARGET %s (%s)%s%s\n",
+ dev_name(l3->dev),
+ err_description,
+ master_name, target_name,
+ l3_transaction_type[op_code],
+ err_string, info_string);
+
+ /* clear the std error log*/
+ clear = std_err_main | CLEAR_STDERR_LOG;
+ writel_relaxed(clear, l3_targ_stderr);
+
+ return 0;
+}
+
+/**
+ * l3_interrupt_handler() - interrupt handler for l3 events
+ * @irq: irq number
+ * @_l3: pointer to l3 structure
+ *
+ * Interrupt Handler for L3 error detection.
+ * 1) Identify the L3 clockdomain partition to which the error belongs to.
+ * 2) Identify the slave where the error information is logged
+ * ... handle the slave event..
+ * 7) if the slave is unknown, mask out the slave.
+ */
+static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
+{
+ struct omap_l3 *l3 = _l3;
+ int inttype, i, ret;
int err_src = 0;
- u32 std_err_main, err_reg, clear, masterid;
- void __iomem *base, *l3_targ_base;
- char *target_name, *master_name = "UN IDENTIFIED";
+ u32 err_reg, mask_val;
+ void __iomem *base, *mask_reg;
+ struct l3_flagmux_data *flag_mux;
/* Get the Type of interrupt */
inttype = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;
- for (i = 0; i < L3_MODULES; i++) {
+ for (i = 0; i < l3->num_modules; i++) {
/*
* Read the regerr register of the clock domain
* to determine the source
*/
base = l3->l3_base[i];
- err_reg = __raw_readl(base + l3_flagmux[i] +
- + L3_FLAGMUX_REGERR0 + (inttype << 3));
+ flag_mux = l3->l3_flagmux[i];
+ err_reg = readl_relaxed(base + flag_mux->offset +
+ L3_FLAGMUX_REGERR0 + (inttype << 3));
+
+ err_reg &= ~(inttype ? flag_mux->mask_app_bits :
+ flag_mux->mask_dbg_bits);
/* Get the corresponding error and analyse */
if (err_reg) {
/* Identify the source from control status register */
err_src = __ffs(err_reg);
- /* Read the stderrlog_main_source from clk domain */
- l3_targ_base = base + *(l3_targ[i] + err_src);
- std_err_main = __raw_readl(l3_targ_base +
- L3_TARG_STDERRLOG_MAIN);
- masterid = __raw_readl(l3_targ_base +
- L3_TARG_STDERRLOG_MSTADDR);
-
- switch (std_err_main & CUSTOM_ERROR) {
- case STANDARD_ERROR:
- target_name =
- l3_targ_inst_name[i][err_src];
- WARN(true, "L3 standard error: TARGET:%s at address 0x%x\n",
- target_name,
- __raw_readl(l3_targ_base +
- L3_TARG_STDERRLOG_SLVOFSLSB));
- /* clear the std error log*/
- clear = std_err_main | CLEAR_STDERR_LOG;
- writel(clear, l3_targ_base +
- L3_TARG_STDERRLOG_MAIN);
- break;
-
- case CUSTOM_ERROR:
- target_name =
- l3_targ_inst_name[i][err_src];
- for (k = 0; k < NUM_OF_L3_MASTERS; k++) {
- if (masterid == l3_masters[k].id)
- master_name =
- l3_masters[k].name;
- }
- WARN(true, "L3 custom error: MASTER:%s TARGET:%s\n",
- master_name, target_name);
- /* clear the std error log*/
- clear = std_err_main | CLEAR_STDERR_LOG;
- writel(clear, l3_targ_base +
- L3_TARG_STDERRLOG_MAIN);
- break;
-
- default:
- /* Nothing to be handled here as of now */
- break;
+ ret = l3_handle_target(l3, base, flag_mux, err_src);
+
+ /*
+ * Certain platforms may have "undocumented" status
+ * pending on boot. So don't generate a severe warning
+ * here. Just mask it off to prevent the error from
+ * recurring and locking up the system.
+ */
+ if (ret) {
+ dev_err(l3->dev,
+ "L3 %s error: target %d mod:%d %s\n",
+ inttype ? "debug" : "application",
+ err_src, i, "(unclearable)");
+
+ mask_reg = base + flag_mux->offset +
+ L3_FLAGMUX_MASK0 + (inttype << 3);
+ mask_val = readl_relaxed(mask_reg);
+ mask_val &= ~(1 << err_src);
+ writel_relaxed(mask_val, mask_reg);
+
+ /* Mark these bits as to be ignored */
+ if (inttype)
+ flag_mux->mask_app_bits |= 1 << err_src;
+ else
+ flag_mux->mask_dbg_bits |= 1 << err_src;
}
- /* Error found so break the for loop */
- break;
+
+ /* Error found so break the for loop */
+ break;
}
}
return IRQ_HANDLED;
}
-static int omap4_l3_probe(struct platform_device *pdev)
+static const struct of_device_id l3_noc_match[] = {
+ {.compatible = "ti,omap4-l3-noc", .data = &omap_l3_data},
+ {.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data},
+ {.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data},
+ {},
+};
+MODULE_DEVICE_TABLE(of, l3_noc_match);
+
+static int omap_l3_probe(struct platform_device *pdev)
{
- static struct omap4_l3 *l3;
- struct resource *res;
- int ret;
+ const struct of_device_id *of_id;
+ static struct omap_l3 *l3;
+ int ret, i, res_idx;
+
+ of_id = of_match_device(l3_noc_match, &pdev->dev);
+ if (!of_id) {
+ dev_err(&pdev->dev, "OF data missing\n");
+ return -EINVAL;
+ }
- l3 = kzalloc(sizeof(*l3), GFP_KERNEL);
+ l3 = devm_kzalloc(&pdev->dev, sizeof(*l3), GFP_KERNEL);
if (!l3)
return -ENOMEM;
+ memcpy(l3, of_id->data, sizeof(*l3));
+ l3->dev = &pdev->dev;
platform_set_drvdata(pdev, l3);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "couldn't find resource 0\n");
- ret = -ENODEV;
- goto err0;
- }
-
- l3->l3_base[0] = ioremap(res->start, resource_size(res));
- if (!l3->l3_base[0]) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -ENOMEM;
- goto err0;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- dev_err(&pdev->dev, "couldn't find resource 1\n");
- ret = -ENODEV;
- goto err1;
- }
-
- l3->l3_base[1] = ioremap(res->start, resource_size(res));
- if (!l3->l3_base[1]) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -ENOMEM;
- goto err1;
- }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- if (!res) {
- dev_err(&pdev->dev, "couldn't find resource 2\n");
- ret = -ENODEV;
- goto err2;
- }
+ /* Get mem resources */
+ for (i = 0, res_idx = 0; i < l3->num_modules; i++) {
+ struct resource *res;
- l3->l3_base[2] = ioremap(res->start, resource_size(res));
- if (!l3->l3_base[2]) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -ENOMEM;
- goto err2;
+ if (l3->l3_base[i] == L3_BASE_IS_SUBMODULE) {
+ /* First entry cannot be submodule */
+ BUG_ON(i == 0);
+ l3->l3_base[i] = l3->l3_base[i - 1];
+ continue;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx);
+ l3->l3_base[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(l3->l3_base[i])) {
+ dev_err(l3->dev, "ioremap %d failed\n", i);
+ return PTR_ERR(l3->l3_base[i]);
+ }
+ res_idx++;
}
/*
* Setup interrupt Handlers
*/
l3->debug_irq = platform_get_irq(pdev, 0);
- ret = request_irq(l3->debug_irq,
- l3_interrupt_handler,
- IRQF_DISABLED, "l3-dbg-irq", l3);
+ ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
+ IRQF_DISABLED, "l3-dbg-irq", l3);
if (ret) {
- pr_crit("L3: request_irq failed to register for 0x%x\n",
- l3->debug_irq);
- goto err3;
+ dev_err(l3->dev, "request_irq failed for %d\n",
+ l3->debug_irq);
+ return ret;
}
l3->app_irq = platform_get_irq(pdev, 1);
- ret = request_irq(l3->app_irq,
- l3_interrupt_handler,
- IRQF_DISABLED, "l3-app-irq", l3);
- if (ret) {
- pr_crit("L3: request_irq failed to register for 0x%x\n",
- l3->app_irq);
- goto err4;
- }
+ ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
+ IRQF_DISABLED, "l3-app-irq", l3);
+ if (ret)
+ dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);
- return 0;
-
-err4:
- free_irq(l3->debug_irq, l3);
-err3:
- iounmap(l3->l3_base[2]);
-err2:
- iounmap(l3->l3_base[1]);
-err1:
- iounmap(l3->l3_base[0]);
-err0:
- kfree(l3);
return ret;
}
-static int omap4_l3_remove(struct platform_device *pdev)
-{
- struct omap4_l3 *l3 = platform_get_drvdata(pdev);
-
- free_irq(l3->app_irq, l3);
- free_irq(l3->debug_irq, l3);
- iounmap(l3->l3_base[0]);
- iounmap(l3->l3_base[1]);
- iounmap(l3->l3_base[2]);
- kfree(l3);
-
- return 0;
-}
-
-#if defined(CONFIG_OF)
-static const struct of_device_id l3_noc_match[] = {
- {.compatible = "ti,omap4-l3-noc", },
- {},
-};
-MODULE_DEVICE_TABLE(of, l3_noc_match);
-#else
-#define l3_noc_match NULL
-#endif
-
-static struct platform_driver omap4_l3_driver = {
- .probe = omap4_l3_probe,
- .remove = omap4_l3_remove,
+static struct platform_driver omap_l3_driver = {
+ .probe = omap_l3_probe,
.driver = {
.name = "omap_l3_noc",
.owner = THIS_MODULE,
- .of_match_table = l3_noc_match,
+ .of_match_table = of_match_ptr(l3_noc_match),
},
};
-static int __init omap4_l3_init(void)
+static int __init omap_l3_init(void)
{
- return platform_driver_register(&omap4_l3_driver);
+ return platform_driver_register(&omap_l3_driver);
}
-postcore_initcall_sync(omap4_l3_init);
+postcore_initcall_sync(omap_l3_init);
-static void __exit omap4_l3_exit(void)
+static void __exit omap_l3_exit(void)
{
- platform_driver_unregister(&omap4_l3_driver);
+ platform_driver_unregister(&omap_l3_driver);
}
-module_exit(omap4_l3_exit);
+module_exit(omap_l3_exit);
diff --git a/drivers/bus/omap_l3_noc.h b/drivers/bus/omap_l3_noc.h
index a6ce34dc481..551e0106143 100644
--- a/drivers/bus/omap_l3_noc.h
+++ b/drivers/bus/omap_l3_noc.h
@@ -1,29 +1,25 @@
/*
- * OMAP4XXX L3 Interconnect error handling driver header
+ * OMAP L3 Interconnect error handling driver header
*
- * Copyright (C) 2011 Texas Corporation
+ * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* sricharan <r.sricharan@ti.com>
*
* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
*/
-#ifndef __ARCH_ARM_MACH_OMAP2_L3_INTERCONNECT_3XXX_H
-#define __ARCH_ARM_MACH_OMAP2_L3_INTERCONNECT_3XXX_H
+#ifndef __OMAP_L3_NOC_H
+#define __OMAP_L3_NOC_H
+
+#define MAX_L3_MODULES 3
+#define MAX_CLKDM_TARGETS 31
-#define L3_MODULES 3
#define CLEAR_STDERR_LOG (1 << 31)
#define CUSTOM_ERROR 0x2
#define STANDARD_ERROR 0x0
@@ -33,63 +29,165 @@
/* L3 TARG register offsets */
#define L3_TARG_STDERRLOG_MAIN 0x48
+#define L3_TARG_STDERRLOG_HDR 0x4c
+#define L3_TARG_STDERRLOG_MSTADDR 0x50
+#define L3_TARG_STDERRLOG_INFO 0x58
#define L3_TARG_STDERRLOG_SLVOFSLSB 0x5c
-#define L3_TARG_STDERRLOG_MSTADDR 0x68
+#define L3_TARG_STDERRLOG_CINFO_INFO 0x64
+#define L3_TARG_STDERRLOG_CINFO_MSTADDR 0x68
+#define L3_TARG_STDERRLOG_CINFO_OPCODE 0x6c
#define L3_FLAGMUX_REGERR0 0xc
+#define L3_FLAGMUX_MASK0 0x8
+
+#define L3_TARGET_NOT_SUPPORTED NULL
+
+#define L3_BASE_IS_SUBMODULE ((void __iomem *)(1 << 0))
+
+static const char * const l3_transaction_type[] = {
+ /* 0 0 0 */ "Idle",
+ /* 0 0 1 */ "Write",
+ /* 0 1 0 */ "Read",
+ /* 0 1 1 */ "ReadEx",
+ /* 1 0 0 */ "Read Link",
+ /* 1 0 1 */ "Write Non-Posted",
+ /* 1 1 0 */ "Write Conditional",
+ /* 1 1 1 */ "Write Broadcast",
+};
-#define NUM_OF_L3_MASTERS (sizeof(l3_masters)/sizeof(l3_masters[0]))
-
-static u32 l3_flagmux[L3_MODULES] = {
- 0x500,
- 0x1000,
- 0X0200
-};
-
-/* L3 Target standard Error register offsets */
-static u32 l3_targ_inst_clk1[] = {
- 0x100, /* DMM1 */
- 0x200, /* DMM2 */
- 0x300, /* ABE */
- 0x400, /* L4CFG */
- 0x600, /* CLK2 PWR DISC */
- 0x0, /* Host CLK1 */
- 0x900 /* L4 Wakeup */
-};
-
-static u32 l3_targ_inst_clk2[] = {
- 0x500, /* CORTEX M3 */
- 0x300, /* DSS */
- 0x100, /* GPMC */
- 0x400, /* ISS */
- 0x700, /* IVAHD */
- 0xD00, /* missing in TRM corresponds to AES1*/
- 0x900, /* L4 PER0*/
- 0x200, /* OCMRAM */
- 0x100, /* missing in TRM corresponds to GPMC sERROR*/
- 0x600, /* SGX */
- 0x800, /* SL2 */
- 0x1600, /* C2C */
- 0x1100, /* missing in TRM corresponds PWR DISC CLK1*/
- 0xF00, /* missing in TRM corrsponds to SHA1*/
- 0xE00, /* missing in TRM corresponds to AES2*/
- 0xC00, /* L4 PER3 */
- 0xA00, /* L4 PER1*/
- 0xB00, /* L4 PER2*/
- 0x0, /* HOST CLK2 */
- 0x1800, /* CAL */
- 0x1700 /* LLI */
-};
-
-static u32 l3_targ_inst_clk3[] = {
- 0x0100 /* EMUSS */,
- 0x0300, /* DEBUGSS_CT_TBR */
- 0x0 /* HOST CLK3 */
-};
-
-static struct l3_masters_data {
+/**
+ * struct l3_masters_data - L3 Master information
+ * @id: ID of the L3 Master
+ * @name: master name
+ */
+struct l3_masters_data {
u32 id;
- char name[10];
-} l3_masters[] = {
+ char *name;
+};
+
+/**
+ * struct l3_target_data - L3 Target information
+ * @offset: Offset from base for L3 Target
+ * @name: Target name
+ *
+ * Target information is organized and indexed by bit field definitions.
+ */
+struct l3_target_data {
+ u32 offset;
+ char *name;
+};
+
+/**
+ * struct l3_flagmux_data - Flag Mux information
+ * @offset: offset from base for flagmux register
+ * @l3_targ: array indexed by flagmux index (bit offset) pointing to the
+ * target data. unsupported ones are marked with
+ * L3_TARGET_NOT_SUPPORTED
+ * @num_targ_data: number of entries in target data
+ * @mask_app_bits: ignore these from raw application irq status
+ * @mask_dbg_bits: ignore these from raw debug irq status
+ */
+struct l3_flagmux_data {
+ u32 offset;
+ struct l3_target_data *l3_targ;
+ u8 num_targ_data;
+ u32 mask_app_bits;
+ u32 mask_dbg_bits;
+};
+
+
+/**
+ * struct omap_l3 - Description of data relevant for L3 bus.
+ * @dev: device representing the bus (populated runtime)
+ * @l3_base: base addresses of modules (populated runtime if 0)
+ * if set to L3_BASE_IS_SUBMODULE, then uses previous
+ * module index as the base address
+ * @l3_flag_mux: array containing flag mux data per module
+ * offset from corresponding module base indexed per
+ * module.
+ * @num_modules: number of clock domains / modules.
+ * @l3_masters: array pointing to master data containing name and register
+ * offset for the master.
+ * @num_master: number of masters
+ * @mst_addr_mask: Mask representing MSTADDR information of NTTP packet
+ * @debug_irq: irq number of the debug interrupt (populated runtime)
+ * @app_irq: irq number of the application interrupt (populated runtime)
+ */
+struct omap_l3 {
+ struct device *dev;
+
+ void __iomem *l3_base[MAX_L3_MODULES];
+ struct l3_flagmux_data **l3_flagmux;
+ int num_modules;
+
+ struct l3_masters_data *l3_masters;
+ int num_masters;
+ u32 mst_addr_mask;
+
+ int debug_irq;
+ int app_irq;
+};
+
+static struct l3_target_data omap_l3_target_data_clk1[] = {
+ {0x100, "DMM1",},
+ {0x200, "DMM2",},
+ {0x300, "ABE",},
+ {0x400, "L4CFG",},
+ {0x600, "CLK2PWRDISC",},
+ {0x0, "HOSTCLK1",},
+ {0x900, "L4WAKEUP",},
+};
+
+static struct l3_flagmux_data omap_l3_flagmux_clk1 = {
+ .offset = 0x500,
+ .l3_targ = omap_l3_target_data_clk1,
+ .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk1),
+};
+
+
+static struct l3_target_data omap_l3_target_data_clk2[] = {
+ {0x500, "CORTEXM3",},
+ {0x300, "DSS",},
+ {0x100, "GPMC",},
+ {0x400, "ISS",},
+ {0x700, "IVAHD",},
+ {0xD00, "AES1",},
+ {0x900, "L4PER0",},
+ {0x200, "OCMRAM",},
+ {0x100, "GPMCsERROR",},
+ {0x600, "SGX",},
+ {0x800, "SL2",},
+ {0x1600, "C2C",},
+ {0x1100, "PWRDISCCLK1",},
+ {0xF00, "SHA1",},
+ {0xE00, "AES2",},
+ {0xC00, "L4PER3",},
+ {0xA00, "L4PER1",},
+ {0xB00, "L4PER2",},
+ {0x0, "HOSTCLK2",},
+ {0x1800, "CAL",},
+ {0x1700, "LLI",},
+};
+
+static struct l3_flagmux_data omap_l3_flagmux_clk2 = {
+ .offset = 0x1000,
+ .l3_targ = omap_l3_target_data_clk2,
+ .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk2),
+};
+
+
+static struct l3_target_data omap_l3_target_data_clk3[] = {
+ {0x0100, "EMUSS",},
+ {0x0300, "DEBUG SOURCE",},
+ {0x0, "HOST CLK3",},
+};
+
+static struct l3_flagmux_data omap_l3_flagmux_clk3 = {
+ .offset = 0x0200,
+ .l3_targ = omap_l3_target_data_clk3,
+ .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk3),
+};
+
+static struct l3_masters_data omap_l3_masters[] = {
{ 0x0 , "MPU"},
{ 0x10, "CS_ADP"},
{ 0x14, "xxx"},
@@ -117,60 +215,261 @@ static struct l3_masters_data {
{ 0xC8, "USBHOSTFS"}
};
-static char *l3_targ_inst_name[L3_MODULES][21] = {
- {
- "DMM1",
- "DMM2",
- "ABE",
- "L4CFG",
- "CLK2 PWR DISC",
- "HOST CLK1",
- "L4 WAKEUP"
- },
- {
- "CORTEX M3" ,
- "DSS ",
- "GPMC ",
- "ISS ",
- "IVAHD ",
- "AES1",
- "L4 PER0",
- "OCMRAM ",
- "GPMC sERROR",
- "SGX ",
- "SL2 ",
- "C2C ",
- "PWR DISC CLK1",
- "SHA1",
- "AES2",
- "L4 PER3",
- "L4 PER1",
- "L4 PER2",
- "HOST CLK2",
- "CAL",
- "LLI"
- },
- {
- "EMUSS",
- "DEBUG SOURCE",
- "HOST CLK3"
- },
-};
-
-static u32 *l3_targ[L3_MODULES] = {
- l3_targ_inst_clk1,
- l3_targ_inst_clk2,
- l3_targ_inst_clk3,
-};
-
-struct omap4_l3 {
- struct device *dev;
- struct clk *ick;
+static struct l3_flagmux_data *omap_l3_flagmux[] = {
+ &omap_l3_flagmux_clk1,
+ &omap_l3_flagmux_clk2,
+ &omap_l3_flagmux_clk3,
+};
+
+static const struct omap_l3 omap_l3_data = {
+ .l3_flagmux = omap_l3_flagmux,
+ .num_modules = ARRAY_SIZE(omap_l3_flagmux),
+ .l3_masters = omap_l3_masters,
+ .num_masters = ARRAY_SIZE(omap_l3_masters),
+ /* The 6 MSBs of register field used to distinguish initiator */
+ .mst_addr_mask = 0xFC,
+};
- /* memory base */
- void __iomem *l3_base[L3_MODULES];
+/* DRA7 data */
+static struct l3_target_data dra_l3_target_data_clk1[] = {
+ {0x2a00, "AES1",},
+ {0x0200, "DMM_P1",},
+ {0x0600, "DSP2_SDMA",},
+ {0x0b00, "EVE2",},
+ {0x1300, "DMM_P2",},
+ {0x2c00, "AES2",},
+ {0x0300, "DSP1_SDMA",},
+ {0x0a00, "EVE1",},
+ {0x0c00, "EVE3",},
+ {0x0d00, "EVE4",},
+ {0x2900, "DSS",},
+ {0x0100, "GPMC",},
+ {0x3700, "PCIE1",},
+ {0x1600, "IVA_CONFIG",},
+ {0x1800, "IVA_SL2IF",},
+ {0x0500, "L4_CFG",},
+ {0x1d00, "L4_WKUP",},
+ {0x3800, "PCIE2",},
+ {0x3300, "SHA2_1",},
+ {0x1200, "GPU",},
+ {0x1000, "IPU1",},
+ {0x1100, "IPU2",},
+ {0x2000, "TPCC_EDMA",},
+ {0x2e00, "TPTC1_EDMA",},
+ {0x2b00, "TPTC2_EDMA",},
+ {0x0700, "VCP1",},
+ {0x2500, "L4_PER2_P3",},
+ {0x0e00, "L4_PER3_P3",},
+ {0x2200, "MMU1",},
+ {0x1400, "PRUSS1",},
+ {0x1500, "PRUSS2"},
+ {0x0800, "VCP1",},
+};
- int debug_irq;
- int app_irq;
+static struct l3_flagmux_data dra_l3_flagmux_clk1 = {
+ .offset = 0x803500,
+ .l3_targ = dra_l3_target_data_clk1,
+ .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk1),
+};
+
+static struct l3_target_data dra_l3_target_data_clk2[] = {
+ {0x0, "HOST CLK1",},
+ {0x0, "HOST CLK2",},
+ {0xdead, L3_TARGET_NOT_SUPPORTED,},
+ {0x3400, "SHA2_2",},
+ {0x0900, "BB2D",},
+ {0xdead, L3_TARGET_NOT_SUPPORTED,},
+ {0x2100, "L4_PER1_P3",},
+ {0x1c00, "L4_PER1_P1",},
+ {0x1f00, "L4_PER1_P2",},
+ {0x2300, "L4_PER2_P1",},
+ {0x2400, "L4_PER2_P2",},
+ {0x2600, "L4_PER3_P1",},
+ {0x2700, "L4_PER3_P2",},
+ {0x2f00, "MCASP1",},
+ {0x3000, "MCASP2",},
+ {0x3100, "MCASP3",},
+ {0x2800, "MMU2",},
+ {0x0f00, "OCMC_RAM1",},
+ {0x1700, "OCMC_RAM2",},
+ {0x1900, "OCMC_RAM3",},
+ {0x1e00, "OCMC_ROM",},
+ {0x3900, "QSPI",},
+};
+
+static struct l3_flagmux_data dra_l3_flagmux_clk2 = {
+ .offset = 0x803600,
+ .l3_targ = dra_l3_target_data_clk2,
+ .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk2),
+};
+
+static struct l3_target_data dra_l3_target_data_clk3[] = {
+ {0x0100, "L3_INSTR"},
+ {0x0300, "DEBUGSS_CT_TBR"},
+ {0x0, "HOST CLK3"},
+};
+
+static struct l3_flagmux_data dra_l3_flagmux_clk3 = {
+ .offset = 0x200,
+ .l3_targ = dra_l3_target_data_clk3,
+ .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk3),
+};
+
+static struct l3_masters_data dra_l3_masters[] = {
+ { 0x0, "MPU" },
+ { 0x4, "CS_DAP" },
+ { 0x5, "IEEE1500_2_OCP" },
+ { 0x8, "DSP1_MDMA" },
+ { 0x9, "DSP1_CFG" },
+ { 0xA, "DSP1_DMA" },
+ { 0xB, "DSP2_MDMA" },
+ { 0xC, "DSP2_CFG" },
+ { 0xD, "DSP2_DMA" },
+ { 0xE, "IVA" },
+ { 0x10, "EVE1_P1" },
+ { 0x11, "EVE2_P1" },
+ { 0x12, "EVE3_P1" },
+ { 0x13, "EVE4_P1" },
+ { 0x14, "PRUSS1 PRU1" },
+ { 0x15, "PRUSS1 PRU2" },
+ { 0x16, "PRUSS2 PRU1" },
+ { 0x17, "PRUSS2 PRU2" },
+ { 0x18, "IPU1" },
+ { 0x19, "IPU2" },
+ { 0x1A, "SDMA" },
+ { 0x1B, "CDMA" },
+ { 0x1C, "TC1_EDMA" },
+ { 0x1D, "TC2_EDMA" },
+ { 0x20, "DSS" },
+ { 0x21, "MMU1" },
+ { 0x22, "PCIE1" },
+ { 0x23, "MMU2" },
+ { 0x24, "VIP1" },
+ { 0x25, "VIP2" },
+ { 0x26, "VIP3" },
+ { 0x27, "VPE" },
+ { 0x28, "GPU_P1" },
+ { 0x29, "BB2D" },
+ { 0x29, "GPU_P2" },
+ { 0x2B, "GMAC_SW" },
+ { 0x2C, "USB3" },
+ { 0x2D, "USB2_SS" },
+ { 0x2E, "USB2_ULPI_SS1" },
+ { 0x2F, "USB2_ULPI_SS2" },
+ { 0x30, "CSI2_1" },
+ { 0x31, "CSI2_2" },
+ { 0x33, "SATA" },
+ { 0x34, "EVE1_P2" },
+ { 0x35, "EVE2_P2" },
+ { 0x36, "EVE3_P2" },
+ { 0x37, "EVE4_P2" }
};
-#endif
+
+static struct l3_flagmux_data *dra_l3_flagmux[] = {
+ &dra_l3_flagmux_clk1,
+ &dra_l3_flagmux_clk2,
+ &dra_l3_flagmux_clk3,
+};
+
+static const struct omap_l3 dra_l3_data = {
+ .l3_base = { [1] = L3_BASE_IS_SUBMODULE },
+ .l3_flagmux = dra_l3_flagmux,
+ .num_modules = ARRAY_SIZE(dra_l3_flagmux),
+ .l3_masters = dra_l3_masters,
+ .num_masters = ARRAY_SIZE(dra_l3_masters),
+ /* The 6 MSBs of register field used to distinguish initiator */
+ .mst_addr_mask = 0xFC,
+};
+
+/* AM4372 data */
+static struct l3_target_data am4372_l3_target_data_200f[] = {
+ {0xf00, "EMIF",},
+ {0x1200, "DES",},
+ {0x400, "OCMCRAM",},
+ {0x700, "TPTC0",},
+ {0x800, "TPTC1",},
+ {0x900, "TPTC2"},
+ {0xb00, "TPCC",},
+ {0xd00, "DEBUGSS",},
+ {0xdead, L3_TARGET_NOT_SUPPORTED,},
+ {0x200, "SHA",},
+ {0xc00, "SGX530",},
+ {0x500, "AES0",},
+ {0xa00, "L4_FAST",},
+ {0x300, "MPUSS_L2_RAM",},
+ {0x100, "ICSS",},
+};
+
+static struct l3_flagmux_data am4372_l3_flagmux_200f = {
+ .offset = 0x1000,
+ .l3_targ = am4372_l3_target_data_200f,
+ .num_targ_data = ARRAY_SIZE(am4372_l3_target_data_200f),
+};
+
+static struct l3_target_data am4372_l3_target_data_100s[] = {
+ {0x100, "L4_PER_0",},
+ {0x200, "L4_PER_1",},
+ {0x300, "L4_PER_2",},
+ {0x400, "L4_PER_3",},
+ {0x800, "McASP0",},
+ {0x900, "McASP1",},
+ {0xC00, "MMCHS2",},
+ {0x700, "GPMC",},
+ {0xD00, "L4_FW",},
+ {0xdead, L3_TARGET_NOT_SUPPORTED,},
+ {0x500, "ADCTSC",},
+ {0xE00, "L4_WKUP",},
+ {0xA00, "MAG_CARD",},
+};
+
+static struct l3_flagmux_data am4372_l3_flagmux_100s = {
+ .offset = 0x600,
+ .l3_targ = am4372_l3_target_data_100s,
+ .num_targ_data = ARRAY_SIZE(am4372_l3_target_data_100s),
+};
+
+static struct l3_masters_data am4372_l3_masters[] = {
+ { 0x0, "M1 (128-bit)"},
+ { 0x1, "M2 (64-bit)"},
+ { 0x4, "DAP"},
+ { 0x5, "P1500"},
+ { 0xC, "ICSS0"},
+ { 0xD, "ICSS1"},
+ { 0x14, "Wakeup Processor"},
+ { 0x18, "TPTC0 Read"},
+ { 0x19, "TPTC0 Write"},
+ { 0x1A, "TPTC1 Read"},
+ { 0x1B, "TPTC1 Write"},
+ { 0x1C, "TPTC2 Read"},
+ { 0x1D, "TPTC2 Write"},
+ { 0x20, "SGX530"},
+ { 0x21, "OCP WP Traffic Probe"},
+ { 0x22, "OCP WP DMA Profiling"},
+ { 0x23, "OCP WP Event Trace"},
+ { 0x25, "DSS"},
+ { 0x28, "Crypto DMA RD"},
+ { 0x29, "Crypto DMA WR"},
+ { 0x2C, "VPFE0"},
+ { 0x2D, "VPFE1"},
+ { 0x30, "GEMAC"},
+ { 0x34, "USB0 RD"},
+ { 0x35, "USB0 WR"},
+ { 0x36, "USB1 RD"},
+ { 0x37, "USB1 WR"},
+};
+
+static struct l3_flagmux_data *am4372_l3_flagmux[] = {
+ &am4372_l3_flagmux_200f,
+ &am4372_l3_flagmux_100s,
+};
+
+static const struct omap_l3 am4372_l3_data = {
+ .l3_flagmux = am4372_l3_flagmux,
+ .num_modules = ARRAY_SIZE(am4372_l3_flagmux),
+ .l3_masters = am4372_l3_masters,
+ .num_masters = ARRAY_SIZE(am4372_l3_masters),
+ /* All 6 bits of register field used to distinguish initiator */
+ .mst_addr_mask = 0x3F,
+};
+
+#endif /* __OMAP_L3_NOC_H */
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 870e18b9a68..1fad4c5e3f5 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -24,6 +24,8 @@
#define APLL_CON0 0x100
#define SRC_CPU 0x200
#define DIV_CPU0 0x500
+#define PWR_CTRL1 0x1020
+#define PWR_CTRL2 0x1024
#define MPLL_LOCK 0x4000
#define MPLL_CON0 0x4100
#define SRC_CORE1 0x4204
@@ -84,6 +86,23 @@
#define SRC_CDREX 0x20200
#define PLL_DIV2_SEL 0x20a24
+/* Below definitions are used for PWR_CTRL settings */
+#define PWR_CTRL1_CORE2_DOWN_RATIO (7 << 28)
+#define PWR_CTRL1_CORE1_DOWN_RATIO (7 << 16)
+#define PWR_CTRL1_DIV2_DOWN_EN (1 << 9)
+#define PWR_CTRL1_DIV1_DOWN_EN (1 << 8)
+#define PWR_CTRL1_USE_CORE1_WFE (1 << 5)
+#define PWR_CTRL1_USE_CORE0_WFE (1 << 4)
+#define PWR_CTRL1_USE_CORE1_WFI (1 << 1)
+#define PWR_CTRL1_USE_CORE0_WFI (1 << 0)
+
+#define PWR_CTRL2_DIV2_UP_EN (1 << 25)
+#define PWR_CTRL2_DIV1_UP_EN (1 << 24)
+#define PWR_CTRL2_DUR_STANDBY2_VAL (1 << 16)
+#define PWR_CTRL2_DUR_STANDBY1_VAL (1 << 8)
+#define PWR_CTRL2_CORE2_UP_RATIO (1 << 4)
+#define PWR_CTRL2_CORE1_UP_RATIO (1 << 0)
+
/* list of PLLs to be registered */
enum exynos5250_plls {
apll, mpll, cpll, epll, vpll, gpll, bpll,
@@ -102,6 +121,8 @@ static struct samsung_clk_reg_dump *exynos5250_save;
static unsigned long exynos5250_clk_regs[] __initdata = {
SRC_CPU,
DIV_CPU0,
+ PWR_CTRL1,
+ PWR_CTRL2,
SRC_CORE1,
SRC_TOP0,
SRC_TOP1,
@@ -736,6 +757,7 @@ static struct of_device_id ext_clk_match[] __initdata = {
static void __init exynos5250_clk_init(struct device_node *np)
{
struct samsung_clk_provider *ctx;
+ unsigned int tmp;
if (np) {
reg_base = of_iomap(np, 0);
@@ -776,6 +798,26 @@ static void __init exynos5250_clk_init(struct device_node *np)
samsung_clk_register_gate(ctx, exynos5250_gate_clks,
ARRAY_SIZE(exynos5250_gate_clks));
+ /*
+ * Enable arm clock down (in idle) and set arm divider
+ * ratios in WFI/WFE state.
+ */
+ tmp = (PWR_CTRL1_CORE2_DOWN_RATIO | PWR_CTRL1_CORE1_DOWN_RATIO |
+ PWR_CTRL1_DIV2_DOWN_EN | PWR_CTRL1_DIV1_DOWN_EN |
+ PWR_CTRL1_USE_CORE1_WFE | PWR_CTRL1_USE_CORE0_WFE |
+ PWR_CTRL1_USE_CORE1_WFI | PWR_CTRL1_USE_CORE0_WFI);
+ __raw_writel(tmp, reg_base + PWR_CTRL1);
+
+ /*
+ * Enable arm clock up (on exiting idle). Set arm divider
+ * ratios when not in idle along with the standby duration
+ * ratios.
+ */
+ tmp = (PWR_CTRL2_DIV2_UP_EN | PWR_CTRL2_DIV1_UP_EN |
+ PWR_CTRL2_DUR_STANDBY2_VAL | PWR_CTRL2_DUR_STANDBY1_VAL |
+ PWR_CTRL2_CORE2_UP_RATIO | PWR_CTRL2_CORE1_UP_RATIO);
+ __raw_writel(tmp, reg_base + PWR_CTRL2);
+
exynos5250_clk_sleep_init();
pr_info("Exynos5250: clock setup completed, armclk=%ld\n",
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index acf5a329d53..8d6420013a0 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -24,6 +24,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
#define EXYNOS4_MCTREG(x) (x)
#define EXYNOS4_MCT_G_CNT_L EXYNOS4_MCTREG(0x100)
@@ -192,12 +193,19 @@ struct clocksource mct_frc = {
.resume = exynos4_frc_resume,
};
+static u64 notrace exynos4_read_sched_clock(void)
+{
+ return exynos4_frc_read(&mct_frc);
+}
+
static void __init exynos4_clocksource_init(void)
{
exynos4_mct_frc_start(0, 0);
if (clocksource_register_hz(&mct_frc, clk_rate))
panic("%s: can't register clocksource\n", mct_frc.name);
+
+ sched_clock_register(exynos4_read_sched_clock, 64, clk_rate);
}
static void exynos4_mct_comp0_stop(void)
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 580503513f0..d2c7b4b8ffd 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -30,7 +30,7 @@ config ARM_EXYNOS_CPUFREQ
config ARM_EXYNOS4210_CPUFREQ
bool "SAMSUNG EXYNOS4210"
- depends on CPU_EXYNOS4210 && !ARCH_MULTIPLATFORM
+ depends on CPU_EXYNOS4210
default y
select ARM_EXYNOS_CPUFREQ
help
@@ -41,7 +41,7 @@ config ARM_EXYNOS4210_CPUFREQ
config ARM_EXYNOS4X12_CPUFREQ
bool "SAMSUNG EXYNOS4x12"
- depends on (SOC_EXYNOS4212 || SOC_EXYNOS4412) && !ARCH_MULTIPLATFORM
+ depends on SOC_EXYNOS4212 || SOC_EXYNOS4412
default y
select ARM_EXYNOS_CPUFREQ
help
@@ -52,7 +52,7 @@ config ARM_EXYNOS4X12_CPUFREQ
config ARM_EXYNOS5250_CPUFREQ
bool "SAMSUNG EXYNOS5250"
- depends on SOC_EXYNOS5250 && !ARCH_MULTIPLATFORM
+ depends on SOC_EXYNOS5250
default y
select ARM_EXYNOS_CPUFREQ
help
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index e8a4a7ed38c..348c8bafe43 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -19,8 +19,6 @@
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <plat/cpu.h>
-
#include "exynos-cpufreq.h"
static struct exynos_dvfs_info *exynos_info;
diff --git a/drivers/cpufreq/exynos-cpufreq.h b/drivers/cpufreq/exynos-cpufreq.h
index f189547bb44..51af42e1b7f 100644
--- a/drivers/cpufreq/exynos-cpufreq.h
+++ b/drivers/cpufreq/exynos-cpufreq.h
@@ -49,6 +49,7 @@ struct exynos_dvfs_info {
struct cpufreq_frequency_table *freq_table;
void (*set_freq)(unsigned int, unsigned int);
bool (*need_apll_change)(unsigned int, unsigned int);
+ void __iomem *cmu_regs;
};
#ifdef CONFIG_ARM_EXYNOS4210_CPUFREQ
@@ -76,24 +77,21 @@ static inline int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
}
#endif
-#include <plat/cpu.h>
-#include <mach/map.h>
+#define EXYNOS4_CLKSRC_CPU 0x14200
+#define EXYNOS4_CLKMUX_STATCPU 0x14400
-#define EXYNOS4_CLKSRC_CPU (S5P_VA_CMU + 0x14200)
-#define EXYNOS4_CLKMUX_STATCPU (S5P_VA_CMU + 0x14400)
-
-#define EXYNOS4_CLKDIV_CPU (S5P_VA_CMU + 0x14500)
-#define EXYNOS4_CLKDIV_CPU1 (S5P_VA_CMU + 0x14504)
-#define EXYNOS4_CLKDIV_STATCPU (S5P_VA_CMU + 0x14600)
-#define EXYNOS4_CLKDIV_STATCPU1 (S5P_VA_CMU + 0x14604)
+#define EXYNOS4_CLKDIV_CPU 0x14500
+#define EXYNOS4_CLKDIV_CPU1 0x14504
+#define EXYNOS4_CLKDIV_STATCPU 0x14600
+#define EXYNOS4_CLKDIV_STATCPU1 0x14604
#define EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT (16)
#define EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK (0x7 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)
-#define EXYNOS5_APLL_LOCK (S5P_VA_CMU + 0x00000)
-#define EXYNOS5_APLL_CON0 (S5P_VA_CMU + 0x00100)
-#define EXYNOS5_CLKMUX_STATCPU (S5P_VA_CMU + 0x00400)
-#define EXYNOS5_CLKDIV_CPU0 (S5P_VA_CMU + 0x00500)
-#define EXYNOS5_CLKDIV_CPU1 (S5P_VA_CMU + 0x00504)
-#define EXYNOS5_CLKDIV_STATCPU0 (S5P_VA_CMU + 0x00600)
-#define EXYNOS5_CLKDIV_STATCPU1 (S5P_VA_CMU + 0x00604)
+#define EXYNOS5_APLL_LOCK 0x00000
+#define EXYNOS5_APLL_CON0 0x00100
+#define EXYNOS5_CLKMUX_STATCPU 0x00400
+#define EXYNOS5_CLKDIV_CPU0 0x00500
+#define EXYNOS5_CLKDIV_CPU1 0x00504
+#define EXYNOS5_CLKDIV_STATCPU0 0x00600
+#define EXYNOS5_CLKDIV_STATCPU1 0x00604
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index 6384e5b9a34..61a54310a1b 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -16,6 +16,8 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include "exynos-cpufreq.h"
@@ -23,6 +25,7 @@ static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;
+static struct exynos_dvfs_info *cpufreq;
static unsigned int exynos4210_volt_table[] = {
1250000, 1150000, 1050000, 975000, 950000,
@@ -60,20 +63,20 @@ static void exynos4210_set_clkdiv(unsigned int div_index)
tmp = apll_freq_4210[div_index].clk_div_cpu0;
- __raw_writel(tmp, EXYNOS4_CLKDIV_CPU);
+ __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU);
do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STATCPU);
+ tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU);
} while (tmp & 0x1111111);
/* Change Divider - CPU1 */
tmp = apll_freq_4210[div_index].clk_div_cpu1;
- __raw_writel(tmp, EXYNOS4_CLKDIV_CPU1);
+ __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1);
do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STATCPU1);
+ tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1);
} while (tmp & 0x11);
}
@@ -85,7 +88,7 @@ static void exynos4210_set_apll(unsigned int index)
clk_set_parent(moutcore, mout_mpll);
do {
- tmp = (__raw_readl(EXYNOS4_CLKMUX_STATCPU)
+ tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU)
>> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT);
tmp &= 0x7;
} while (tmp != 0x2);
@@ -96,7 +99,7 @@ static void exynos4210_set_apll(unsigned int index)
clk_set_parent(moutcore, mout_apll);
do {
- tmp = __raw_readl(EXYNOS4_CLKMUX_STATCPU);
+ tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU);
tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK;
} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
}
@@ -115,8 +118,30 @@ static void exynos4210_set_frequency(unsigned int old_index,
int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
{
+ struct device_node *np;
unsigned long rate;
+ /*
+ * HACK: This is a temporary workaround to get access to clock
+ * controller registers directly and remove static mappings and
+ * dependencies on platform headers. It is necessary to enable
+ * Exynos multi-platform support and will be removed together with
+ * this whole driver as soon as Exynos gets migrated to use
+ * cpufreq-cpu0 driver.
+ */
+ np = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-clock");
+ if (!np) {
+ pr_err("%s: failed to find clock controller DT node\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ info->cmu_regs = of_iomap(np, 0);
+ if (!info->cmu_regs) {
+ pr_err("%s: failed to map CMU registers\n", __func__);
+ return -EFAULT;
+ }
+
cpu_clk = clk_get(NULL, "armclk");
if (IS_ERR(cpu_clk))
return PTR_ERR(cpu_clk);
@@ -143,6 +168,8 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
info->freq_table = exynos4210_freq_table;
info->set_freq = exynos4210_set_frequency;
+ cpufreq = info;
+
return 0;
err_mout_apll:
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 63a3907ce57..351a2074cfe 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -16,6 +16,8 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include "exynos-cpufreq.h"
@@ -23,6 +25,7 @@ static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;
+static struct exynos_dvfs_info *cpufreq;
static unsigned int exynos4x12_volt_table[] = {
1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500,
@@ -105,19 +108,20 @@ static void exynos4x12_set_clkdiv(unsigned int div_index)
tmp = apll_freq_4x12[div_index].clk_div_cpu0;
- __raw_writel(tmp, EXYNOS4_CLKDIV_CPU);
+ __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU);
- while (__raw_readl(EXYNOS4_CLKDIV_STATCPU) & 0x11111111)
+ while (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU)
+ & 0x11111111)
cpu_relax();
/* Change Divider - CPU1 */
tmp = apll_freq_4x12[div_index].clk_div_cpu1;
- __raw_writel(tmp, EXYNOS4_CLKDIV_CPU1);
+ __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1);
do {
cpu_relax();
- tmp = __raw_readl(EXYNOS4_CLKDIV_STATCPU1);
+ tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1);
} while (tmp != 0x0);
}
@@ -130,7 +134,7 @@ static void exynos4x12_set_apll(unsigned int index)
do {
cpu_relax();
- tmp = (__raw_readl(EXYNOS4_CLKMUX_STATCPU)
+ tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU)
>> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT);
tmp &= 0x7;
} while (tmp != 0x2);
@@ -142,7 +146,7 @@ static void exynos4x12_set_apll(unsigned int index)
do {
cpu_relax();
- tmp = __raw_readl(EXYNOS4_CLKMUX_STATCPU);
+ tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU);
tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK;
} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
}
@@ -161,8 +165,30 @@ static void exynos4x12_set_frequency(unsigned int old_index,
int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
{
+ struct device_node *np;
unsigned long rate;
+ /*
+ * HACK: This is a temporary workaround to get access to clock
+ * controller registers directly and remove static mappings and
+ * dependencies on platform headers. It is necessary to enable
+ * Exynos multi-platform support and will be removed together with
+ * this whole driver as soon as Exynos gets migrated to use
+ * cpufreq-cpu0 driver.
+ */
+ np = of_find_compatible_node(NULL, NULL, "samsung,exynos4412-clock");
+ if (!np) {
+ pr_err("%s: failed to find clock controller DT node\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ info->cmu_regs = of_iomap(np, 0);
+ if (!info->cmu_regs) {
+ pr_err("%s: failed to map CMU registers\n", __func__);
+ return -EFAULT;
+ }
+
cpu_clk = clk_get(NULL, "armclk");
if (IS_ERR(cpu_clk))
return PTR_ERR(cpu_clk);
@@ -194,6 +220,8 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
info->freq_table = exynos4x12_freq_table;
info->set_freq = exynos4x12_set_frequency;
+ cpufreq = info;
+
return 0;
err_mout_apll:
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index 363a0b3fe1b..c91ce69dc63 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -16,8 +16,8 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
-
-#include <mach/map.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include "exynos-cpufreq.h"
@@ -25,6 +25,7 @@ static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;
+static struct exynos_dvfs_info *cpufreq;
static unsigned int exynos5250_volt_table[] = {
1300000, 1250000, 1225000, 1200000, 1150000,
@@ -87,17 +88,18 @@ static void set_clkdiv(unsigned int div_index)
tmp = apll_freq_5250[div_index].clk_div_cpu0;
- __raw_writel(tmp, EXYNOS5_CLKDIV_CPU0);
+ __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU0);
- while (__raw_readl(EXYNOS5_CLKDIV_STATCPU0) & 0x11111111)
+ while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU0)
+ & 0x11111111)
cpu_relax();
/* Change Divider - CPU1 */
tmp = apll_freq_5250[div_index].clk_div_cpu1;
- __raw_writel(tmp, EXYNOS5_CLKDIV_CPU1);
+ __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU1);
- while (__raw_readl(EXYNOS5_CLKDIV_STATCPU1) & 0x11)
+ while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU1) & 0x11)
cpu_relax();
}
@@ -111,7 +113,8 @@ static void set_apll(unsigned int index)
do {
cpu_relax();
- tmp = (__raw_readl(EXYNOS5_CLKMUX_STATCPU) >> 16);
+ tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU)
+ >> 16);
tmp &= 0x7;
} while (tmp != 0x2);
@@ -122,7 +125,7 @@ static void set_apll(unsigned int index)
do {
cpu_relax();
- tmp = __raw_readl(EXYNOS5_CLKMUX_STATCPU);
+ tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU);
tmp &= (0x7 << 16);
} while (tmp != (0x1 << 16));
}
@@ -141,8 +144,30 @@ static void exynos5250_set_frequency(unsigned int old_index,
int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
{
+ struct device_node *np;
unsigned long rate;
+ /*
+ * HACK: This is a temporary workaround to get access to clock
+ * controller registers directly and remove static mappings and
+ * dependencies on platform headers. It is necessary to enable
+ * Exynos multi-platform support and will be removed together with
+ * this whole driver as soon as Exynos gets migrated to use
+ * cpufreq-cpu0 driver.
+ */
+ np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-clock");
+ if (!np) {
+ pr_err("%s: failed to find clock controller DT node\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ info->cmu_regs = of_iomap(np, 0);
+ if (!info->cmu_regs) {
+ pr_err("%s: failed to map CMU registers\n", __func__);
+ return -EFAULT;
+ }
+
cpu_clk = clk_get(NULL, "armclk");
if (IS_ERR(cpu_clk))
return PTR_ERR(cpu_clk);
@@ -169,6 +194,8 @@ int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
info->freq_table = exynos5250_freq_table;
info->set_freq = exynos5250_set_frequency;
+ cpufreq = info;
+
return 0;
err_mout_apll:
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 5bb94780d37..ae1d78ea7df 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -49,3 +49,9 @@ config ARM_AT91_CPUIDLE
depends on ARCH_AT91
help
Select this to enable cpuidle for AT91 processors
+
+config ARM_EXYNOS_CPUIDLE
+ bool "Cpu Idle Driver for the Exynos processors"
+ depends on ARCH_EXYNOS
+ help
+ Select this to enable cpuidle for Exynos processors
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 9902d052bd8..cd3ab59f846 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE) += cpuidle-kirkwood.o
obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o
obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o
obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o
+obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o
###############################################################################
# POWERPC drivers
diff --git a/drivers/cpuidle/cpuidle-exynos.c b/drivers/cpuidle/cpuidle-exynos.c
new file mode 100644
index 00000000000..7c015126382
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-exynos.c
@@ -0,0 +1,99 @@
+/* linux/drivers/cpuidle/cpuidle-exynos.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/proc-fns.h>
+#include <asm/suspend.h>
+#include <asm/cpuidle.h>
+
+static void (*exynos_enter_aftr)(void);
+
+static int idle_finisher(unsigned long flags)
+{
+ exynos_enter_aftr();
+ cpu_do_idle();
+
+ return 1;
+}
+
+static int exynos_enter_core0_aftr(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ cpu_pm_enter();
+ cpu_suspend(0, idle_finisher);
+ cpu_pm_exit();
+
+ return index;
+}
+
+static int exynos_enter_lowpower(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ int new_index = index;
+
+ /* AFTR can only be entered when cores other than CPU0 are offline */
+ if (num_online_cpus() > 1 || dev->cpu != 0)
+ new_index = drv->safe_state_index;
+
+ if (new_index == 0)
+ return arm_cpuidle_simple_enter(dev, drv, new_index);
+ else
+ return exynos_enter_core0_aftr(dev, drv, new_index);
+}
+
+static struct cpuidle_driver exynos_idle_driver = {
+ .name = "exynos_idle",
+ .owner = THIS_MODULE,
+ .states = {
+ [0] = ARM_CPUIDLE_WFI_STATE,
+ [1] = {
+ .enter = exynos_enter_lowpower,
+ .exit_latency = 300,
+ .target_residency = 100000,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "C1",
+ .desc = "ARM power down",
+ },
+ },
+ .state_count = 2,
+ .safe_state_index = 0,
+};
+
+static int exynos_cpuidle_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ exynos_enter_aftr = (void *)(pdev->dev.platform_data);
+
+ ret = cpuidle_register(&exynos_idle_driver, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register cpuidle driver\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver exynos_cpuidle_driver = {
+ .probe = exynos_cpuidle_probe,
+ .driver = {
+ .name = "exynos_cpuidle",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(exynos_cpuidle_driver);
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 926360c2db6..d08c4dedef3 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -57,14 +57,48 @@
#define EDMA_MAX_SLOTS MAX_NR_SG
#define EDMA_DESCRIPTORS 16
+struct edma_pset {
+ u32 len;
+ dma_addr_t addr;
+ struct edmacc_param param;
+};
+
struct edma_desc {
struct virt_dma_desc vdesc;
struct list_head node;
+ enum dma_transfer_direction direction;
int cyclic;
int absync;
int pset_nr;
+ struct edma_chan *echan;
int processed;
- struct edmacc_param pset[0];
+
+ /*
+ * The following 4 elements are used for residue accounting.
+ *
+ * - processed_stat: the number of SG elements we have traversed
+ * so far to cover accounting. This is updated directly to processed
+ * during edma_callback and is always <= processed, because processed
+ * refers to the number of transfers pending (programmed to the EDMA
+ * controller), whereas processed_stat tracks the number of transfers
+ * accounted for so far.
+ *
+ * - residue: The number of bytes left to transfer for this descriptor
+ *
+ * - residue_stat: The residue in bytes of data we have covered
+ * so far for accounting. This is updated directly to residue
+ * during callbacks to keep it current.
+ *
+ * - sg_len: Tracks the length of the current intermediate transfer;
+ * this is required to update the residue from the intermediate-transfer
+ * completion callback.
+ */
+ int processed_stat;
+ u32 sg_len;
+ u32 residue;
+ u32 residue_stat;
+
+ struct edma_pset pset[0];
};
struct edma_cc;
@@ -136,12 +170,14 @@ static void edma_execute(struct edma_chan *echan)
/* Find out how many left */
left = edesc->pset_nr - edesc->processed;
nslots = min(MAX_NR_SG, left);
+ edesc->sg_len = 0;
/* Write descriptor PaRAM set(s) */
for (i = 0; i < nslots; i++) {
j = i + edesc->processed;
- edma_write_slot(echan->slot[i], &edesc->pset[j]);
- dev_dbg(echan->vchan.chan.device->dev,
+ edma_write_slot(echan->slot[i], &edesc->pset[j].param);
+ edesc->sg_len += edesc->pset[j].len;
+ dev_vdbg(echan->vchan.chan.device->dev,
"\n pset[%d]:\n"
" chnum\t%d\n"
" slot\t%d\n"
@@ -154,14 +190,14 @@ static void edma_execute(struct edma_chan *echan)
" cidx\t%08x\n"
" lkrld\t%08x\n",
j, echan->ch_num, echan->slot[i],
- edesc->pset[j].opt,
- edesc->pset[j].src,
- edesc->pset[j].dst,
- edesc->pset[j].a_b_cnt,
- edesc->pset[j].ccnt,
- edesc->pset[j].src_dst_bidx,
- edesc->pset[j].src_dst_cidx,
- edesc->pset[j].link_bcntrld);
+ edesc->pset[j].param.opt,
+ edesc->pset[j].param.src,
+ edesc->pset[j].param.dst,
+ edesc->pset[j].param.a_b_cnt,
+ edesc->pset[j].param.ccnt,
+ edesc->pset[j].param.src_dst_bidx,
+ edesc->pset[j].param.src_dst_cidx,
+ edesc->pset[j].param.link_bcntrld);
/* Link to the previous slot if not the last set */
if (i != (nslots - 1))
edma_link(echan->slot[i], echan->slot[i+1]);
@@ -183,7 +219,8 @@ static void edma_execute(struct edma_chan *echan)
}
if (edesc->processed <= MAX_NR_SG) {
- dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
+ dev_dbg(dev, "first transfer starting on channel %d\n",
+ echan->ch_num);
edma_start(echan->ch_num);
} else {
dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
@@ -197,7 +234,7 @@ static void edma_execute(struct edma_chan *echan)
* MAX_NR_SG
*/
if (echan->missed) {
- dev_dbg(dev, "missed event in execute detected\n");
+ dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
edma_clean_channel(echan->ch_num);
edma_stop(echan->ch_num);
edma_start(echan->ch_num);
@@ -242,6 +279,26 @@ static int edma_slave_config(struct edma_chan *echan,
return 0;
}
+static int edma_dma_pause(struct edma_chan *echan)
+{
+ /* Pause/Resume only allowed with cyclic mode */
+ if (!echan->edesc->cyclic)
+ return -EINVAL;
+
+ edma_pause(echan->ch_num);
+ return 0;
+}
+
+static int edma_dma_resume(struct edma_chan *echan)
+{
+ /* Pause/Resume only allowed with cyclic mode */
+ if (!echan->edesc->cyclic)
+ return -EINVAL;
+
+ edma_resume(echan->ch_num);
+ return 0;
+}
+
static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
@@ -257,6 +314,14 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
config = (struct dma_slave_config *)arg;
ret = edma_slave_config(echan, config);
break;
+ case DMA_PAUSE:
+ ret = edma_dma_pause(echan);
+ break;
+
+ case DMA_RESUME:
+ ret = edma_dma_resume(echan);
+ break;
+
default:
ret = -ENOSYS;
}
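
The new DMA_PAUSE/DMA_RESUME cases are normally reached through the generic dmaengine wrappers rather than by calling edma_control() directly. A minimal sketch of a client pausing and resuming a cyclic channel (channel setup omitted, function names invented for illustration):

    #include <linux/dmaengine.h>

    /* 'chan' is assumed to already be running a cyclic (e.g. audio) transfer. */
    static void example_hold(struct dma_chan *chan)
    {
            /* ends up in edma_control(chan, DMA_PAUSE, 0) -> edma_dma_pause() */
            if (dmaengine_pause(chan))
                    pr_warn("pause only works on cyclic EDMA descriptors\n");
    }

    static void example_release(struct dma_chan *chan)
    {
            /* ends up in edma_control(chan, DMA_RESUME, 0) -> edma_dma_resume() */
            dmaengine_resume(chan);
    }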
@@ -275,18 +340,23 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* @dma_length: Total length of the DMA transfer
* @direction: Direction of the transfer
*/
-static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
+static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
enum dma_slave_buswidth dev_width, unsigned int dma_length,
enum dma_transfer_direction direction)
{
struct edma_chan *echan = to_edma_chan(chan);
struct device *dev = chan->device->dev;
+ struct edmacc_param *param = &epset->param;
int acnt, bcnt, ccnt, cidx;
int src_bidx, dst_bidx, src_cidx, dst_cidx;
int absync;
acnt = dev_width;
+
+ /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
+ if (!burst)
+ burst = 1;
/*
* If the maxburst is equal to the fifo width, use
* A-synced transfers. This allows for large contiguous
@@ -337,41 +407,50 @@ static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
cidx = acnt * bcnt;
}
+ epset->len = dma_length;
+
if (direction == DMA_MEM_TO_DEV) {
src_bidx = acnt;
src_cidx = cidx;
dst_bidx = 0;
dst_cidx = 0;
+ epset->addr = src_addr;
} else if (direction == DMA_DEV_TO_MEM) {
src_bidx = 0;
src_cidx = 0;
dst_bidx = acnt;
dst_cidx = cidx;
+ epset->addr = dst_addr;
+ } else if (direction == DMA_MEM_TO_MEM) {
+ src_bidx = acnt;
+ src_cidx = cidx;
+ dst_bidx = acnt;
+ dst_cidx = cidx;
} else {
dev_err(dev, "%s: direction not implemented yet\n", __func__);
return -EINVAL;
}
- pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+ param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
/* Configure A or AB synchronized transfers */
if (absync)
- pset->opt |= SYNCDIM;
+ param->opt |= SYNCDIM;
- pset->src = src_addr;
- pset->dst = dst_addr;
+ param->src = src_addr;
+ param->dst = dst_addr;
- pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
- pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
+ param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+ param->src_dst_cidx = (dst_cidx << 16) | src_cidx;
- pset->a_b_cnt = bcnt << 16 | acnt;
- pset->ccnt = ccnt;
+ param->a_b_cnt = bcnt << 16 | acnt;
+ param->ccnt = ccnt;
/*
* Only time when (bcntrld) auto reload is required is for
* A-sync case, and in this case, a requirement of reload value
* of SZ_64K-1 only is assured. 'link' is initially set to NULL
* and then later will be populated by edma_execute.
*/
- pset->link_bcntrld = 0xffffffff;
+ param->link_bcntrld = 0xffffffff;
return absync;
}
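
To make the packing at the end of edma_config_pset() concrete, a small sketch with invented numbers for a DMA_MEM_TO_DEV, AB-synchronized transfer (acnt = 4-byte device width, bcnt = 8 words per frame, ccnt = 16 frames); this only restates the shifts visible above:

    /* struct edmacc_param comes from the platform EDMA header used by this driver. */
    static void example_pset_packing(struct edmacc_param *param)
    {
            unsigned int acnt = 4, bcnt = 8, ccnt = 16;             /* invented values */
            unsigned int src_bidx = acnt, src_cidx = acnt * bcnt;   /* memory side */
            unsigned int dst_bidx = 0, dst_cidx = 0;                /* fixed device FIFO */

            param->a_b_cnt      = bcnt << 16 | acnt;                /* 0x00080004 */
            param->ccnt         = ccnt;                             /* 16 frames */
            param->src_dst_bidx = (dst_bidx << 16) | src_bidx;      /* 0x00000004 */
            param->src_dst_cidx = (dst_cidx << 16) | src_cidx;      /* 0x00000020 */
    }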
@@ -401,23 +480,26 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
dev_width = echan->cfg.dst_addr_width;
burst = echan->cfg.dst_maxburst;
} else {
- dev_err(dev, "%s: bad direction?\n", __func__);
+ dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
return NULL;
}
if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
- dev_err(dev, "Undefined slave buswidth\n");
+ dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
return NULL;
}
edesc = kzalloc(sizeof(*edesc) + sg_len *
sizeof(edesc->pset[0]), GFP_ATOMIC);
if (!edesc) {
- dev_dbg(dev, "Failed to allocate a descriptor\n");
+ dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
return NULL;
}
edesc->pset_nr = sg_len;
+ edesc->residue = 0;
+ edesc->direction = direction;
+ edesc->echan = echan;
/* Allocate a PaRAM slot, if needed */
nslots = min_t(unsigned, MAX_NR_SG, sg_len);
@@ -429,7 +511,8 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
EDMA_SLOT_ANY);
if (echan->slot[i] < 0) {
kfree(edesc);
- dev_err(dev, "Failed to allocate slot\n");
+ dev_err(dev, "%s: Failed to allocate slot\n",
+ __func__);
return NULL;
}
}
@@ -452,16 +535,56 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
}
edesc->absync = ret;
+ edesc->residue += sg_dma_len(sg);
/* If this is the last in a current SG set of transactions,
enable interrupts so that next set is processed */
if (!((i+1) % MAX_NR_SG))
- edesc->pset[i].opt |= TCINTEN;
+ edesc->pset[i].param.opt |= TCINTEN;
/* If this is the last set, enable completion interrupt flag */
if (i == sg_len - 1)
- edesc->pset[i].opt |= TCINTEN;
+ edesc->pset[i].param.opt |= TCINTEN;
}
+ edesc->residue_stat = edesc->residue;
+
+ return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
+
+struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long tx_flags)
+{
+ int ret;
+ struct edma_desc *edesc;
+ struct device *dev = chan->device->dev;
+ struct edma_chan *echan = to_edma_chan(chan);
+
+ if (unlikely(!echan || !len))
+ return NULL;
+
+ edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC);
+ if (!edesc) {
+ dev_dbg(dev, "Failed to allocate a descriptor\n");
+ return NULL;
+ }
+
+ edesc->pset_nr = 1;
+
+ ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
+ DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM);
+ if (ret < 0) {
+ kfree(edesc);
+ return NULL;
+ }
+
+ edesc->absync = ret;
+
+ /*
+ * Enable intermediate transfer chaining to re-trigger channel
+ * on completion of every TR, and enable transfer-completion
+ * interrupt on completion of the whole transfer.
+ */
+ edesc->pset[0].param.opt |= ITCCHEN;
+ edesc->pset[0].param.opt |= TCINTEN;
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
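
With DMA_MEMCPY advertised in edma_probe() below, a generic dmaengine client can use the new callback; a rough sketch (error handling trimmed, helper name invented) that goes through the device hook this hunk installs:

    #include <linux/dmaengine.h>

    /* Copy 'len' bytes; 'len' should respect dma->copy_align (see edma_dma_init). */
    static int example_edma_memcpy(struct dma_chan *chan,
                                   dma_addr_t dst, dma_addr_t src, size_t len)
    {
            struct dma_async_tx_descriptor *tx;
            dma_cookie_t cookie;

            tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
                                                      DMA_PREP_INTERRUPT);
            if (!tx)
                    return -ENOMEM;

            cookie = dmaengine_submit(tx);
            if (dma_submit_error(cookie))
                    return -EINVAL;

            dma_async_issue_pending(chan);
            return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
    }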
@@ -493,12 +616,12 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
dev_width = echan->cfg.dst_addr_width;
burst = echan->cfg.dst_maxburst;
} else {
- dev_err(dev, "%s: bad direction?\n", __func__);
+ dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
return NULL;
}
if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
- dev_err(dev, "Undefined slave buswidth\n");
+ dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
return NULL;
}
@@ -523,16 +646,18 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
edesc = kzalloc(sizeof(*edesc) + nslots *
sizeof(edesc->pset[0]), GFP_ATOMIC);
if (!edesc) {
- dev_dbg(dev, "Failed to allocate a descriptor\n");
+ dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
return NULL;
}
edesc->cyclic = 1;
edesc->pset_nr = nslots;
+ edesc->residue = edesc->residue_stat = buf_len;
+ edesc->direction = direction;
+ edesc->echan = echan;
- dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots);
- dev_dbg(dev, "%s: period_len=%d\n", __func__, period_len);
- dev_dbg(dev, "%s: buf_len=%d\n", __func__, buf_len);
+ dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
+ __func__, echan->ch_num, nslots, period_len, buf_len);
for (i = 0; i < nslots; i++) {
/* Allocate a PaRAM slot, if needed */
@@ -542,7 +667,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
EDMA_SLOT_ANY);
if (echan->slot[i] < 0) {
kfree(edesc);
- dev_err(dev, "Failed to allocate slot\n");
+ dev_err(dev, "%s: Failed to allocate slot\n",
+ __func__);
return NULL;
}
}
@@ -566,8 +692,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
else
src_addr += period_len;
- dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
- dev_dbg(dev,
+ dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
+ dev_vdbg(dev,
"\n pset[%d]:\n"
" chnum\t%d\n"
" slot\t%d\n"
@@ -580,14 +706,14 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
" cidx\t%08x\n"
" lkrld\t%08x\n",
i, echan->ch_num, echan->slot[i],
- edesc->pset[i].opt,
- edesc->pset[i].src,
- edesc->pset[i].dst,
- edesc->pset[i].a_b_cnt,
- edesc->pset[i].ccnt,
- edesc->pset[i].src_dst_bidx,
- edesc->pset[i].src_dst_cidx,
- edesc->pset[i].link_bcntrld);
+ edesc->pset[i].param.opt,
+ edesc->pset[i].param.src,
+ edesc->pset[i].param.dst,
+ edesc->pset[i].param.a_b_cnt,
+ edesc->pset[i].param.ccnt,
+ edesc->pset[i].param.src_dst_bidx,
+ edesc->pset[i].param.src_dst_cidx,
+ edesc->pset[i].param.link_bcntrld);
edesc->absync = ret;
@@ -595,7 +721,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
* Enable interrupts for every period because callback
* has to be called for every period.
*/
- edesc->pset[i].opt |= TCINTEN;
+ edesc->pset[i].param.opt |= TCINTEN;
}
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
@@ -606,7 +732,6 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
struct edma_chan *echan = data;
struct device *dev = echan->vchan.chan.device->dev;
struct edma_desc *edesc;
- unsigned long flags;
struct edmacc_param p;
edesc = echan->edesc;
@@ -617,27 +742,34 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
switch (ch_status) {
case EDMA_DMA_COMPLETE:
- spin_lock_irqsave(&echan->vchan.lock, flags);
+ spin_lock(&echan->vchan.lock);
if (edesc) {
if (edesc->cyclic) {
vchan_cyclic_callback(&edesc->vdesc);
} else if (edesc->processed == edesc->pset_nr) {
dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
+ edesc->residue = 0;
edma_stop(echan->ch_num);
vchan_cookie_complete(&edesc->vdesc);
edma_execute(echan);
} else {
dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
+
+ /* Update statistics for tx_status */
+ edesc->residue -= edesc->sg_len;
+ edesc->residue_stat = edesc->residue;
+ edesc->processed_stat = edesc->processed;
+
edma_execute(echan);
}
}
- spin_unlock_irqrestore(&echan->vchan.lock, flags);
+ spin_unlock(&echan->vchan.lock);
break;
case EDMA_DMA_CC_ERROR:
- spin_lock_irqsave(&echan->vchan.lock, flags);
+ spin_lock(&echan->vchan.lock);
edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);
@@ -668,7 +800,7 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
edma_trigger_channel(echan->ch_num);
}
- spin_unlock_irqrestore(&echan->vchan.lock, flags);
+ spin_unlock(&echan->vchan.lock);
break;
default:
@@ -704,7 +836,7 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
echan->alloced = true;
echan->slot[0] = echan->ch_num;
- dev_dbg(dev, "allocated channel for %u:%u\n",
+ dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
return 0;
@@ -756,23 +888,52 @@ static void edma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&echan->vchan.lock, flags);
}
-static size_t edma_desc_size(struct edma_desc *edesc)
+static u32 edma_residue(struct edma_desc *edesc)
{
+ bool dst = edesc->direction == DMA_DEV_TO_MEM;
+ struct edma_pset *pset = edesc->pset;
+ dma_addr_t done, pos;
int i;
- size_t size;
-
- if (edesc->absync)
- for (size = i = 0; i < edesc->pset_nr; i++)
- size += (edesc->pset[i].a_b_cnt & 0xffff) *
- (edesc->pset[i].a_b_cnt >> 16) *
- edesc->pset[i].ccnt;
- else
- size = (edesc->pset[0].a_b_cnt & 0xffff) *
- (edesc->pset[0].a_b_cnt >> 16) +
- (edesc->pset[0].a_b_cnt & 0xffff) *
- (SZ_64K - 1) * edesc->pset[0].ccnt;
-
- return size;
+
+ /*
+ * We always read the dst/src position from the first PaRAM
+ * pset. That's the one which is active now.
+ */
+ pos = edma_get_position(edesc->echan->slot[0], dst);
+
+ /*
+ * Cyclic is simple. Just subtract pset[0].addr from pos.
+ *
+ * We never update edesc->residue in the cyclic case, so we
+ * can tell the remaining room to the end of the circular
+ * buffer.
+ */
+ if (edesc->cyclic) {
+ done = pos - pset->addr;
+ edesc->residue_stat = edesc->residue - done;
+ return edesc->residue_stat;
+ }
+
+ /*
+ * For SG operation we catch up with the last processed
+ * status.
+ */
+ pset += edesc->processed_stat;
+
+ for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
+ /*
+ * If we are inside this pset address range, we know
+ * this is the active one. Get the current delta and
+ * stop walking the psets.
+ */
+ if (pos >= pset->addr && pos < pset->addr + pset->len)
+ return edesc->residue_stat - (pos - pset->addr);
+
+ /* Otherwise mark it done and update residue_stat. */
+ edesc->processed_stat++;
+ edesc->residue_stat -= pset->len;
+ }
+ return edesc->residue_stat;
}
/* Check request completion status */
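
A worked example of the SG branch above, with invented numbers: assume three psets of 4096 bytes each, all already programmed, so residue = residue_stat = 12288 and processed_stat = 0 when the transfer is issued. If edma_get_position() reads back an address 1024 bytes into the second pset, the loop first steps over pset 0 (processed_stat becomes 1, residue_stat drops to 12288 - 4096 = 8192) and then finds the position inside pset 1, so the reported residue is 8192 - 1024 = 7168 bytes.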
@@ -790,13 +951,10 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
return ret;
spin_lock_irqsave(&echan->vchan.lock, flags);
- vdesc = vchan_find_desc(&echan->vchan, cookie);
- if (vdesc) {
- txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
- } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
- struct edma_desc *edesc = echan->edesc;
- txstate->residue = edma_desc_size(edesc);
- }
+ if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
+ txstate->residue = edma_residue(echan->edesc);
+ else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
+ txstate->residue = to_edma_desc(&vdesc->tx)->residue;
spin_unlock_irqrestore(&echan->vchan.lock, flags);
return ret;
@@ -822,18 +980,43 @@ static void __init edma_chan_init(struct edma_cc *ecc,
}
}
+#define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+static int edma_dma_device_slave_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ caps->src_addr_widths = EDMA_DMA_BUSWIDTHS;
+ caps->dstn_addr_widths = EDMA_DMA_BUSWIDTHS;
+ caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ caps->cmd_pause = true;
+ caps->cmd_terminate = true;
+ caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+ return 0;
+}
+
static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
struct device *dev)
{
dma->device_prep_slave_sg = edma_prep_slave_sg;
dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
+ dma->device_prep_dma_memcpy = edma_prep_dma_memcpy;
dma->device_alloc_chan_resources = edma_alloc_chan_resources;
dma->device_free_chan_resources = edma_free_chan_resources;
dma->device_issue_pending = edma_issue_pending;
dma->device_tx_status = edma_tx_status;
dma->device_control = edma_control;
+ dma->device_slave_caps = edma_dma_device_slave_caps;
dma->dev = dev;
+ /*
+ * Code using the DMA memcpy must make sure the transfer length is
+ * aligned on a dma->copy_align boundary.
+ */
+ dma->copy_align = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
INIT_LIST_HEAD(&dma->channels);
}
@@ -861,6 +1044,8 @@ static int edma_probe(struct platform_device *pdev)
dma_cap_zero(ecc->dma_slave.cap_mask);
dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask);
+ dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask);
edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);
diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
index b59a17fb7c3..ff7138fd66d 100644
--- a/drivers/memory/mvebu-devbus.c
+++ b/drivers/memory/mvebu-devbus.c
@@ -2,7 +2,7 @@
* Marvell EBU SoC Device Bus Controller
* (memory controller for NOR/NAND/SRAM/FPGA devices)
*
- * Copyright (C) 2013 Marvell
+ * Copyright (C) 2013-2014 Marvell
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -30,19 +30,47 @@
#include <linux/platform_device.h>
/* Register definitions */
-#define DEV_WIDTH_BIT 30
-#define BADR_SKEW_BIT 28
-#define RD_HOLD_BIT 23
-#define ACC_NEXT_BIT 17
-#define RD_SETUP_BIT 12
-#define ACC_FIRST_BIT 6
-
-#define SYNC_ENABLE_BIT 24
-#define WR_HIGH_BIT 16
-#define WR_LOW_BIT 8
-
-#define READ_PARAM_OFFSET 0x0
-#define WRITE_PARAM_OFFSET 0x4
+#define ARMADA_DEV_WIDTH_SHIFT 30
+#define ARMADA_BADR_SKEW_SHIFT 28
+#define ARMADA_RD_HOLD_SHIFT 23
+#define ARMADA_ACC_NEXT_SHIFT 17
+#define ARMADA_RD_SETUP_SHIFT 12
+#define ARMADA_ACC_FIRST_SHIFT 6
+
+#define ARMADA_SYNC_ENABLE_SHIFT 24
+#define ARMADA_WR_HIGH_SHIFT 16
+#define ARMADA_WR_LOW_SHIFT 8
+
+#define ARMADA_READ_PARAM_OFFSET 0x0
+#define ARMADA_WRITE_PARAM_OFFSET 0x4
+
+#define ORION_RESERVED (0x2 << 30)
+#define ORION_BADR_SKEW_SHIFT 28
+#define ORION_WR_HIGH_EXT_BIT BIT(27)
+#define ORION_WR_HIGH_EXT_MASK 0x8
+#define ORION_WR_LOW_EXT_BIT BIT(26)
+#define ORION_WR_LOW_EXT_MASK 0x8
+#define ORION_ALE_WR_EXT_BIT BIT(25)
+#define ORION_ALE_WR_EXT_MASK 0x8
+#define ORION_ACC_NEXT_EXT_BIT BIT(24)
+#define ORION_ACC_NEXT_EXT_MASK 0x10
+#define ORION_ACC_FIRST_EXT_BIT BIT(23)
+#define ORION_ACC_FIRST_EXT_MASK 0x10
+#define ORION_TURN_OFF_EXT_BIT BIT(22)
+#define ORION_TURN_OFF_EXT_MASK 0x8
+#define ORION_DEV_WIDTH_SHIFT 20
+#define ORION_WR_HIGH_SHIFT 17
+#define ORION_WR_HIGH_MASK 0x7
+#define ORION_WR_LOW_SHIFT 14
+#define ORION_WR_LOW_MASK 0x7
+#define ORION_ALE_WR_SHIFT 11
+#define ORION_ALE_WR_MASK 0x7
+#define ORION_ACC_NEXT_SHIFT 7
+#define ORION_ACC_NEXT_MASK 0xF
+#define ORION_ACC_FIRST_SHIFT 3
+#define ORION_ACC_FIRST_MASK 0xF
+#define ORION_TURN_OFF_SHIFT 0
+#define ORION_TURN_OFF_MASK 0x7
struct devbus_read_params {
u32 bus_width;
@@ -89,19 +117,14 @@ static int get_timing_param_ps(struct devbus *devbus,
return 0;
}
-static int devbus_set_timing_params(struct devbus *devbus,
- struct device_node *node)
+static int devbus_get_timing_params(struct devbus *devbus,
+ struct device_node *node,
+ struct devbus_read_params *r,
+ struct devbus_write_params *w)
{
- struct devbus_read_params r;
- struct devbus_write_params w;
- u32 value;
int err;
- dev_dbg(devbus->dev, "Setting timing parameter, tick is %lu ps\n",
- devbus->tick_ps);
-
- /* Get read timings */
- err = of_property_read_u32(node, "devbus,bus-width", &r.bus_width);
+ err = of_property_read_u32(node, "devbus,bus-width", &r->bus_width);
if (err < 0) {
dev_err(devbus->dev,
"%s has no 'devbus,bus-width' property\n",
@@ -113,104 +136,148 @@ static int devbus_set_timing_params(struct devbus *devbus,
* The bus width is encoded into the register as 0 for 8 bits,
* and 1 for 16 bits, so we do the necessary conversion here.
*/
- if (r.bus_width == 8)
- r.bus_width = 0;
- else if (r.bus_width == 16)
- r.bus_width = 1;
+ if (r->bus_width == 8)
+ r->bus_width = 0;
+ else if (r->bus_width == 16)
+ r->bus_width = 1;
else {
- dev_err(devbus->dev, "invalid bus width %d\n", r.bus_width);
+ dev_err(devbus->dev, "invalid bus width %d\n", r->bus_width);
return -EINVAL;
}
err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps",
- &r.badr_skew);
+ &r->badr_skew);
if (err < 0)
return err;
err = get_timing_param_ps(devbus, node, "devbus,turn-off-ps",
- &r.turn_off);
+ &r->turn_off);
if (err < 0)
return err;
err = get_timing_param_ps(devbus, node, "devbus,acc-first-ps",
- &r.acc_first);
+ &r->acc_first);
if (err < 0)
return err;
err = get_timing_param_ps(devbus, node, "devbus,acc-next-ps",
- &r.acc_next);
- if (err < 0)
- return err;
-
- err = get_timing_param_ps(devbus, node, "devbus,rd-setup-ps",
- &r.rd_setup);
+ &r->acc_next);
if (err < 0)
return err;
- err = get_timing_param_ps(devbus, node, "devbus,rd-hold-ps",
- &r.rd_hold);
- if (err < 0)
- return err;
-
- /* Get write timings */
- err = of_property_read_u32(node, "devbus,sync-enable",
- &w.sync_enable);
- if (err < 0) {
- dev_err(devbus->dev,
- "%s has no 'devbus,sync-enable' property\n",
- node->full_name);
- return err;
+ if (of_device_is_compatible(devbus->dev->of_node, "marvell,mvebu-devbus")) {
+ err = get_timing_param_ps(devbus, node, "devbus,rd-setup-ps",
+ &r->rd_setup);
+ if (err < 0)
+ return err;
+
+ err = get_timing_param_ps(devbus, node, "devbus,rd-hold-ps",
+ &r->rd_hold);
+ if (err < 0)
+ return err;
+
+ err = of_property_read_u32(node, "devbus,sync-enable",
+ &w->sync_enable);
+ if (err < 0) {
+ dev_err(devbus->dev,
+ "%s has no 'devbus,sync-enable' property\n",
+ node->full_name);
+ return err;
+ }
}
err = get_timing_param_ps(devbus, node, "devbus,ale-wr-ps",
- &w.ale_wr);
+ &w->ale_wr);
if (err < 0)
return err;
err = get_timing_param_ps(devbus, node, "devbus,wr-low-ps",
- &w.wr_low);
+ &w->wr_low);
if (err < 0)
return err;
err = get_timing_param_ps(devbus, node, "devbus,wr-high-ps",
- &w.wr_high);
+ &w->wr_high);
if (err < 0)
return err;
+ return 0;
+}
+
+static void devbus_orion_set_timing_params(struct devbus *devbus,
+ struct device_node *node,
+ struct devbus_read_params *r,
+ struct devbus_write_params *w)
+{
+ u32 value;
+
+ /*
+ * The hardware designers found it would be a good idea to
+ * split most of the values in the register into two fields:
+ * one containing all the low-order bits, and another one
+ * containing just the high-order bit. For all of those
+ * fields, we have to split the value into these two parts.
+ */
+ value = (r->turn_off & ORION_TURN_OFF_MASK) << ORION_TURN_OFF_SHIFT |
+ (r->acc_first & ORION_ACC_FIRST_MASK) << ORION_ACC_FIRST_SHIFT |
+ (r->acc_next & ORION_ACC_NEXT_MASK) << ORION_ACC_NEXT_SHIFT |
+ (w->ale_wr & ORION_ALE_WR_MASK) << ORION_ALE_WR_SHIFT |
+ (w->wr_low & ORION_WR_LOW_MASK) << ORION_WR_LOW_SHIFT |
+ (w->wr_high & ORION_WR_HIGH_MASK) << ORION_WR_HIGH_SHIFT |
+ r->bus_width << ORION_DEV_WIDTH_SHIFT |
+ ((r->turn_off & ORION_TURN_OFF_EXT_MASK) ? ORION_TURN_OFF_EXT_BIT : 0) |
+ ((r->acc_first & ORION_ACC_FIRST_EXT_MASK) ? ORION_ACC_FIRST_EXT_BIT : 0) |
+ ((r->acc_next & ORION_ACC_NEXT_EXT_MASK) ? ORION_ACC_NEXT_EXT_BIT : 0) |
+ ((w->ale_wr & ORION_ALE_WR_EXT_MASK) ? ORION_ALE_WR_EXT_BIT : 0) |
+ ((w->wr_low & ORION_WR_LOW_EXT_MASK) ? ORION_WR_LOW_EXT_BIT : 0) |
+ ((w->wr_high & ORION_WR_HIGH_EXT_MASK) ? ORION_WR_HIGH_EXT_BIT : 0) |
+ (r->badr_skew << ORION_BADR_SKEW_SHIFT) |
+ ORION_RESERVED;
+
+ writel(value, devbus->base);
+}
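
As an illustration of the low/extension-bit split described in the comment above (value invented): an acc_first of 0x17 ticks contributes (0x17 & ORION_ACC_FIRST_MASK) << ORION_ACC_FIRST_SHIFT = 0x7 << 3 = 0x38 for the low four bits, and because 0x17 & ORION_ACC_FIRST_EXT_MASK (0x10) is non-zero, ORION_ACC_FIRST_EXT_BIT (bit 23) is also set to carry the fifth bit separately.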
+
+static void devbus_armada_set_timing_params(struct devbus *devbus,
+ struct device_node *node,
+ struct devbus_read_params *r,
+ struct devbus_write_params *w)
+{
+ u32 value;
+
/* Set read timings */
- value = r.bus_width << DEV_WIDTH_BIT |
- r.badr_skew << BADR_SKEW_BIT |
- r.rd_hold << RD_HOLD_BIT |
- r.acc_next << ACC_NEXT_BIT |
- r.rd_setup << RD_SETUP_BIT |
- r.acc_first << ACC_FIRST_BIT |
- r.turn_off;
+ value = r->bus_width << ARMADA_DEV_WIDTH_SHIFT |
+ r->badr_skew << ARMADA_BADR_SKEW_SHIFT |
+ r->rd_hold << ARMADA_RD_HOLD_SHIFT |
+ r->acc_next << ARMADA_ACC_NEXT_SHIFT |
+ r->rd_setup << ARMADA_RD_SETUP_SHIFT |
+ r->acc_first << ARMADA_ACC_FIRST_SHIFT |
+ r->turn_off;
dev_dbg(devbus->dev, "read parameters register 0x%p = 0x%x\n",
- devbus->base + READ_PARAM_OFFSET,
+ devbus->base + ARMADA_READ_PARAM_OFFSET,
value);
- writel(value, devbus->base + READ_PARAM_OFFSET);
+ writel(value, devbus->base + ARMADA_READ_PARAM_OFFSET);
/* Set write timings */
- value = w.sync_enable << SYNC_ENABLE_BIT |
- w.wr_low << WR_LOW_BIT |
- w.wr_high << WR_HIGH_BIT |
- w.ale_wr;
+ value = w->sync_enable << ARMADA_SYNC_ENABLE_SHIFT |
+ w->wr_low << ARMADA_WR_LOW_SHIFT |
+ w->wr_high << ARMADA_WR_HIGH_SHIFT |
+ w->ale_wr;
dev_dbg(devbus->dev, "write parameters register: 0x%p = 0x%x\n",
- devbus->base + WRITE_PARAM_OFFSET,
+ devbus->base + ARMADA_WRITE_PARAM_OFFSET,
value);
- writel(value, devbus->base + WRITE_PARAM_OFFSET);
-
- return 0;
+ writel(value, devbus->base + ARMADA_WRITE_PARAM_OFFSET);
}
static int mvebu_devbus_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = pdev->dev.of_node;
+ struct devbus_read_params r;
+ struct devbus_write_params w;
struct devbus *devbus;
struct resource *res;
struct clk *clk;
@@ -240,10 +307,21 @@ static int mvebu_devbus_probe(struct platform_device *pdev)
rate = clk_get_rate(clk) / 1000;
devbus->tick_ps = 1000000000 / rate;
- /* Read the device tree node and set the new timing parameters */
- err = devbus_set_timing_params(devbus, node);
- if (err < 0)
- return err;
+ dev_dbg(devbus->dev, "Setting timing parameter, tick is %lu ps\n",
+ devbus->tick_ps);
+
+ if (!of_property_read_bool(node, "devbus,keep-config")) {
+ /* Read the Device Tree node */
+ err = devbus_get_timing_params(devbus, node, &r, &w);
+ if (err < 0)
+ return err;
+
+ /* Set the new timing parameters */
+ if (of_device_is_compatible(node, "marvell,orion-devbus"))
+ devbus_orion_set_timing_params(devbus, node, &r, &w);
+ else
+ devbus_armada_set_timing_params(devbus, node, &r, &w);
+ }
/*
* We need to create a child device explicitly from here to
@@ -259,6 +337,7 @@ static int mvebu_devbus_probe(struct platform_device *pdev)
static const struct of_device_id mvebu_devbus_of_match[] = {
{ .compatible = "marvell,mvebu-devbus" },
+ { .compatible = "marvell,orion-devbus" },
{},
};
MODULE_DEVICE_TABLE(of, mvebu_devbus_of_match);
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 49b46e6ca95..bdcf5173e37 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -51,6 +51,13 @@ config POWER_RESET_RESTART
Instead they restart, and u-boot holds the SoC until the
user presses a key. u-boot then boots into Linux.
+config POWER_RESET_SUN6I
+ bool "Allwinner A31 SoC reset driver"
+ depends on ARCH_SUNXI
+ depends on POWER_RESET
+ help
+ Reboot support for the Allwinner A31 SoCs.
+
config POWER_RESET_VEXPRESS
bool "ARM Versatile Express power-off and reset driver"
depends on ARM || ARM64
@@ -65,3 +72,11 @@ config POWER_RESET_XGENE
depends on POWER_RESET
help
Reboot support for the APM SoC X-Gene Eval boards.
+
+config POWER_RESET_KEYSTONE
+ bool "Keystone reset driver"
+ depends on ARCH_KEYSTONE
+ select MFD_SYSCON
+ help
+ Reboot support for the Keystone SoCs.
+
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 16c0516e5a1..dde2e8bbac5 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -4,5 +4,7 @@ obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
+obj-$(CONFIG_POWER_RESET_SUN6I) += sun6i-reboot.o
obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o
obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o
+obj-$(CONFIG_POWER_RESET_KEYSTONE) += keystone-reset.o
diff --git a/drivers/power/reset/keystone-reset.c b/drivers/power/reset/keystone-reset.c
new file mode 100644
index 00000000000..408a18fd91c
--- /dev/null
+++ b/drivers/power/reset/keystone-reset.c
@@ -0,0 +1,166 @@
+/*
+ * TI keystone reboot driver
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated. http://www.ti.com/
+ *
+ * Author: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+#include <asm/system_misc.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_platform.h>
+
+#define RSTYPE_RG 0x0
+#define RSCTRL_RG 0x4
+#define RSCFG_RG 0x8
+#define RSISO_RG 0xc
+
+#define RSCTRL_KEY_MASK 0x0000ffff
+#define RSCTRL_RESET_MASK BIT(16)
+#define RSCTRL_KEY 0x5a69
+
+#define RSMUX_OMODE_MASK 0xe
+#define RSMUX_OMODE_RESET_ON 0xa
+#define RSMUX_OMODE_RESET_OFF 0x0
+#define RSMUX_LOCK_MASK 0x1
+#define RSMUX_LOCK_SET 0x1
+
+#define RSCFG_RSTYPE_SOFT 0x300f
+#define RSCFG_RSTYPE_HARD 0x0
+
+#define WDT_MUX_NUMBER 0x4
+
+static int rspll_offset;
+static struct regmap *pllctrl_regs;
+
+/**
+ * rsctrl_enable_rspll_write - enable access to RSCTRL, RSCFG
+ * To be able to access the RSCTRL and RSCFG registers,
+ * we have to write the key to RSCTRL first.
+ */
+static inline int rsctrl_enable_rspll_write(void)
+{
+ return regmap_update_bits(pllctrl_regs, rspll_offset + RSCTRL_RG,
+ RSCTRL_KEY_MASK, RSCTRL_KEY);
+}
+
+static void rsctrl_restart(enum reboot_mode mode, const char *cmd)
+{
+ /* enable write access to RSTCTRL */
+ rsctrl_enable_rspll_write();
+
+ /* reset the SOC */
+ regmap_update_bits(pllctrl_regs, rspll_offset + RSCTRL_RG,
+ RSCTRL_RESET_MASK, 0);
+}
+
+static struct of_device_id rsctrl_of_match[] = {
+ {.compatible = "ti,keystone-reset", },
+ {},
+};
+
+static int rsctrl_probe(struct platform_device *pdev)
+{
+ int i;
+ int ret;
+ u32 val;
+ unsigned int rg;
+ u32 rsmux_offset;
+ struct regmap *devctrl_regs;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ if (!np)
+ return -ENODEV;
+
+ /* get regmaps */
+ pllctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pll");
+ if (IS_ERR(pllctrl_regs))
+ return PTR_ERR(pllctrl_regs);
+
+ devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev");
+ if (IS_ERR(devctrl_regs))
+ return PTR_ERR(devctrl_regs);
+
+ ret = of_property_read_u32_index(np, "ti,syscon-pll", 1, &rspll_offset);
+ if (ret) {
+ dev_err(dev, "couldn't read the reset pll offset!\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(np, "ti,syscon-dev", 1, &rsmux_offset);
+ if (ret) {
+ dev_err(dev, "couldn't read the rsmux offset!\n");
+ return -EINVAL;
+ }
+
+ /* set soft/hard reset */
+ val = of_property_read_bool(np, "ti,soft-reset");
+ val = val ? RSCFG_RSTYPE_SOFT : RSCFG_RSTYPE_HARD;
+
+ ret = rsctrl_enable_rspll_write();
+ if (ret)
+ return ret;
+
+ ret = regmap_write(pllctrl_regs, rspll_offset + RSCFG_RG, val);
+ if (ret)
+ return ret;
+
+ arm_pm_restart = rsctrl_restart;
+
+ /* disable a reset isolation for all module clocks */
+ ret = regmap_write(pllctrl_regs, rspll_offset + RSISO_RG, 0);
+ if (ret)
+ return ret;
+
+ /* enable a reset for watchdogs from wdt-list */
+ for (i = 0; i < WDT_MUX_NUMBER; i++) {
+ ret = of_property_read_u32_index(np, "ti,wdt-list", i, &val);
+ if (ret == -EOVERFLOW && !i) {
+ dev_err(dev, "ti,wdt-list property has to contain at"
+ "least one entry\n");
+ return -EINVAL;
+ } else if (ret) {
+ break;
+ }
+
+ if (val >= WDT_MUX_NUMBER) {
+ dev_err(dev, "ti,wdt-list property can contain"
+ "only numbers < 4\n");
+ return -EINVAL;
+ }
+
+ rg = rsmux_offset + val * 4;
+
+ ret = regmap_update_bits(devctrl_regs, rg,
+ RSMUX_OMODE_MASK | RSMUX_LOCK_MASK,
+ RSMUX_OMODE_RESET_ON | RSMUX_LOCK_SET);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
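
To illustrate the mux programming in the loop above (entry value invented): a "ti,wdt-list" entry of 2 selects the register at rsmux_offset + 2 * 4, and the regmap_update_bits() call programs its output-mode field to RSMUX_OMODE_RESET_ON (0xa), so a timeout of watchdog 2 then resets the SoC.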
+
+static struct platform_driver rsctrl_driver = {
+ .probe = rsctrl_probe,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = KBUILD_MODNAME,
+ .of_match_table = rsctrl_of_match,
+ },
+};
+module_platform_driver(rsctrl_driver);
+
+MODULE_AUTHOR("Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>");
+MODULE_DESCRIPTION("Texas Instruments keystone reset driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
diff --git a/drivers/power/reset/sun6i-reboot.c b/drivers/power/reset/sun6i-reboot.c
new file mode 100644
index 00000000000..af2cd7ff2fe
--- /dev/null
+++ b/drivers/power/reset/sun6i-reboot.c
@@ -0,0 +1,85 @@
+/*
+ * Allwinner A31 SoCs reset code
+ *
+ * Copyright (C) 2012-2014 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+
+#include <asm/system_misc.h>
+
+#define SUN6I_WATCHDOG1_IRQ_REG 0x00
+#define SUN6I_WATCHDOG1_CTRL_REG 0x10
+#define SUN6I_WATCHDOG1_CTRL_RESTART BIT(0)
+#define SUN6I_WATCHDOG1_CONFIG_REG 0x14
+#define SUN6I_WATCHDOG1_CONFIG_RESTART BIT(0)
+#define SUN6I_WATCHDOG1_CONFIG_IRQ BIT(1)
+#define SUN6I_WATCHDOG1_MODE_REG 0x18
+#define SUN6I_WATCHDOG1_MODE_ENABLE BIT(0)
+
+static void __iomem *wdt_base;
+
+static void sun6i_wdt_restart(enum reboot_mode mode, const char *cmd)
+{
+ if (!wdt_base)
+ return;
+
+ /* Disable interrupts */
+ writel(0, wdt_base + SUN6I_WATCHDOG1_IRQ_REG);
+
+ /* We want to disable the IRQ and just reset the whole system */
+ writel(SUN6I_WATCHDOG1_CONFIG_RESTART,
+ wdt_base + SUN6I_WATCHDOG1_CONFIG_REG);
+
+ /* Enable timer. The default and lowest interval value is 0.5s */
+ writel(SUN6I_WATCHDOG1_MODE_ENABLE,
+ wdt_base + SUN6I_WATCHDOG1_MODE_REG);
+
+ /* Restart the watchdog. */
+ writel(SUN6I_WATCHDOG1_CTRL_RESTART,
+ wdt_base + SUN6I_WATCHDOG1_CTRL_REG);
+
+ while (1) {
+ mdelay(5);
+ writel(SUN6I_WATCHDOG1_MODE_ENABLE,
+ wdt_base + SUN6I_WATCHDOG1_MODE_REG);
+ }
+}
+
+static int sun6i_reboot_probe(struct platform_device *pdev)
+{
+ wdt_base = of_iomap(pdev->dev.of_node, 0);
+ if (!wdt_base) {
+ WARN(1, "failed to map watchdog base address");
+ return -ENODEV;
+ }
+
+ arm_pm_restart = sun6i_wdt_restart;
+
+ return 0;
+}
+
+static struct of_device_id sun6i_reboot_of_match[] = {
+ { .compatible = "allwinner,sun6i-a31-wdt" },
+ {}
+};
+
+static struct platform_driver sun6i_reboot_driver = {
+ .probe = sun6i_reboot_probe,
+ .driver = {
+ .name = "sun6i-reboot",
+ .of_match_table = sun6i_reboot_of_match,
+ },
+};
+module_platform_driver(sun6i_reboot_driver);
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 4f60caf750c..60fed3d7820 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_RESET_CONTROLLER) += core.o
+obj-$(CONFIG_ARCH_SOCFPGA) += reset-socfpga.o
obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o
obj-$(CONFIG_ARCH_STI) += sti/
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
new file mode 100644
index 00000000000..79c32ca84ef
--- /dev/null
+++ b/drivers/reset/reset-socfpga.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
+ *
+ * based on
+ * Allwinner SoCs Reset Controller driver
+ *
+ * Copyright 2013 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define NR_BANKS 4
+#define OFFSET_MODRST 0x10
+
+struct socfpga_reset_data {
+ spinlock_t lock;
+ void __iomem *membase;
+ struct reset_controller_dev rcdev;
+};
+
+static int socfpga_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct socfpga_reset_data *data = container_of(rcdev,
+ struct socfpga_reset_data,
+ rcdev);
+ int bank = id / BITS_PER_LONG;
+ int offset = id % BITS_PER_LONG;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ reg = readl(data->membase + OFFSET_MODRST + (bank * NR_BANKS));
+ writel(reg | BIT(offset), data->membase + OFFSET_MODRST +
+ (bank * NR_BANKS));
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return 0;
+}
+
+static int socfpga_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct socfpga_reset_data *data = container_of(rcdev,
+ struct socfpga_reset_data,
+ rcdev);
+
+ int bank = id / BITS_PER_LONG;
+ int offset = id % BITS_PER_LONG;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ reg = readl(data->membase + OFFSET_MODRST + (bank * NR_BANKS));
+ writel(reg & ~BIT(offset), data->membase + OFFSET_MODRST +
+ (bank * NR_BANKS));
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return 0;
+}
+
+static struct reset_control_ops socfpga_reset_ops = {
+ .assert = socfpga_reset_assert,
+ .deassert = socfpga_reset_deassert,
+};
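
For example (id invented): on a 32-bit ARM build BITS_PER_LONG is 32, so reset line id 34 maps to bank 1, offset 2; assert sets bit 2 of the register at membase + OFFSET_MODRST + 1 * NR_BANKS = membase + 0x14, and deassert clears that same bit again.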
+
+static int socfpga_reset_probe(struct platform_device *pdev)
+{
+ struct socfpga_reset_data *data;
+ struct resource *res;
+
+ /*
+ * The binding was mainlined without the required property.
+ * Do not continue, when we encounter an old DT.
+ */
+ if (!of_find_property(pdev->dev.of_node, "#reset-cells", NULL)) {
+ dev_err(&pdev->dev, "%s missing #reset-cells property\n",
+ pdev->dev.of_node->full_name);
+ return -EINVAL;
+ }
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->membase))
+ return PTR_ERR(data->membase);
+
+ spin_lock_init(&data->lock);
+
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.nr_resets = NR_BANKS * BITS_PER_LONG;
+ data->rcdev.ops = &socfpga_reset_ops;
+ data->rcdev.of_node = pdev->dev.of_node;
+ reset_controller_register(&data->rcdev);
+
+ return 0;
+}
+
+static int socfpga_reset_remove(struct platform_device *pdev)
+{
+ struct socfpga_reset_data *data = platform_get_drvdata(pdev);
+
+ reset_controller_unregister(&data->rcdev);
+
+ return 0;
+}
+
+static const struct of_device_id socfpga_reset_dt_ids[] = {
+ { .compatible = "altr,rst-mgr", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver socfpga_reset_driver = {
+ .probe = socfpga_reset_probe,
+ .remove = socfpga_reset_remove,
+ .driver = {
+ .name = "socfpga-reset",
+ .owner = THIS_MODULE,
+ .of_match_table = socfpga_reset_dt_ids,
+ },
+};
+module_platform_driver(socfpga_reset_driver);
+
+MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de");
+MODULE_DESCRIPTION("Socfpga Reset Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index 695bd3496eb..a94e7a7820b 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -145,7 +145,24 @@ MODULE_DEVICE_TABLE(of, sunxi_reset_dt_ids);
static int sunxi_reset_probe(struct platform_device *pdev)
{
- return sunxi_reset_init(pdev->dev.of_node);
+ struct sunxi_reset_data *data;
+ struct resource *res;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->membase))
+ return PTR_ERR(data->membase);
+
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.nr_resets = resource_size(res) * 32;
+ data->rcdev.ops = &sunxi_reset_ops;
+ data->rcdev.of_node = pdev->dev.of_node;
+
+ return reset_controller_register(&data->rcdev);
}
static int sunxi_reset_remove(struct platform_device *pdev)
@@ -153,8 +170,6 @@ static int sunxi_reset_remove(struct platform_device *pdev)
struct sunxi_reset_data *data = platform_get_drvdata(pdev);
reset_controller_unregister(&data->rcdev);
- iounmap(data->membase);
- kfree(data);
return 0;
}
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
new file mode 100644
index 00000000000..c8543855aa8
--- /dev/null
+++ b/drivers/soc/Kconfig
@@ -0,0 +1,5 @@
+menu "SOC (System On Chip) specific Drivers"
+
+source "drivers/soc/qcom/Kconfig"
+
+endmenu
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
new file mode 100644
index 00000000000..0f7c44793b2
--- /dev/null
+++ b/drivers/soc/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Linux Kernel SOC specific device drivers.
+#
+
+obj-$(CONFIG_ARCH_QCOM) += qcom/
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
new file mode 100644
index 00000000000..7bd2c94f54a
--- /dev/null
+++ b/drivers/soc/qcom/Kconfig
@@ -0,0 +1,11 @@
+#
+# QCOM Soc drivers
+#
+config QCOM_GSBI
+ tristate "QCOM General Serial Bus Interface"
+ depends on ARCH_QCOM
+ help
+ Say y here to enable GSBI support. The GSBI provides control
+ functions for connecting the underlying serial UART, SPI, and I2C
+ devices to the output pins.
+
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
new file mode 100644
index 00000000000..438901257ac
--- /dev/null
+++ b/drivers/soc/qcom/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
new file mode 100644
index 00000000000..447458e696a
--- /dev/null
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#define GSBI_CTRL_REG 0x0000
+#define GSBI_PROTOCOL_SHIFT 4
+
+static int gsbi_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res;
+ void __iomem *base;
+ struct clk *hclk;
+ u32 mode, crci = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (of_property_read_u32(node, "qcom,mode", &mode)) {
+ dev_err(&pdev->dev, "missing mode configuration\n");
+ return -EINVAL;
+ }
+
+ /* not required, so default to 0 if not present */
+ of_property_read_u32(node, "qcom,crci", &crci);
+
+ dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n", mode, crci);
+
+ hclk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(hclk))
+ return PTR_ERR(hclk);
+
+ clk_prepare_enable(hclk);
+
+ writel_relaxed((mode << GSBI_PROTOCOL_SHIFT) | crci,
+ base + GSBI_CTRL_REG);
+
+ /* make sure the gsbi control write is not reordered */
+ wmb();
+
+ clk_disable_unprepare(hclk);
+
+ return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+}
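
For reference, the value written to GSBI_CTRL_REG is simply (mode << GSBI_PROTOCOL_SHIFT) | crci. As an example (mode value hypothetical for any given board): qcom,mode = <4> with no qcom,crci gives (4 << 4) | 0 = 0x40, which matches the GSBI_PROTOCOL_UART value that msm_serial used to write on its own before the GSBI handling was moved here (see the msm_serial.h hunk below).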
+
+static const struct of_device_id gsbi_dt_match[] = {
+ { .compatible = "qcom,gsbi-v1.0.0", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, gsbi_dt_match);
+
+static struct platform_driver gsbi_driver = {
+ .driver = {
+ .name = "gsbi",
+ .owner = THIS_MODULE,
+ .of_match_table = gsbi_dt_match,
+ },
+ .probe = gsbi_probe,
+};
+
+module_platform_driver(gsbi_driver);
+
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_DESCRIPTION("QCOM GSBI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 053b98eb46c..778e376f197 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -52,7 +52,6 @@ struct msm_port {
struct clk *clk;
struct clk *pclk;
unsigned int imr;
- void __iomem *gsbi_base;
int is_uartdm;
unsigned int old_snap_state;
};
@@ -599,9 +598,7 @@ static const char *msm_type(struct uart_port *port)
static void msm_release_port(struct uart_port *port)
{
struct platform_device *pdev = to_platform_device(port->dev);
- struct msm_port *msm_port = UART_TO_MSM(port);
struct resource *uart_resource;
- struct resource *gsbi_resource;
resource_size_t size;
uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -612,28 +609,12 @@ static void msm_release_port(struct uart_port *port)
release_mem_region(port->mapbase, size);
iounmap(port->membase);
port->membase = NULL;
-
- if (msm_port->gsbi_base) {
- writel_relaxed(GSBI_PROTOCOL_IDLE,
- msm_port->gsbi_base + GSBI_CONTROL);
-
- gsbi_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (unlikely(!gsbi_resource))
- return;
-
- size = resource_size(gsbi_resource);
- release_mem_region(gsbi_resource->start, size);
- iounmap(msm_port->gsbi_base);
- msm_port->gsbi_base = NULL;
- }
}
static int msm_request_port(struct uart_port *port)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
struct platform_device *pdev = to_platform_device(port->dev);
struct resource *uart_resource;
- struct resource *gsbi_resource;
resource_size_t size;
int ret;
@@ -652,30 +633,8 @@ static int msm_request_port(struct uart_port *port)
goto fail_release_port;
}
- gsbi_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- /* Is this a GSBI-based port? */
- if (gsbi_resource) {
- size = resource_size(gsbi_resource);
-
- if (!request_mem_region(gsbi_resource->start, size,
- "msm_serial")) {
- ret = -EBUSY;
- goto fail_release_port_membase;
- }
-
- msm_port->gsbi_base = ioremap(gsbi_resource->start, size);
- if (!msm_port->gsbi_base) {
- ret = -EBUSY;
- goto fail_release_gsbi;
- }
- }
-
return 0;
-fail_release_gsbi:
- release_mem_region(gsbi_resource->start, size);
-fail_release_port_membase:
- iounmap(port->membase);
fail_release_port:
release_mem_region(port->mapbase, size);
return ret;
@@ -683,7 +642,6 @@ fail_release_port:
static void msm_config_port(struct uart_port *port, int flags)
{
- struct msm_port *msm_port = UART_TO_MSM(port);
int ret;
if (flags & UART_CONFIG_TYPE) {
port->type = PORT_MSM;
@@ -691,9 +649,6 @@ static void msm_config_port(struct uart_port *port, int flags)
if (ret)
return;
}
- if (msm_port->gsbi_base)
- writel_relaxed(GSBI_PROTOCOL_UART,
- msm_port->gsbi_base + GSBI_CONTROL);
}
static int msm_verify_port(struct uart_port *port, struct serial_struct *ser)
@@ -1110,6 +1065,7 @@ static struct of_device_id msm_match_table[] = {
static struct platform_driver msm_platform_driver = {
.remove = msm_serial_remove,
+ .probe = msm_serial_probe,
.driver = {
.name = "msm_serial",
.owner = THIS_MODULE,
@@ -1125,7 +1081,7 @@ static int __init msm_serial_init(void)
if (unlikely(ret))
return ret;
- ret = platform_driver_probe(&msm_platform_driver, msm_serial_probe);
+ ret = platform_driver_register(&msm_platform_driver);
if (unlikely(ret))
uart_unregister_driver(&msm_uart_driver);
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index 1e9b68b6f9e..d98d45efdf8 100644
--- a/drivers/tty/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
@@ -109,11 +109,6 @@
#define UART_ISR 0x0014
#define UART_ISR_TX_READY (1 << 7)
-#define GSBI_CONTROL 0x0
-#define GSBI_PROTOCOL_CODE 0x30
-#define GSBI_PROTOCOL_UART 0x40
-#define GSBI_PROTOCOL_IDLE 0x0
-
#define UARTDM_RXFS 0x50
#define UARTDM_RXFS_BUF_SHIFT 0x7
#define UARTDM_RXFS_BUF_MASK 0x7