Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/hw_random/Kconfig | 14
-rw-r--r--  drivers/char/hw_random/Makefile | 1
-rw-r--r--  drivers/char/hw_random/timeriomem-rng.c | 151
-rw-r--r--  drivers/char/tpm/tpm.c | 530
-rw-r--r--  drivers/char/tpm/tpm.h | 142
-rw-r--r--  drivers/clocksource/Makefile | 1
-rw-r--r--  drivers/clocksource/sh_cmt.c | 615
-rw-r--r--  drivers/cpufreq/cpufreq.c | 55
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 404
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 74
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 74
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c | 27
-rw-r--r--  drivers/cpufreq/freq_table.c | 18
-rw-r--r--  drivers/crypto/Kconfig | 15
-rw-r--r--  drivers/crypto/Makefile | 1
-rw-r--r--  drivers/crypto/amcc/Makefile | 2
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c | 293
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 1310
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.h | 177
-rw-r--r--  drivers/crypto/amcc/crypto4xx_reg_def.h | 284
-rw-r--r--  drivers/crypto/amcc/crypto4xx_sa.c | 108
-rw-r--r--  drivers/crypto/amcc/crypto4xx_sa.h | 243
-rw-r--r--  drivers/ide/Kconfig | 8
-rw-r--r--  drivers/ide/Makefile | 4
-rw-r--r--  drivers/ide/aec62xx.c | 4
-rw-r--r--  drivers/ide/alim15x3.c | 2
-rw-r--r--  drivers/ide/amd74xx.c | 14
-rw-r--r--  drivers/ide/atiixp.c | 3
-rw-r--r--  drivers/ide/cmd64x.c | 2
-rw-r--r--  drivers/ide/cs5520.c | 3
-rw-r--r--  drivers/ide/cs5530.c | 2
-rw-r--r--  drivers/ide/delkin_cb.c | 2
-rw-r--r--  drivers/ide/hpt366.c | 4
-rw-r--r--  drivers/ide/ide-acpi.c | 214
-rw-r--r--  drivers/ide/ide-atapi.c | 41
-rw-r--r--  drivers/ide/ide-cd.c | 4
-rw-r--r--  drivers/ide/ide-devsets.c | 190
-rw-r--r--  drivers/ide/ide-dma.c | 57
-rw-r--r--  drivers/ide/ide-eh.c | 428
-rw-r--r--  drivers/ide/ide-io-std.c | 316
-rw-r--r--  drivers/ide/ide-io.c | 320
-rw-r--r--  drivers/ide/ide-iops.c | 742
-rw-r--r--  drivers/ide/ide-lib.c | 240
-rw-r--r--  drivers/ide/ide-park.c | 25
-rw-r--r--  drivers/ide/ide-pci-generic.c | 4
-rw-r--r--  drivers/ide/ide-probe.c | 257
-rw-r--r--  drivers/ide/ide-proc.c | 2
-rw-r--r--  drivers/ide/ide-xfer-mode.c | 246
-rw-r--r--  drivers/ide/ide.c | 156
-rw-r--r--  drivers/ide/it821x.c | 2
-rw-r--r--  drivers/ide/ns87415.c | 4
-rw-r--r--  drivers/ide/pdc202xx_new.c | 4
-rw-r--r--  drivers/ide/pdc202xx_old.c | 4
-rw-r--r--  drivers/ide/piix.c | 13
-rw-r--r--  drivers/ide/serverworks.c | 13
-rw-r--r--  drivers/ide/setup-pci.c | 27
-rw-r--r--  drivers/ide/siimage.c | 2
-rw-r--r--  drivers/ide/sis5513.c | 4
-rw-r--r--  drivers/ide/sl82c105.c | 4
-rw-r--r--  drivers/ide/slc90e66.c | 1
-rw-r--r--  drivers/ide/trm290.c | 3
-rw-r--r--  drivers/ide/via82cxxx.c | 12
-rw-r--r--  drivers/input/joystick/maplecontrol.c | 4
-rw-r--r--  drivers/input/keyboard/maple_keyb.c | 37
-rw-r--r--  drivers/input/keyboard/sh_keysc.c | 26
-rw-r--r--  drivers/media/video/cx88/cx88-alsa.c | 7
-rw-r--r--  drivers/media/video/em28xx/em28xx-audio.c | 7
-rw-r--r--  drivers/media/video/saa7134/saa7134-alsa.c | 8
-rw-r--r--  drivers/mtd/maps/Kconfig | 12
-rw-r--r--  drivers/mtd/maps/Makefile | 1
-rw-r--r--  drivers/mtd/maps/vmu-flash.c | 832
-rw-r--r--  drivers/rtc/rtc-sh.c | 246
-rw-r--r--  drivers/serial/sh-sci.c | 12
-rw-r--r--  drivers/serial/sh-sci.h | 15
-rw-r--r--  drivers/sh/intc.c | 47
-rw-r--r--  drivers/sh/maple/maple.c | 463
-rw-r--r--  drivers/staging/go7007/snd-go7007.c | 7
-rw-r--r--  drivers/usb/Kconfig | 1
-rw-r--r--  drivers/usb/gadget/gmidi.c | 7
-rw-r--r--  drivers/usb/host/ohci-hcd.c | 3
-rw-r--r--  drivers/video/pvr2fb.c | 16
-rw-r--r--  drivers/video/sh_mobile_lcdcfb.c | 66
82 files changed, 6953 insertions, 2786 deletions
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 8822eca58ff..5fab6470f4b 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -20,6 +20,20 @@ config HW_RANDOM
If unsure, say Y.
+config HW_RANDOM_TIMERIOMEM
+ tristate "Timer IOMEM HW Random Number Generator support"
+ depends on HW_RANDOM && HAS_IOMEM
+ ---help---
+ This driver provides kernel-side support for a generic Random
+ Number Generator used by reading a 'dumb' iomem address that
+ is to be read no faster than, for example, once a second;
+ the default FPGA bitstream on the TS-7800 has such functionality.
+
+ To compile this driver as a module, choose M here: the
+ module will be called timeriomem-rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_INTEL
tristate "Intel HW Random Number Generator support"
depends on HW_RANDOM && (X86 || IA64) && PCI
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index b6effb7522c..e81d21a5f28 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_HW_RANDOM) += rng-core.o
rng-core-y := core.o
+obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
new file mode 100644
index 00000000000..10ad41be589
--- /dev/null
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -0,0 +1,151 @@
+/*
+ * drivers/char/hw_random/timeriomem-rng.c
+ *
+ * Copyright (C) 2009 Alexander Clouter <alex@digriz.org.uk>
+ *
+ * Derived from drivers/char/hw_random/omap-rng.c
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ * Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ * This driver is useful for platforms that have an IO range that provides
+ * periodic random data from a single IO memory address. All the platform
+ * has to do is provide the address and the 'wait time' after which new
+ * data becomes available.
+ *
+ * TODO: add support for reading sizes other than 32bits and masking
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/timeriomem-rng.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/completion.h>
+
+static struct timeriomem_rng_data *timeriomem_rng_data;
+
+static void timeriomem_rng_trigger(unsigned long);
+static DEFINE_TIMER(timeriomem_rng_timer, timeriomem_rng_trigger, 0, 0);
+
+/*
+ * return 1 if we have data to read, 0 if nothing is available yet
+ */
+static int timeriomem_rng_data_present(struct hwrng *rng, int wait)
+{
+ if (rng->priv == 0)
+ return 1;
+
+ if (!wait || timeriomem_rng_data->present)
+ return timeriomem_rng_data->present;
+
+ wait_for_completion(&timeriomem_rng_data->completion);
+
+ return 1;
+}
+
+static int timeriomem_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ unsigned long cur;
+ s32 delay;
+
+ *data = readl(timeriomem_rng_data->address);
+
+ if (rng->priv != 0) {
+ cur = jiffies;
+
+ delay = cur - timeriomem_rng_timer.expires;
+ delay = rng->priv - (delay % rng->priv);
+
+ timeriomem_rng_timer.expires = cur + delay;
+ timeriomem_rng_data->present = 0;
+
+ init_completion(&timeriomem_rng_data->completion);
+ add_timer(&timeriomem_rng_timer);
+ }
+
+ return 4;
+}
+
+static void timeriomem_rng_trigger(unsigned long dummy)
+{
+ timeriomem_rng_data->present = 1;
+ complete(&timeriomem_rng_data->completion);
+}
+
+static struct hwrng timeriomem_rng_ops = {
+ .name = "timeriomem",
+ .data_present = timeriomem_rng_data_present,
+ .data_read = timeriomem_rng_data_read,
+ .priv = 0,
+};
+
+static int __init timeriomem_rng_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ timeriomem_rng_data = pdev->dev.platform_data;
+
+ if (timeriomem_rng_data->period != 0
+ && usecs_to_jiffies(timeriomem_rng_data->period) > 0) {
+ timeriomem_rng_timer.expires = jiffies;
+
+ timeriomem_rng_ops.priv = usecs_to_jiffies(
+ timeriomem_rng_data->period);
+ }
+ timeriomem_rng_data->present = 1;
+
+ ret = hwrng_register(&timeriomem_rng_ops);
+ if (ret) {
+ dev_err(&pdev->dev, "problem registering\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n",
+ timeriomem_rng_data->address,
+ timeriomem_rng_data->period);
+
+ return 0;
+}
+
+static int __devexit timeriomem_rng_remove(struct platform_device *pdev)
+{
+ del_timer_sync(&timeriomem_rng_timer);
+ hwrng_unregister(&timeriomem_rng_ops);
+
+ return 0;
+}
+
+static struct platform_driver timeriomem_rng_driver = {
+ .driver = {
+ .name = "timeriomem_rng",
+ .owner = THIS_MODULE,
+ },
+ .probe = timeriomem_rng_probe,
+ .remove = __devexit_p(timeriomem_rng_remove),
+};
+
+static int __init timeriomem_rng_init(void)
+{
+ return platform_driver_register(&timeriomem_rng_driver);
+}
+
+static void __exit timeriomem_rng_exit(void)
+{
+ platform_driver_unregister(&timeriomem_rng_driver);
+}
+
+module_init(timeriomem_rng_init);
+module_exit(timeriomem_rng_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
+MODULE_DESCRIPTION("Timer IOMEM H/W RNG driver");
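For context, here is how a board file might wire this driver up through platform_data. This is an illustrative sketch, not part of the patch: the register address and one-second period are hypothetical, and the timeriomem_rng_data field names are taken from the probe routine above (the struct itself lives in the new <linux/timeriomem-rng.h> header, outside this diff).

#include <linux/platform_device.h>
#include <linux/timeriomem-rng.h>

static struct timeriomem_rng_data board_rng_data = {
	/* must already be an ioremap()ed address, since the driver
	 * passes it straight to readl(); the value is hypothetical */
	.address	= (void __iomem *)0xf1000000,
	.period		= 1000000,	/* new data at most once a second (usecs) */
};

static struct platform_device board_rng_device = {
	.name	= "timeriomem_rng",	/* matches the driver name above */
	.id	= -1,
	.dev	= {
		.platform_data	= &board_rng_data,
	},
};

static int __init board_rng_register(void)
{
	return platform_device_register(&board_rng_device);
}

Note that the probe routine only honours the period if usecs_to_jiffies() on it is non-zero; a period of 0 leaves priv at 0, so data is reported as present on every read.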
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 9c47dc48c9f..ccdd828adce 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -429,134 +429,148 @@ out:
#define TPM_DIGEST_SIZE 20
#define TPM_ERROR_SIZE 10
#define TPM_RET_CODE_IDX 6
-#define TPM_GET_CAP_RET_SIZE_IDX 10
-#define TPM_GET_CAP_RET_UINT32_1_IDX 14
-#define TPM_GET_CAP_RET_UINT32_2_IDX 18
-#define TPM_GET_CAP_RET_UINT32_3_IDX 22
-#define TPM_GET_CAP_RET_UINT32_4_IDX 26
-#define TPM_GET_CAP_PERM_DISABLE_IDX 16
-#define TPM_GET_CAP_PERM_INACTIVE_IDX 18
-#define TPM_GET_CAP_RET_BOOL_1_IDX 14
-#define TPM_GET_CAP_TEMP_INACTIVE_IDX 16
-
-#define TPM_CAP_IDX 13
-#define TPM_CAP_SUBCAP_IDX 21
enum tpm_capabilities {
- TPM_CAP_FLAG = 4,
- TPM_CAP_PROP = 5,
+ TPM_CAP_FLAG = cpu_to_be32(4),
+ TPM_CAP_PROP = cpu_to_be32(5),
+ CAP_VERSION_1_1 = cpu_to_be32(0x06),
+ CAP_VERSION_1_2 = cpu_to_be32(0x1A)
};
enum tpm_sub_capabilities {
- TPM_CAP_PROP_PCR = 0x1,
- TPM_CAP_PROP_MANUFACTURER = 0x3,
- TPM_CAP_FLAG_PERM = 0x8,
- TPM_CAP_FLAG_VOL = 0x9,
- TPM_CAP_PROP_OWNER = 0x11,
- TPM_CAP_PROP_TIS_TIMEOUT = 0x15,
- TPM_CAP_PROP_TIS_DURATION = 0x20,
-};
+ TPM_CAP_PROP_PCR = cpu_to_be32(0x101),
+ TPM_CAP_PROP_MANUFACTURER = cpu_to_be32(0x103),
+ TPM_CAP_FLAG_PERM = cpu_to_be32(0x108),
+ TPM_CAP_FLAG_VOL = cpu_to_be32(0x109),
+ TPM_CAP_PROP_OWNER = cpu_to_be32(0x111),
+ TPM_CAP_PROP_TIS_TIMEOUT = cpu_to_be32(0x115),
+ TPM_CAP_PROP_TIS_DURATION = cpu_to_be32(0x120),
-/*
- * This is a semi generic GetCapability command for use
- * with the capability type TPM_CAP_PROP or TPM_CAP_FLAG
- * and their associated sub_capabilities.
- */
-
-static const u8 tpm_cap[] = {
- 0, 193, /* TPM_TAG_RQU_COMMAND */
- 0, 0, 0, 22, /* length */
- 0, 0, 0, 101, /* TPM_ORD_GetCapability */
- 0, 0, 0, 0, /* TPM_CAP_<TYPE> */
- 0, 0, 0, 4, /* TPM_CAP_SUB_<TYPE> size */
- 0, 0, 1, 0 /* TPM_CAP_SUB_<TYPE> */
};
-static ssize_t transmit_cmd(struct tpm_chip *chip, u8 *data, int len,
- char *desc)
+static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
+ int len, const char *desc)
{
int err;
- len = tpm_transmit(chip, data, len);
+ len = tpm_transmit(chip, (u8 *) cmd, len);
if (len < 0)
return len;
if (len == TPM_ERROR_SIZE) {
- err = be32_to_cpu(*((__be32 *) (data + TPM_RET_CODE_IDX)));
+ err = be32_to_cpu(cmd->header.out.return_code);
dev_dbg(chip->dev, "A TPM error (%d) occurred %s\n", err, desc);
return err;
}
return 0;
}
+#define TPM_INTERNAL_RESULT_SIZE 200
+#define TPM_TAG_RQU_COMMAND cpu_to_be16(193)
+#define TPM_ORD_GET_CAP cpu_to_be32(101)
+
+static const struct tpm_input_header tpm_getcap_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(22),
+ .ordinal = TPM_ORD_GET_CAP
+};
+
+ssize_t tpm_getcap(struct device *dev, __be32 subcap_id, cap_t *cap,
+ const char *desc)
+{
+ struct tpm_cmd_t tpm_cmd;
+ int rc;
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ tpm_cmd.header.in = tpm_getcap_header;
+ if (subcap_id == CAP_VERSION_1_1 || subcap_id == CAP_VERSION_1_2) {
+ tpm_cmd.params.getcap_in.cap = subcap_id;
+ /*subcap field not necessary */
+ tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(0);
+ tpm_cmd.header.in.length -= cpu_to_be32(sizeof(__be32));
+ } else {
+ if (subcap_id == TPM_CAP_FLAG_PERM ||
+ subcap_id == TPM_CAP_FLAG_VOL)
+ tpm_cmd.params.getcap_in.cap = TPM_CAP_FLAG;
+ else
+ tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+ tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
+ tpm_cmd.params.getcap_in.subcap = subcap_id;
+ }
+ rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, desc);
+ if (!rc)
+ *cap = tpm_cmd.params.getcap_out.cap;
+ return rc;
+}
+
void tpm_gen_interrupt(struct tpm_chip *chip)
{
- u8 data[max_t(int, ARRAY_SIZE(tpm_cap), 30)];
+ struct tpm_cmd_t tpm_cmd;
ssize_t rc;
- memcpy(data, tpm_cap, sizeof(tpm_cap));
- data[TPM_CAP_IDX] = TPM_CAP_PROP;
- data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_TIS_TIMEOUT;
+ tpm_cmd.header.in = tpm_getcap_header;
+ tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+ tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
+ tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
- rc = transmit_cmd(chip, data, sizeof(data),
+ rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the timeouts");
}
EXPORT_SYMBOL_GPL(tpm_gen_interrupt);
void tpm_get_timeouts(struct tpm_chip *chip)
{
- u8 data[max_t(int, ARRAY_SIZE(tpm_cap), 30)];
+ struct tpm_cmd_t tpm_cmd;
+ struct timeout_t *timeout_cap;
+ struct duration_t *duration_cap;
ssize_t rc;
u32 timeout;
- memcpy(data, tpm_cap, sizeof(tpm_cap));
- data[TPM_CAP_IDX] = TPM_CAP_PROP;
- data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_TIS_TIMEOUT;
+ tpm_cmd.header.in = tpm_getcap_header;
+ tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+ tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
+ tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
- rc = transmit_cmd(chip, data, sizeof(data),
+ rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the timeouts");
if (rc)
goto duration;
- if (be32_to_cpu(*((__be32 *) (data + TPM_GET_CAP_RET_SIZE_IDX)))
+ if (be32_to_cpu(tpm_cmd.header.out.length)
!= 4 * sizeof(u32))
goto duration;
+ timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
/* Don't overwrite default if value is 0 */
- timeout =
- be32_to_cpu(*((__be32 *) (data + TPM_GET_CAP_RET_UINT32_1_IDX)));
+ timeout = be32_to_cpu(timeout_cap->a);
if (timeout)
chip->vendor.timeout_a = usecs_to_jiffies(timeout);
- timeout =
- be32_to_cpu(*((__be32 *) (data + TPM_GET_CAP_RET_UINT32_2_IDX)));
+ timeout = be32_to_cpu(timeout_cap->b);
if (timeout)
chip->vendor.timeout_b = usecs_to_jiffies(timeout);
- timeout =
- be32_to_cpu(*((__be32 *) (data + TPM_GET_CAP_RET_UINT32_3_IDX)));
+ timeout = be32_to_cpu(timeout_cap->c);
if (timeout)
chip->vendor.timeout_c = usecs_to_jiffies(timeout);
- timeout =
- be32_to_cpu(*((__be32 *) (data + TPM_GET_CAP_RET_UINT32_4_IDX)));
+ timeout = be32_to_cpu(timeout_cap->d);
if (timeout)
chip->vendor.timeout_d = usecs_to_jiffies(timeout);
duration:
- memcpy(data, tpm_cap, sizeof(tpm_cap));
- data[TPM_CAP_IDX] = TPM_CAP_PROP;
- data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_TIS_DURATION;
+ tpm_cmd.header.in = tpm_getcap_header;
+ tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+ tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
+ tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION;
- rc = transmit_cmd(chip, data, sizeof(data),
+ rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the durations");
if (rc)
return;
- if (be32_to_cpu(*((__be32 *) (data + TPM_GET_CAP_RET_SIZE_IDX)))
+ if (be32_to_cpu(tpm_cmd.header.out.return_code)
!= 3 * sizeof(u32))
return;
-
+ duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
chip->vendor.duration[TPM_SHORT] =
- usecs_to_jiffies(be32_to_cpu
- (*((__be32 *) (data +
- TPM_GET_CAP_RET_UINT32_1_IDX))));
+ usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
/* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
* value wrong and apparently reports msecs rather than usecs. So we
* fix up the resulting too-small TPM_SHORT value to make things work.
@@ -565,13 +579,9 @@ duration:
chip->vendor.duration[TPM_SHORT] = HZ;
chip->vendor.duration[TPM_MEDIUM] =
- usecs_to_jiffies(be32_to_cpu
- (*((__be32 *) (data +
- TPM_GET_CAP_RET_UINT32_2_IDX))));
+ usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
chip->vendor.duration[TPM_LONG] =
- usecs_to_jiffies(be32_to_cpu
- (*((__be32 *) (data +
- TPM_GET_CAP_RET_UINT32_3_IDX))));
+ usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
}
EXPORT_SYMBOL_GPL(tpm_get_timeouts);
@@ -587,36 +597,18 @@ void tpm_continue_selftest(struct tpm_chip *chip)
}
EXPORT_SYMBOL_GPL(tpm_continue_selftest);
-#define TPM_INTERNAL_RESULT_SIZE 200
-
ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr,
char *buf)
{
- u8 *data;
+ cap_t cap;
ssize_t rc;
- struct tpm_chip *chip = dev_get_drvdata(dev);
- if (chip == NULL)
- return -ENODEV;
-
- data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- memcpy(data, tpm_cap, sizeof(tpm_cap));
- data[TPM_CAP_IDX] = TPM_CAP_FLAG;
- data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_FLAG_PERM;
-
- rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
- "attemtping to determine the permanent enabled state");
- if (rc) {
- kfree(data);
+ rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap,
+ "attempting to determine the permanent enabled state");
+ if (rc)
return 0;
- }
-
- rc = sprintf(buf, "%d\n", !data[TPM_GET_CAP_PERM_DISABLE_IDX]);
- kfree(data);
+ rc = sprintf(buf, "%d\n", !cap.perm_flags.disable);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_enabled);
@@ -624,31 +616,15 @@ EXPORT_SYMBOL_GPL(tpm_show_enabled);
ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr,
char *buf)
{
- u8 *data;
+ cap_t cap;
ssize_t rc;
- struct tpm_chip *chip = dev_get_drvdata(dev);
- if (chip == NULL)
- return -ENODEV;
-
- data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- memcpy(data, tpm_cap, sizeof(tpm_cap));
- data[TPM_CAP_IDX] = TPM_CAP_FLAG;
- data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_FLAG_PERM;
-
- rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
- "attemtping to determine the permanent active state");
- if (rc) {
- kfree(data);
+ rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap,
+ "attempting to determine the permanent active state");
+ if (rc)
return 0;
- }
- rc = sprintf(buf, "%d\n", !data[TPM_GET_CAP_PERM_INACTIVE_IDX]);
-
- kfree(data);
+ rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_active);
@@ -656,31 +632,15 @@ EXPORT_SYMBOL_GPL(tpm_show_active);
ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr,
char *buf)
{
- u8 *data;
+ cap_t cap;
ssize_t rc;
- struct tpm_chip *chip = dev_get_drvdata(dev);
- if (chip == NULL)
- return -ENODEV;
-
- data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- memcpy(data, tpm_cap, sizeof(tpm_cap));
- data[TPM_CAP_IDX] = TPM_CAP_PROP;
- data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_OWNER;
-
- rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
- "attempting to determine the owner state");
- if (rc) {
- kfree(data);
+ rc = tpm_getcap(dev, TPM_CAP_PROP_OWNER, &cap,
+ "attempting to determine the owner state");
+ if (rc)
return 0;
- }
-
- rc = sprintf(buf, "%d\n", data[TPM_GET_CAP_RET_BOOL_1_IDX]);
- kfree(data);
+ rc = sprintf(buf, "%d\n", cap.owned);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_owned);
@@ -688,116 +648,180 @@ EXPORT_SYMBOL_GPL(tpm_show_owned);
ssize_t tpm_show_temp_deactivated(struct device * dev,
struct device_attribute * attr, char *buf)
{
- u8 *data;
+ cap_t cap;
ssize_t rc;
- struct tpm_chip *chip = dev_get_drvdata(dev);
- if (chip == NULL)
- return -ENODEV;
+ rc = tpm_getcap(dev, TPM_CAP_FLAG_VOL, &cap,
+ "attempting to determine the temporary state");
+ if (rc)
+ return 0;
- data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_show_temp_deactivated);
- memcpy(data, tpm_cap, sizeof(tpm_cap));
- data[TPM_CAP_IDX] = TPM_CAP_FLAG;
- data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_FLAG_VOL;
+/*
+ * tpm_chip_find_get - return tpm_chip for given chip number
+ */
+static struct tpm_chip *tpm_chip_find_get(int chip_num)
+{
+ struct tpm_chip *pos, *chip = NULL;
- rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
- "attempting to determine the temporary state");
- if (rc) {
- kfree(data);
- return 0;
+ rcu_read_lock();
+ list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
+ if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num)
+ continue;
+
+ if (try_module_get(pos->dev->driver->owner)) {
+ chip = pos;
+ break;
+ }
}
+ rcu_read_unlock();
+ return chip;
+}
- rc = sprintf(buf, "%d\n", data[TPM_GET_CAP_TEMP_INACTIVE_IDX]);
+#define TPM_ORDINAL_PCRREAD cpu_to_be32(21)
+#define READ_PCR_RESULT_SIZE 30
+static struct tpm_input_header pcrread_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(14),
+ .ordinal = TPM_ORDINAL_PCRREAD
+};
- kfree(data);
+int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
+{
+ int rc;
+ struct tpm_cmd_t cmd;
+
+ cmd.header.in = pcrread_header;
+ cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx);
+ BUILD_BUG_ON(cmd.header.in.length > READ_PCR_RESULT_SIZE);
+ rc = transmit_cmd(chip, &cmd, cmd.header.in.length,
+ "attempting to read a pcr value");
+
+ if (rc == 0)
+ memcpy(res_buf, cmd.params.pcrread_out.pcr_result,
+ TPM_DIGEST_SIZE);
return rc;
}
-EXPORT_SYMBOL_GPL(tpm_show_temp_deactivated);
-static const u8 pcrread[] = {
- 0, 193, /* TPM_TAG_RQU_COMMAND */
- 0, 0, 0, 14, /* length */
- 0, 0, 0, 21, /* TPM_ORD_PcrRead */
- 0, 0, 0, 0 /* PCR index */
+/**
+ * tpm_pcr_read - read a pcr value
+ * @chip_num: tpm idx # or ANY
+ * @pcr_idx: pcr idx to retrieve
+ * @res_buf: TPM_PCR value
+ * size of res_buf is 20 bytes (or NULL if you don't care)
+ *
+ * The TPM driver should be built-in, but in case it isn't, protect
+ * against the chip disappearing by incrementing the module usage count.
+ */
+int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
+{
+ struct tpm_chip *chip;
+ int rc;
+
+ chip = tpm_chip_find_get(chip_num);
+ if (chip == NULL)
+ return -ENODEV;
+ rc = __tpm_pcr_read(chip, pcr_idx, res_buf);
+ module_put(chip->dev->driver->owner);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_pcr_read);
+
+/**
+ * tpm_pcr_extend - extend pcr value with hash
+ * @chip_num: tpm idx # or ANY
+ * @pcr_idx: pcr idx to extend
+ * @hash: hash value used to extend pcr value
+ *
+ * The TPM driver should be built-in, but in case it isn't, protect
+ * against the chip disappearing by incrementing the module usage count.
+ */
+#define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
+#define EXTEND_PCR_SIZE 34
+static struct tpm_input_header pcrextend_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(34),
+ .ordinal = TPM_ORD_PCR_EXTEND
};
+int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
+{
+ struct tpm_cmd_t cmd;
+ int rc;
+ struct tpm_chip *chip;
+
+ chip = tpm_chip_find_get(chip_num);
+ if (chip == NULL)
+ return -ENODEV;
+
+ cmd.header.in = pcrextend_header;
+ BUILD_BUG_ON(be32_to_cpu(cmd.header.in.length) > EXTEND_PCR_SIZE);
+ cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx);
+ memcpy(cmd.params.pcrextend_in.hash, hash, TPM_DIGEST_SIZE);
+ rc = transmit_cmd(chip, &cmd, cmd.header.in.length,
+ "attempting to extend a PCR value");
+
+ module_put(chip->dev->driver->owner);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_pcr_extend);
+
ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr,
char *buf)
{
- u8 *data;
+ cap_t cap;
+ u8 digest[TPM_DIGEST_SIZE];
ssize_t rc;
int i, j, num_pcrs;
- __be32 index;
char *str = buf;
-
struct tpm_chip *chip = dev_get_drvdata(dev);
- if (chip == NULL)
- return -ENODEV;
- data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- memcpy(data, tpm_cap, sizeof(tpm_cap));
- data[TPM_CAP_IDX] = TPM_CAP_PROP;
- data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_PCR;
-
- rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
+ rc = tpm_getcap(dev, TPM_CAP_PROP_PCR, &cap,
"attempting to determine the number of PCRS");
- if (rc) {
- kfree(data);
+ if (rc)
return 0;
- }
- num_pcrs = be32_to_cpu(*((__be32 *) (data + 14)));
+ num_pcrs = be32_to_cpu(cap.num_pcrs);
for (i = 0; i < num_pcrs; i++) {
- memcpy(data, pcrread, sizeof(pcrread));
- index = cpu_to_be32(i);
- memcpy(data + 10, &index, 4);
- rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
- "attempting to read a PCR");
+ rc = __tpm_pcr_read(chip, i, digest);
if (rc)
- goto out;
+ break;
str += sprintf(str, "PCR-%02d: ", i);
for (j = 0; j < TPM_DIGEST_SIZE; j++)
- str += sprintf(str, "%02X ", *(data + 10 + j));
+ str += sprintf(str, "%02X ", digest[j]);
str += sprintf(str, "\n");
}
-out:
- kfree(data);
return str - buf;
}
EXPORT_SYMBOL_GPL(tpm_show_pcrs);
#define READ_PUBEK_RESULT_SIZE 314
-static const u8 readpubek[] = {
- 0, 193, /* TPM_TAG_RQU_COMMAND */
- 0, 0, 0, 30, /* length */
- 0, 0, 0, 124, /* TPM_ORD_ReadPubek */
+#define TPM_ORD_READPUBEK cpu_to_be32(124)
+struct tpm_input_header tpm_readpubek_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(30),
+ .ordinal = TPM_ORD_READPUBEK
};
ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
char *buf)
{
u8 *data;
+ struct tpm_cmd_t tpm_cmd;
ssize_t err;
int i, rc;
char *str = buf;
struct tpm_chip *chip = dev_get_drvdata(dev);
- if (chip == NULL)
- return -ENODEV;
- data = kzalloc(READ_PUBEK_RESULT_SIZE, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- memcpy(data, readpubek, sizeof(readpubek));
-
- err = transmit_cmd(chip, data, READ_PUBEK_RESULT_SIZE,
+ tpm_cmd.header.in = tpm_readpubek_header;
+ err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
"attempting to read the PUBEK");
if (err)
goto out;
@@ -812,7 +836,7 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
256 byte modulus
ignore checksum 20 bytes
*/
-
+ data = tpm_cmd.params.readpubek_out_buffer;
str +=
sprintf(str,
"Algorithm: %02X %02X %02X %02X\nEncscheme: %02X %02X\n"
@@ -832,65 +856,33 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
}
out:
rc = str - buf;
- kfree(data);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_pubek);
-#define CAP_VERSION_1_1 6
-#define CAP_VERSION_1_2 0x1A
-#define CAP_VERSION_IDX 13
-static const u8 cap_version[] = {
- 0, 193, /* TPM_TAG_RQU_COMMAND */
- 0, 0, 0, 18, /* length */
- 0, 0, 0, 101, /* TPM_ORD_GetCapability */
- 0, 0, 0, 0,
- 0, 0, 0, 0
-};
ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr,
char *buf)
{
- u8 *data;
+ cap_t cap;
ssize_t rc;
char *str = buf;
- struct tpm_chip *chip = dev_get_drvdata(dev);
- if (chip == NULL)
- return -ENODEV;
-
- data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- memcpy(data, tpm_cap, sizeof(tpm_cap));
- data[TPM_CAP_IDX] = TPM_CAP_PROP;
- data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_MANUFACTURER;
-
- rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
+ rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
"attempting to determine the manufacturer");
- if (rc) {
- kfree(data);
+ if (rc)
return 0;
- }
-
str += sprintf(str, "Manufacturer: 0x%x\n",
- be32_to_cpu(*((__be32 *) (data + TPM_GET_CAP_RET_UINT32_1_IDX))));
+ be32_to_cpu(cap.manufacturer_id));
- memcpy(data, cap_version, sizeof(cap_version));
- data[CAP_VERSION_IDX] = CAP_VERSION_1_1;
- rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
- "attempting to determine the 1.1 version");
+ rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap,
+ "attempting to determine the 1.1 version");
if (rc)
- goto out;
-
+ return 0;
str += sprintf(str,
"TCG version: %d.%d\nFirmware version: %d.%d\n",
- (int) data[14], (int) data[15], (int) data[16],
- (int) data[17]);
-
-out:
- kfree(data);
+ cap.tpm_version.Major, cap.tpm_version.Minor,
+ cap.tpm_version.revMajor, cap.tpm_version.revMinor);
return str - buf;
}
EXPORT_SYMBOL_GPL(tpm_show_caps);
@@ -898,51 +890,25 @@ EXPORT_SYMBOL_GPL(tpm_show_caps);
ssize_t tpm_show_caps_1_2(struct device * dev,
struct device_attribute * attr, char *buf)
{
- u8 *data;
- ssize_t len;
+ cap_t cap;
+ ssize_t rc;
char *str = buf;
- struct tpm_chip *chip = dev_get_drvdata(dev);
- if (chip == NULL)
- return -ENODEV;
-
- data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- memcpy(data, tpm_cap, sizeof(tpm_cap));
- data[TPM_CAP_IDX] = TPM_CAP_PROP;
- data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_MANUFACTURER;
-
- len = tpm_transmit(chip, data, TPM_INTERNAL_RESULT_SIZE);
- if (len <= TPM_ERROR_SIZE) {
- dev_dbg(chip->dev, "A TPM error (%d) occurred "
- "attempting to determine the manufacturer\n",
- be32_to_cpu(*((__be32 *) (data + TPM_RET_CODE_IDX))));
- kfree(data);
+ rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
+ "attempting to determine the manufacturer");
+ if (rc)
return 0;
- }
-
str += sprintf(str, "Manufacturer: 0x%x\n",
- be32_to_cpu(*((__be32 *) (data + TPM_GET_CAP_RET_UINT32_1_IDX))));
-
- memcpy(data, cap_version, sizeof(cap_version));
- data[CAP_VERSION_IDX] = CAP_VERSION_1_2;
-
- len = tpm_transmit(chip, data, TPM_INTERNAL_RESULT_SIZE);
- if (len <= TPM_ERROR_SIZE) {
- dev_err(chip->dev, "A TPM error (%d) occurred "
- "attempting to determine the 1.2 version\n",
- be32_to_cpu(*((__be32 *) (data + TPM_RET_CODE_IDX))));
- goto out;
- }
+ be32_to_cpu(cap.manufacturer_id));
+ rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap,
+ "attempting to determine the 1.2 version");
+ if (rc)
+ return 0;
str += sprintf(str,
"TCG version: %d.%d\nFirmware version: %d.%d\n",
- (int) data[16], (int) data[17], (int) data[18],
- (int) data[19]);
-
-out:
- kfree(data);
+ cap.tpm_version_1_2.Major, cap.tpm_version_1_2.Minor,
+ cap.tpm_version_1_2.revMajor,
+ cap.tpm_version_1_2.revMinor);
return str - buf;
}
EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
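The pattern shared by the converted show handlers above is now short enough to sketch in a few lines. The handler below is hypothetical, not part of the patch; it assumes the cap_t union and the subcap constants introduced earlier in this file and in tpm.h below.

static ssize_t example_show_num_pcrs(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	cap_t cap;
	ssize_t rc;

	rc = tpm_getcap(dev, TPM_CAP_PROP_PCR, &cap,
			"attempting to determine the number of PCRS");
	if (rc)
		return 0;

	/* read the union member that matches the subcap queried */
	return sprintf(buf, "%u\n", be32_to_cpu(cap.num_pcrs));
}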
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 8e30df4a438..8e00b4ddd08 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -26,6 +26,7 @@
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/tpm.h>
enum tpm_timeout {
TPM_TIMEOUT = 5, /* msecs */
@@ -123,6 +124,147 @@ static inline void tpm_write_index(int base, int index, int value)
outb(index, base);
outb(value & 0xFF, base+1);
}
+struct tpm_input_header {
+ __be16 tag;
+ __be32 length;
+ __be32 ordinal;
+}__attribute__((packed));
+
+struct tpm_output_header {
+ __be16 tag;
+ __be32 length;
+ __be32 return_code;
+}__attribute__((packed));
+
+struct stclear_flags_t {
+ __be16 tag;
+ u8 deactivated;
+ u8 disableForceClear;
+ u8 physicalPresence;
+ u8 physicalPresenceLock;
+ u8 bGlobalLock;
+}__attribute__((packed));
+
+struct tpm_version_t {
+ u8 Major;
+ u8 Minor;
+ u8 revMajor;
+ u8 revMinor;
+}__attribute__((packed));
+
+struct tpm_version_1_2_t {
+ __be16 tag;
+ u8 Major;
+ u8 Minor;
+ u8 revMajor;
+ u8 revMinor;
+}__attribute__((packed));
+
+struct timeout_t {
+ __be32 a;
+ __be32 b;
+ __be32 c;
+ __be32 d;
+}__attribute__((packed));
+
+struct duration_t {
+ __be32 tpm_short;
+ __be32 tpm_medium;
+ __be32 tpm_long;
+}__attribute__((packed));
+
+struct permanent_flags_t {
+ __be16 tag;
+ u8 disable;
+ u8 ownership;
+ u8 deactivated;
+ u8 readPubek;
+ u8 disableOwnerClear;
+ u8 allowMaintenance;
+ u8 physicalPresenceLifetimeLock;
+ u8 physicalPresenceHWEnable;
+ u8 physicalPresenceCMDEnable;
+ u8 CEKPUsed;
+ u8 TPMpost;
+ u8 TPMpostLock;
+ u8 FIPS;
+ u8 operator;
+ u8 enableRevokeEK;
+ u8 nvLocked;
+ u8 readSRKPub;
+ u8 tpmEstablished;
+ u8 maintenanceDone;
+ u8 disableFullDALogicInfo;
+}__attribute__((packed));
+
+typedef union {
+ struct permanent_flags_t perm_flags;
+ struct stclear_flags_t stclear_flags;
+ bool owned;
+ __be32 num_pcrs;
+ struct tpm_version_t tpm_version;
+ struct tpm_version_1_2_t tpm_version_1_2;
+ __be32 manufacturer_id;
+ struct timeout_t timeout;
+ struct duration_t duration;
+} cap_t;
+
+struct tpm_getcap_params_in {
+ __be32 cap;
+ __be32 subcap_size;
+ __be32 subcap;
+}__attribute__((packed));
+
+struct tpm_getcap_params_out {
+ __be32 cap_size;
+ cap_t cap;
+}__attribute__((packed));
+
+struct tpm_readpubek_params_out {
+ u8 algorithm[4];
+ u8 encscheme[2];
+ u8 sigscheme[2];
+ u8 parameters[12]; /*assuming RSA*/
+ __be32 keysize;
+ u8 modulus[256];
+ u8 checksum[20];
+}__attribute__((packed));
+
+typedef union {
+ struct tpm_input_header in;
+ struct tpm_output_header out;
+} tpm_cmd_header;
+
+#define TPM_DIGEST_SIZE 20
+struct tpm_pcrread_out {
+ u8 pcr_result[TPM_DIGEST_SIZE];
+}__attribute__((packed));
+
+struct tpm_pcrread_in {
+ __be32 pcr_idx;
+}__attribute__((packed));
+
+struct tpm_pcrextend_in {
+ __be32 pcr_idx;
+ u8 hash[TPM_DIGEST_SIZE];
+}__attribute__((packed));
+
+typedef union {
+ struct tpm_getcap_params_out getcap_out;
+ struct tpm_readpubek_params_out readpubek_out;
+ u8 readpubek_out_buffer[sizeof(struct tpm_readpubek_params_out)];
+ struct tpm_getcap_params_in getcap_in;
+ struct tpm_pcrread_in pcrread_in;
+ struct tpm_pcrread_out pcrread_out;
+ struct tpm_pcrextend_in pcrextend_in;
+} tpm_cmd_params;
+
+struct tpm_cmd_t {
+ tpm_cmd_header header;
+ tpm_cmd_params params;
+}__attribute__((packed));
+
+ssize_t tpm_getcap(struct device *, __be32, cap_t *, const char *);
extern void tpm_get_timeouts(struct tpm_chip *);
extern void tpm_gen_interrupt(struct tpm_chip *);
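With tpm_pcr_read() and tpm_pcr_extend() now exported, other kernel code (an integrity subsystem, for instance) can log measurements into a PCR. A minimal caller sketch, assuming the new <linux/tpm.h> header declares the two prototypes and TPM_ANY_NUM; the PCR index 10 is an arbitrary example:

#include <linux/tpm.h>

static int example_measure(const u8 *digest)	/* 20-byte SHA-1 digest */
{
	u8 pcr[20];	/* TPM_DIGEST_SIZE */
	int rc;

	/* fold the digest into PCR 10 on whichever chip is registered */
	rc = tpm_pcr_extend(TPM_ANY_NUM, 10, digest);
	if (rc)
		return rc;

	/* read the updated PCR value back */
	return tpm_pcr_read(TPM_ANY_NUM, 10, pcr);
}

Both helpers return -ENODEV if no chip matches, and hold a module reference for the duration of the call so the driver cannot be unloaded mid-command.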
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 1525882190f..1efb2879a94 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
+obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
new file mode 100644
index 00000000000..7783b42f691
--- /dev/null
+++ b/drivers/clocksource/sh_cmt.c
@@ -0,0 +1,615 @@
+/*
+ * SuperH Timer Support - CMT
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/sh_cmt.h>
+
+struct sh_cmt_priv {
+ void __iomem *mapbase;
+ struct clk *clk;
+ unsigned long width; /* 16 or 32 bit version of hardware block */
+ unsigned long overflow_bit;
+ unsigned long clear_bits;
+ struct irqaction irqaction;
+ struct platform_device *pdev;
+
+ unsigned long flags;
+ unsigned long match_value;
+ unsigned long next_match_value;
+ unsigned long max_match_value;
+ unsigned long rate;
+ spinlock_t lock;
+ struct clock_event_device ced;
+ unsigned long total_cycles;
+};
+
+static DEFINE_SPINLOCK(sh_cmt_lock);
+
+#define CMSTR -1 /* shared register */
+#define CMCSR 0 /* channel register */
+#define CMCNT 1 /* channel register */
+#define CMCOR 2 /* channel register */
+
+static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr)
+{
+ struct sh_cmt_config *cfg = p->pdev->dev.platform_data;
+ void __iomem *base = p->mapbase;
+ unsigned long offs;
+
+ if (reg_nr == CMSTR) {
+ offs = 0;
+ base -= cfg->channel_offset;
+ } else
+ offs = reg_nr;
+
+ if (p->width == 16)
+ offs <<= 1;
+ else {
+ offs <<= 2;
+ if ((reg_nr == CMCNT) || (reg_nr == CMCOR))
+ return ioread32(base + offs);
+ }
+
+ return ioread16(base + offs);
+}
+
+static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr,
+ unsigned long value)
+{
+ struct sh_cmt_config *cfg = p->pdev->dev.platform_data;
+ void __iomem *base = p->mapbase;
+ unsigned long offs;
+
+ if (reg_nr == CMSTR) {
+ offs = 0;
+ base -= cfg->channel_offset;
+ } else
+ offs = reg_nr;
+
+ if (p->width == 16)
+ offs <<= 1;
+ else {
+ offs <<= 2;
+ if ((reg_nr == CMCNT) || (reg_nr == CMCOR)) {
+ iowrite32(value, base + offs);
+ return;
+ }
+ }
+
+ iowrite16(value, base + offs);
+}
+
+static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
+ int *has_wrapped)
+{
+ unsigned long v1, v2, v3;
+
+ /* Make sure the timer value is stable. Stolen from acpi_pm.c */
+ do {
+ v1 = sh_cmt_read(p, CMCNT);
+ v2 = sh_cmt_read(p, CMCNT);
+ v3 = sh_cmt_read(p, CMCNT);
+ } while (unlikely((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
+ || (v3 > v1 && v3 < v2)));
+
+ *has_wrapped = sh_cmt_read(p, CMCSR) & p->overflow_bit;
+ return v2;
+}
+
+
+static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
+{
+ struct sh_cmt_config *cfg = p->pdev->dev.platform_data;
+ unsigned long flags, value;
+
+ /* start stop register shared by multiple timer channels */
+ spin_lock_irqsave(&sh_cmt_lock, flags);
+ value = sh_cmt_read(p, CMSTR);
+
+ if (start)
+ value |= 1 << cfg->timer_bit;
+ else
+ value &= ~(1 << cfg->timer_bit);
+
+ sh_cmt_write(p, CMSTR, value);
+ spin_unlock_irqrestore(&sh_cmt_lock, flags);
+}
+
+static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
+{
+ struct sh_cmt_config *cfg = p->pdev->dev.platform_data;
+ int ret;
+
+ /* enable clock */
+ ret = clk_enable(p->clk);
+ if (ret) {
+ pr_err("sh_cmt: cannot enable clock \"%s\"\n", cfg->clk);
+ return ret;
+ }
+ *rate = clk_get_rate(p->clk) / 8;
+
+ /* make sure channel is disabled */
+ sh_cmt_start_stop_ch(p, 0);
+
+ /* configure channel, periodic mode and maximum timeout */
+ if (p->width == 16)
+ sh_cmt_write(p, CMCSR, 0);
+ else
+ sh_cmt_write(p, CMCSR, 0x01a4);
+
+ sh_cmt_write(p, CMCOR, 0xffffffff);
+ sh_cmt_write(p, CMCNT, 0);
+
+ /* enable channel */
+ sh_cmt_start_stop_ch(p, 1);
+ return 0;
+}
+
+static void sh_cmt_disable(struct sh_cmt_priv *p)
+{
+ /* disable channel */
+ sh_cmt_start_stop_ch(p, 0);
+
+ /* stop clock */
+ clk_disable(p->clk);
+}
+
+/* private flags */
+#define FLAG_CLOCKEVENT (1 << 0)
+#define FLAG_CLOCKSOURCE (1 << 1)
+#define FLAG_REPROGRAM (1 << 2)
+#define FLAG_SKIPEVENT (1 << 3)
+#define FLAG_IRQCONTEXT (1 << 4)
+
+static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
+ int absolute)
+{
+ unsigned long new_match;
+ unsigned long value = p->next_match_value;
+ unsigned long delay = 0;
+ unsigned long now = 0;
+ int has_wrapped;
+
+ now = sh_cmt_get_counter(p, &has_wrapped);
+ p->flags |= FLAG_REPROGRAM; /* force reprogram */
+
+ if (has_wrapped) {
+ /* we're competing with the interrupt handler.
+ * -> let the interrupt handler reprogram the timer.
+ * -> interrupt number two handles the event.
+ */
+ p->flags |= FLAG_SKIPEVENT;
+ return;
+ }
+
+ if (absolute)
+ now = 0;
+
+ do {
+ /* reprogram the timer hardware,
+ * but don't save the new match value yet.
+ */
+ new_match = now + value + delay;
+ if (new_match > p->max_match_value)
+ new_match = p->max_match_value;
+
+ sh_cmt_write(p, CMCOR, new_match);
+
+ now = sh_cmt_get_counter(p, &has_wrapped);
+ if (has_wrapped && (new_match > p->match_value)) {
+ /* we are changing to a greater match value,
+ * so this wrap must be caused by the counter
+ * matching the old value.
+ * -> first interrupt reprograms the timer.
+ * -> interrupt number two handles the event.
+ */
+ p->flags |= FLAG_SKIPEVENT;
+ break;
+ }
+
+ if (has_wrapped) {
+ /* we are changing to a smaller match value,
+ * so the wrap must be caused by the counter
+ * matching the new value.
+ * -> save programmed match value.
+ * -> let isr handle the event.
+ */
+ p->match_value = new_match;
+ break;
+ }
+
+ /* be safe: verify hardware settings */
+ if (now < new_match) {
+ /* timer value is below match value, all good.
+ * this makes sure we won't miss any match events.
+ * -> save programmed match value.
+ * -> let isr handle the event.
+ */
+ p->match_value = new_match;
+ break;
+ }
+
+ /* the counter has reached a value greater
+ * than our new match value. and since the
+ * has_wrapped flag isn't set we must have
+ * programmed a too close event.
+ * -> increase delay and retry.
+ */
+ if (delay)
+ delay <<= 1;
+ else
+ delay = 1;
+
+ if (!delay)
+ pr_warning("sh_cmt: too long delay\n");
+
+ } while (delay);
+}
+
+static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
+{
+ unsigned long flags;
+
+ if (delta > p->max_match_value)
+ pr_warning("sh_cmt: delta out of range\n");
+
+ spin_lock_irqsave(&p->lock, flags);
+ p->next_match_value = delta;
+ sh_cmt_clock_event_program_verify(p, 0);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
+{
+ struct sh_cmt_priv *p = dev_id;
+
+ /* clear flags */
+ sh_cmt_write(p, CMCSR, sh_cmt_read(p, CMCSR) & p->clear_bits);
+
+ /* if used as a clock source, update the total cycle count first;
+ * the wrap flag should already have been cleared by the timer
+ * specific isr before we end up here.
+ */
+ if (p->flags & FLAG_CLOCKSOURCE)
+ p->total_cycles += p->match_value;
+
+ if (!(p->flags & FLAG_REPROGRAM))
+ p->next_match_value = p->max_match_value;
+
+ p->flags |= FLAG_IRQCONTEXT;
+
+ if (p->flags & FLAG_CLOCKEVENT) {
+ if (!(p->flags & FLAG_SKIPEVENT)) {
+ if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
+ p->next_match_value = p->max_match_value;
+ p->flags |= FLAG_REPROGRAM;
+ }
+
+ p->ced.event_handler(&p->ced);
+ }
+ }
+
+ p->flags &= ~FLAG_SKIPEVENT;
+
+ if (p->flags & FLAG_REPROGRAM) {
+ p->flags &= ~FLAG_REPROGRAM;
+ sh_cmt_clock_event_program_verify(p, 1);
+
+ if (p->flags & FLAG_CLOCKEVENT)
+ if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
+ || (p->match_value == p->next_match_value))
+ p->flags &= ~FLAG_REPROGRAM;
+ }
+
+ p->flags &= ~FLAG_IRQCONTEXT;
+
+ return IRQ_HANDLED;
+}
+
+static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+
+ if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
+ ret = sh_cmt_enable(p, &p->rate);
+
+ if (ret)
+ goto out;
+ p->flags |= flag;
+
+ /* setup timeout if no clockevent */
+ if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
+ sh_cmt_set_next(p, p->max_match_value);
+ out:
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ return ret;
+}
+
+static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
+{
+ unsigned long flags;
+ unsigned long f;
+
+ spin_lock_irqsave(&p->lock, flags);
+
+ f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
+ p->flags &= ~flag;
+
+ if (f && !(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
+ sh_cmt_disable(p);
+
+ /* adjust the timeout to maximum if only clocksource left */
+ if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
+ sh_cmt_set_next(p, p->max_match_value);
+
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced)
+{
+ return container_of(ced, struct sh_cmt_priv, ced);
+}
+
+static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic)
+{
+ struct clock_event_device *ced = &p->ced;
+
+ sh_cmt_start(p, FLAG_CLOCKEVENT);
+
+ /* TODO: calculate good shift from rate and counter bit width */
+
+ ced->shift = 32;
+ ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
+ ced->max_delta_ns = clockevent_delta2ns(p->max_match_value, ced);
+ ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);
+
+ if (periodic)
+ sh_cmt_set_next(p, (p->rate + HZ/2) / HZ);
+ else
+ sh_cmt_set_next(p, p->max_match_value);
+}
+
+static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
+ struct clock_event_device *ced)
+{
+ struct sh_cmt_priv *p = ced_to_sh_cmt(ced);
+
+ /* deal with old setting first */
+ switch (ced->mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ case CLOCK_EVT_MODE_ONESHOT:
+ sh_cmt_stop(p, FLAG_CLOCKEVENT);
+ break;
+ default:
+ break;
+ }
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ pr_info("sh_cmt: %s used for periodic clock events\n",
+ ced->name);
+ sh_cmt_clock_event_start(p, 1);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ pr_info("sh_cmt: %s used for oneshot clock events\n",
+ ced->name);
+ sh_cmt_clock_event_start(p, 0);
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
+ sh_cmt_stop(p, FLAG_CLOCKEVENT);
+ break;
+ default:
+ break;
+ }
+}
+
+static int sh_cmt_clock_event_next(unsigned long delta,
+ struct clock_event_device *ced)
+{
+ struct sh_cmt_priv *p = ced_to_sh_cmt(ced);
+
+ BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
+ if (likely(p->flags & FLAG_IRQCONTEXT))
+ p->next_match_value = delta;
+ else
+ sh_cmt_set_next(p, delta);
+
+ return 0;
+}
+
+static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
+ char *name, unsigned long rating)
+{
+ struct clock_event_device *ced = &p->ced;
+
+ memset(ced, 0, sizeof(*ced));
+
+ ced->name = name;
+ ced->features = CLOCK_EVT_FEAT_PERIODIC;
+ ced->features |= CLOCK_EVT_FEAT_ONESHOT;
+ ced->rating = rating;
+ ced->cpumask = cpumask_of(0);
+ ced->set_next_event = sh_cmt_clock_event_next;
+ ced->set_mode = sh_cmt_clock_event_mode;
+
+ pr_info("sh_cmt: %s used for clock events\n", ced->name);
+ ced->mult = 1; /* work around misplaced WARN_ON() in clockevents.c */
+ clockevents_register_device(ced);
+}
+
+int sh_cmt_register(struct sh_cmt_priv *p, char *name,
+ unsigned long clockevent_rating,
+ unsigned long clocksource_rating)
+{
+ if (p->width == (sizeof(p->max_match_value) * 8))
+ p->max_match_value = ~0;
+ else
+ p->max_match_value = (1 << p->width) - 1;
+
+ p->match_value = p->max_match_value;
+ spin_lock_init(&p->lock);
+
+ if (clockevent_rating)
+ sh_cmt_register_clockevent(p, name, clockevent_rating);
+
+ return 0;
+}
+
+static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
+{
+ struct sh_cmt_config *cfg = pdev->dev.platform_data;
+ struct resource *res;
+ int irq, ret;
+ ret = -ENXIO;
+
+ memset(p, 0, sizeof(*p));
+ p->pdev = pdev;
+
+ if (!cfg) {
+ dev_err(&p->pdev->dev, "missing platform data\n");
+ goto err0;
+ }
+
+ platform_set_drvdata(pdev, p);
+
+ res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&p->pdev->dev, "failed to get I/O memory\n");
+ goto err0;
+ }
+
+ irq = platform_get_irq(p->pdev, 0);
+ if (irq < 0) {
+ dev_err(&p->pdev->dev, "failed to get irq\n");
+ goto err0;
+ }
+
+ /* map memory, let mapbase point to our channel */
+ p->mapbase = ioremap_nocache(res->start, resource_size(res));
+ if (p->mapbase == NULL) {
+ pr_err("sh_cmt: failed to remap I/O memory\n");
+ goto err0;
+ }
+
+ /* request irq using setup_irq() (too early for request_irq()) */
+ p->irqaction.name = cfg->name;
+ p->irqaction.handler = sh_cmt_interrupt;
+ p->irqaction.dev_id = p;
+ p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+ p->irqaction.mask = CPU_MASK_NONE;
+ ret = setup_irq(irq, &p->irqaction);
+ if (ret) {
+ pr_err("sh_cmt: failed to request irq %d\n", irq);
+ goto err1;
+ }
+
+ /* get hold of clock */
+ p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ if (IS_ERR(p->clk)) {
+ pr_err("sh_cmt: cannot get clock \"%s\"\n", cfg->clk);
+ ret = PTR_ERR(p->clk);
+ goto err2;
+ }
+
+ if (resource_size(res) == 6) {
+ p->width = 16;
+ p->overflow_bit = 0x80;
+ p->clear_bits = ~0xc0;
+ } else {
+ p->width = 32;
+ p->overflow_bit = 0x8000;
+ p->clear_bits = ~0xc000;
+ }
+
+ return sh_cmt_register(p, cfg->name,
+ cfg->clockevent_rating,
+ cfg->clocksource_rating);
+ err2:
+ free_irq(irq, p);
+ err1:
+ iounmap(p->mapbase);
+ err0:
+ return ret;
+}
+
+static int __devinit sh_cmt_probe(struct platform_device *pdev)
+{
+ struct sh_cmt_priv *p = platform_get_drvdata(pdev);
+ int ret;
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL) {
+ dev_err(&pdev->dev, "failed to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ ret = sh_cmt_setup(p, pdev);
+ if (ret) {
+ kfree(p);
+
+ platform_set_drvdata(pdev, NULL);
+ }
+ return ret;
+}
+
+static int __devexit sh_cmt_remove(struct platform_device *pdev)
+{
+ return -EBUSY; /* cannot unregister clockevent and clocksource */
+}
+
+static struct platform_driver sh_cmt_device_driver = {
+ .probe = sh_cmt_probe,
+ .remove = __devexit_p(sh_cmt_remove),
+ .driver = {
+ .name = "sh_cmt",
+ }
+};
+
+static int __init sh_cmt_init(void)
+{
+ return platform_driver_register(&sh_cmt_device_driver);
+}
+
+static void __exit sh_cmt_exit(void)
+{
+ platform_driver_unregister(&sh_cmt_device_driver);
+}
+
+module_init(sh_cmt_init);
+module_exit(sh_cmt_exit);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("SuperH CMT Timer Driver");
+MODULE_LICENSE("GPL v2");
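For context, a SoC setup file would describe each CMT channel to this driver roughly as follows. The sketch is illustrative, not part of the patch: the addresses, IRQ number and clock name are hypothetical placeholders, and the sh_cmt_config fields are the ones consumed by sh_cmt_setup() above (the struct itself lives in the new <linux/sh_cmt.h> header, outside this diff).

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/sh_cmt.h>

static struct sh_cmt_config cmt0_platform_data = {
	.name			= "CMT0",
	.channel_offset		= 0x60,	/* shared CMSTR sits this far below the channel */
	.timer_bit		= 0,	/* this channel's start/stop bit in CMSTR */
	.clk			= "cmt0",	/* name handed to clk_get() */
	.clockevent_rating	= 125,
	.clocksource_rating	= 0,	/* clocksource support not wired up yet */
};

static struct resource cmt0_resources[] = {
	[0] = {
		.start	= 0xf84a0060,	/* hypothetical channel base */
		.end	= 0xf84a0065,	/* 6 bytes wide => 16-bit channel */
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 24,		/* hypothetical timer IRQ */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device cmt0_device = {
	.name		= "sh_cmt",
	.id		= 0,
	.dev		= {
		.platform_data	= &cmt0_platform_data,
	},
	.resource	= cmt0_resources,
	.num_resources	= ARRAY_SIZE(cmt0_resources),
};

The 6-byte memory resource is what makes sh_cmt_setup() select the 16-bit register layout; a wider window selects the 32-bit layout.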
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d6daf3c507d..d270e8eb3e6 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -104,7 +104,8 @@ EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
/* internal prototypes */
-static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
+static int __cpufreq_governor(struct cpufreq_policy *policy,
+ unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);
@@ -128,7 +129,7 @@ static int __init init_cpufreq_transition_notifier_list(void)
pure_initcall(init_cpufreq_transition_notifier_list);
static LIST_HEAD(cpufreq_governor_list);
-static DEFINE_MUTEX (cpufreq_governor_mutex);
+static DEFINE_MUTEX(cpufreq_governor_mutex);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
@@ -371,7 +372,7 @@ static struct cpufreq_governor *__find_governor(const char *str_governor)
struct cpufreq_governor *t;
list_for_each_entry(t, &cpufreq_governor_list, governor_list)
- if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN))
+ if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
return t;
return NULL;
@@ -429,15 +430,11 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
mutex_unlock(&cpufreq_governor_mutex);
}
- out:
+out:
return err;
}
-/* drivers/base/cpu.c */
-extern struct sysdev_class cpu_sysdev_class;
-
-
/**
* cpufreq_per_cpu_attr_read() / show_##file_name() -
* print out cpufreq information
@@ -450,11 +447,12 @@ extern struct sysdev_class cpu_sysdev_class;
static ssize_t show_##file_name \
(struct cpufreq_policy *policy, char *buf) \
{ \
- return sprintf (buf, "%u\n", policy->object); \
+ return sprintf(buf, "%u\n", policy->object); \
}
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
+show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
@@ -476,7 +474,7 @@ static ssize_t store_##file_name \
if (ret) \
return -EINVAL; \
\
- ret = sscanf (buf, "%u", &new_policy.object); \
+ ret = sscanf(buf, "%u", &new_policy.object); \
if (ret != 1) \
return -EINVAL; \
\
@@ -486,8 +484,8 @@ static ssize_t store_##file_name \
return ret ? ret : count; \
}
-store_one(scaling_min_freq,min);
-store_one(scaling_max_freq,max);
+store_one(scaling_min_freq, min);
+store_one(scaling_max_freq, max);
/**
* show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
@@ -507,12 +505,13 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
*/
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
- if(policy->policy == CPUFREQ_POLICY_POWERSAVE)
+ if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
return sprintf(buf, "powersave\n");
else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
return sprintf(buf, "performance\n");
else if (policy->governor)
- return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", policy->governor->name);
+ return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
+ policy->governor->name);
return -EINVAL;
}
@@ -531,7 +530,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
if (ret)
return ret;
- ret = sscanf (buf, "%15s", str_governor);
+ ret = sscanf(buf, "%15s", str_governor);
if (ret != 1)
return -EINVAL;
@@ -575,7 +574,8 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
}
list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
- if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2)))
+ if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
+ - (CPUFREQ_NAME_LEN + 2)))
goto out;
i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
}
@@ -594,7 +594,7 @@ static ssize_t show_cpus(const struct cpumask *mask, char *buf)
i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
if (i >= (PAGE_SIZE - 5))
- break;
+ break;
}
i += sprintf(&buf[i], "\n");
return i;
@@ -660,6 +660,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
define_one_ro0400(cpuinfo_cur_freq);
define_one_ro(cpuinfo_min_freq);
define_one_ro(cpuinfo_max_freq);
+define_one_ro(cpuinfo_transition_latency);
define_one_ro(scaling_available_governors);
define_one_ro(scaling_driver);
define_one_ro(scaling_cur_freq);
@@ -673,6 +674,7 @@ define_one_rw(scaling_setspeed);
static struct attribute *default_attrs[] = {
&cpuinfo_min_freq.attr,
&cpuinfo_max_freq.attr,
+ &cpuinfo_transition_latency.attr,
&scaling_min_freq.attr,
&scaling_max_freq.attr,
&affected_cpus.attr,
@@ -684,10 +686,10 @@ static struct attribute *default_attrs[] = {
NULL
};
-#define to_policy(k) container_of(k,struct cpufreq_policy,kobj)
-#define to_attr(a) container_of(a,struct freq_attr,attr)
+#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
+#define to_attr(a) container_of(a, struct freq_attr, attr)
-static ssize_t show(struct kobject *kobj, struct attribute *attr ,char *buf)
+static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
@@ -853,10 +855,10 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
if (cpu == j)
continue;
- /* check for existing affected CPUs. They may not be aware
- * of it due to CPU Hotplug.
+ /* Check for existing affected CPUs.
+ * They may not be aware of it due to CPU Hotplug.
*/
- managed_policy = cpufreq_cpu_get(j); // FIXME: Where is this released? What about error paths?
+ managed_policy = cpufreq_cpu_get(j); /* FIXME: Where is this released? What about error paths? */
if (unlikely(managed_policy)) {
/* Set proper policy_cpu */
@@ -1127,8 +1129,8 @@ static void handle_update(struct work_struct *work)
* @old_freq: CPU frequency the kernel thinks the CPU runs at
* @new_freq: CPU frequency the CPU actually runs at
*
- * We adjust to current frequency first, and need to clean up later. So either call
- * to cpufreq_update_policy() or schedule handle_update()).
+ * We adjust to current frequency first, and need to clean up later.
+ * So either call to cpufreq_update_policy() or schedule handle_update()).
*/
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
unsigned int new_freq)
@@ -1610,7 +1612,8 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/**
* cpufreq_get_policy - get the current cpufreq_policy
- * @policy: struct cpufreq_policy into which the current cpufreq_policy is written
+ * @policy: struct cpufreq_policy into which the current cpufreq_policy
+ * is written
*
* Reads the current cpufreq policy.
*/
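The new cpuinfo_transition_latency attribute added above exposes cpuinfo.transition_latency, the frequency-switch latency reported by the cpufreq driver (in nanoseconds, per cpufreq convention). A hypothetical userspace sketch that reads it, not part of the patch:

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/"
			"cpuinfo_transition_latency", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("transition latency: %s", buf);	/* nanoseconds */
	fclose(f);
	return 0;
}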
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 0320962c4ec..2ecd95e4ab1 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -4,7 +4,7 @@
* Copyright (C) 2001 Russell King
* (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
* Jun Nakajima <jun.nakajima@intel.com>
- * (C) 2004 Alexander Clouter <alex-kernel@digriz.org.uk>
+ * (C) 2009 Alexander Clouter <alex@digriz.org.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -13,22 +13,17 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/smp.h>
#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ctype.h>
#include <linux/cpufreq.h>
-#include <linux/sysctl.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/sysfs.h>
#include <linux/cpu.h>
-#include <linux/kmod.h>
-#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
-#include <linux/percpu.h>
#include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
+
/*
* dbs is used in this file as a shortform for demandbased switching
* It helps to keep variable names smaller, simpler
@@ -43,19 +38,31 @@
* latency of the processor. The governor will work on any processor with
* transition latency <= 10mS, using appropriate sampling
* rate.
- * For CPUs with transition latency > 10mS (mostly drivers
- * with CPUFREQ_ETERNAL), this governor will not work.
+ * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
+ * this governor will not work.
* All times here are in uS.
*/
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO (2)
/* for correct statistics, we need at least 10 ticks between each measure */
-#define MIN_STAT_SAMPLING_RATE \
+#define MIN_STAT_SAMPLING_RATE \
(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE \
(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
+/* The MIN_SAMPLING_RATE above will vanish together with its sysfs file soon.
+ * Define the minimal settable sampling rate as the greater of:
+ * - "HW transition latency" * 100 (same as default sampling rate / 10)
+ * - MIN_STAT_SAMPLING_RATE
+ * so that userspace cannot shoot itself in the foot.
+ */
+static unsigned int minimum_sampling_rate(void)
+{
+ return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
+}
+
+/* This will also vanish soon with removing sampling_rate_max */
#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
-#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
+#define LATENCY_MULTIPLIER (1000)
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (10)
#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
@@ -63,12 +70,15 @@ static unsigned int def_sampling_rate;
static void do_dbs_timer(struct work_struct *work);
struct cpu_dbs_info_s {
+ cputime64_t prev_cpu_idle;
+ cputime64_t prev_cpu_wall;
+ cputime64_t prev_cpu_nice;
struct cpufreq_policy *cur_policy;
- unsigned int prev_cpu_idle_up;
- unsigned int prev_cpu_idle_down;
- unsigned int enable;
+ struct delayed_work work;
unsigned int down_skip;
unsigned int requested_freq;
+ int cpu;
+ unsigned int enable:1;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
@@ -82,19 +92,18 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
* cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
* is recursive for the same process. -Venki
*/
-static DEFINE_MUTEX (dbs_mutex);
-static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
+static DEFINE_MUTEX(dbs_mutex);
-struct dbs_tuners {
+static struct workqueue_struct *kconservative_wq;
+
+static struct dbs_tuners {
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
unsigned int down_threshold;
unsigned int ignore_nice;
unsigned int freq_step;
-};
-
-static struct dbs_tuners dbs_tuners_ins = {
+} dbs_tuners_ins = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
@@ -102,18 +111,37 @@ static struct dbs_tuners dbs_tuners_ins = {
.freq_step = 5,
};
-static inline unsigned int get_cpu_idle_time(unsigned int cpu)
+static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
+ cputime64_t *wall)
{
- unsigned int add_nice = 0, ret;
+ cputime64_t idle_time;
+ cputime64_t cur_wall_time;
+ cputime64_t busy_time;
- if (dbs_tuners_ins.ignore_nice)
- add_nice = kstat_cpu(cpu).cpustat.nice;
+ cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+ busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
+ kstat_cpu(cpu).cpustat.system);
+
+ busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
+ busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
+ busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
+ busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+
+ idle_time = cputime64_sub(cur_wall_time, busy_time);
+ if (wall)
+ *wall = cur_wall_time;
+
+ return idle_time;
+}
+
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
+{
+ u64 idle_time = get_cpu_idle_time_us(cpu, wall);
- ret = kstat_cpu(cpu).cpustat.idle +
- kstat_cpu(cpu).cpustat.iowait +
- add_nice;
+ if (idle_time == -1ULL)
+ return get_cpu_idle_time_jiffy(cpu, wall);
- return ret;
+ return idle_time;
}
/* keep track of frequency transitions */
@@ -125,10 +153,21 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
freq->cpu);
+ struct cpufreq_policy *policy;
+
if (!this_dbs_info->enable)
return 0;
- this_dbs_info->requested_freq = freq->new;
+ policy = this_dbs_info->cur_policy;
+
+ /*
+ * We only care if our internally tracked freq moves outside the
+ * 'valid' range of frequencies available to us; otherwise we do
+ * not change it.
+ */
+ if (this_dbs_info->requested_freq > policy->max
+ || this_dbs_info->requested_freq < policy->min)
+ this_dbs_info->requested_freq = freq->new;
return 0;
}
@@ -140,16 +179,31 @@ static struct notifier_block dbs_cpufreq_notifier_block = {
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
- return sprintf (buf, "%u\n", MAX_SAMPLING_RATE);
+ static int print_once;
+
+ if (!print_once) {
+ printk(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
+ "sysfs file is deprecated - used by: %s\n",
+ current->comm);
+ print_once = 1;
+ }
+ return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}
static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
- return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
+ static int print_once;
+
+ if (!print_once) {
+ printk(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
+ "sysfs file is deprecated - used by: %s\n", current->comm);
+ print_once = 1;
+ }
+ return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}
-#define define_one_ro(_name) \
-static struct freq_attr _name = \
+#define define_one_ro(_name) \
+static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)
define_one_ro(sampling_rate_max);
@@ -174,7 +228,8 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
{
unsigned int input;
int ret;
- ret = sscanf (buf, "%u", &input);
+ ret = sscanf(buf, "%u", &input);
+
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
@@ -190,15 +245,13 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
{
unsigned int input;
int ret;
- ret = sscanf (buf, "%u", &input);
+ ret = sscanf(buf, "%u", &input);
- mutex_lock(&dbs_mutex);
- if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
- mutex_unlock(&dbs_mutex);
+ if (ret != 1)
return -EINVAL;
- }
- dbs_tuners_ins.sampling_rate = input;
+ mutex_lock(&dbs_mutex);
+ dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
mutex_unlock(&dbs_mutex);
return count;
@@ -209,10 +262,11 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
{
unsigned int input;
int ret;
- ret = sscanf (buf, "%u", &input);
+ ret = sscanf(buf, "%u", &input);
mutex_lock(&dbs_mutex);
- if (ret != 1 || input > 100 || input <= dbs_tuners_ins.down_threshold) {
+ if (ret != 1 || input > 100 ||
+ input <= dbs_tuners_ins.down_threshold) {
mutex_unlock(&dbs_mutex);
return -EINVAL;
}
@@ -228,10 +282,12 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
{
unsigned int input;
int ret;
- ret = sscanf (buf, "%u", &input);
+ ret = sscanf(buf, "%u", &input);
mutex_lock(&dbs_mutex);
- if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) {
+ /* cannot be lower than 11, otherwise the frequency will not fall */
+ if (ret != 1 || input < 11 || input > 100 ||
+ input >= dbs_tuners_ins.up_threshold) {
mutex_unlock(&dbs_mutex);
return -EINVAL;
}
@@ -264,12 +320,14 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
}
dbs_tuners_ins.ignore_nice = input;
- /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
+ /* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
- struct cpu_dbs_info_s *j_dbs_info;
- j_dbs_info = &per_cpu(cpu_dbs_info, j);
- j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
- j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
+ struct cpu_dbs_info_s *dbs_info;
+ dbs_info = &per_cpu(cpu_dbs_info, j);
+ dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+ &dbs_info->prev_cpu_wall);
+ if (dbs_tuners_ins.ignore_nice)
+ dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
}
mutex_unlock(&dbs_mutex);
@@ -281,7 +339,6 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy,
{
unsigned int input;
int ret;
-
ret = sscanf(buf, "%u", &input);
if (ret != 1)
@@ -310,7 +367,7 @@ define_one_rw(down_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(freq_step);
-static struct attribute * dbs_attributes[] = {
+static struct attribute *dbs_attributes[] = {
&sampling_rate_max.attr,
&sampling_rate_min.attr,
&sampling_rate.attr,
@@ -329,55 +386,78 @@ static struct attribute_group dbs_attr_group = {
/************************** sysfs end ************************/
-static void dbs_check_cpu(int cpu)
+static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
- unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
- unsigned int tmp_idle_ticks, total_idle_ticks;
+ unsigned int load = 0;
unsigned int freq_target;
- unsigned int freq_down_sampling_rate;
- struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
- struct cpufreq_policy *policy;
- if (!this_dbs_info->enable)
- return;
+ struct cpufreq_policy *policy;
+ unsigned int j;
policy = this_dbs_info->cur_policy;
/*
- * The default safe range is 20% to 80%
- * Every sampling_rate, we check
- * - If current idle time is less than 20%, then we try to
- * increase frequency
- * Every sampling_rate*sampling_down_factor, we check
- * - If current idle time is more than 80%, then we try to
- * decrease frequency
+ * Every sampling_rate, we check, if current idle time is less
+ * than 20% (default), then we try to increase frequency
+ * Every sampling_rate*sampling_down_factor, we check, if current
+ * idle time is more than 80%, then we try to decrease frequency
*
* Any frequency increase takes it to the maximum frequency.
* Frequency reduction happens at minimum steps of
- * 5% (default) of max_frequency
+ * 5% (default) of maximum frequency
*/
- /* Check for frequency increase */
- idle_ticks = UINT_MAX;
+ /* Get Absolute Load */
+ for_each_cpu(j, policy->cpus) {
+ struct cpu_dbs_info_s *j_dbs_info;
+ cputime64_t cur_wall_time, cur_idle_time;
+ unsigned int idle_time, wall_time;
- /* Check for frequency increase */
- total_idle_ticks = get_cpu_idle_time(cpu);
- tmp_idle_ticks = total_idle_ticks -
- this_dbs_info->prev_cpu_idle_up;
- this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
+ j_dbs_info = &per_cpu(cpu_dbs_info, j);
+
+ cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+
+ wall_time = (unsigned int) cputime64_sub(cur_wall_time,
+ j_dbs_info->prev_cpu_wall);
+ j_dbs_info->prev_cpu_wall = cur_wall_time;
+
+ idle_time = (unsigned int) cputime64_sub(cur_idle_time,
+ j_dbs_info->prev_cpu_idle);
+ j_dbs_info->prev_cpu_idle = cur_idle_time;
+
+ if (dbs_tuners_ins.ignore_nice) {
+ cputime64_t cur_nice;
+ unsigned long cur_nice_jiffies;
+
+ cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
+ j_dbs_info->prev_cpu_nice);
+ /*
+ * Assumption: nice time between sampling periods will
+ * be less than 2^32 jiffies on a 32-bit system
+ */
+ cur_nice_jiffies = (unsigned long)
+ cputime64_to_jiffies64(cur_nice);
- if (tmp_idle_ticks < idle_ticks)
- idle_ticks = tmp_idle_ticks;
+ j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+ idle_time += jiffies_to_usecs(cur_nice_jiffies);
+ }
+
+ if (unlikely(!wall_time || wall_time < idle_time))
+ continue;
+
+ load = 100 * (wall_time - idle_time) / wall_time;
+ }
- /* Scale idle ticks by 100 and compare with up and down ticks */
- idle_ticks *= 100;
- up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
- usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+ /*
+ * Return early if we 'cannot' reduce the speed, as the user
+ * might want freq_step to be zero
+ */
+ if (dbs_tuners_ins.freq_step == 0)
+ return;
- if (idle_ticks < up_idle_ticks) {
+ /* Check for frequency increase */
+ if (load > dbs_tuners_ins.up_threshold) {
this_dbs_info->down_skip = 0;
- this_dbs_info->prev_cpu_idle_down =
- this_dbs_info->prev_cpu_idle_up;
/* if we are already at full speed then break out early */
if (this_dbs_info->requested_freq == policy->max)
@@ -398,49 +478,24 @@ static void dbs_check_cpu(int cpu)
return;
}
- /* Check for frequency decrease */
- this_dbs_info->down_skip++;
- if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor)
- return;
-
- /* Check for frequency decrease */
- total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
- tmp_idle_ticks = total_idle_ticks -
- this_dbs_info->prev_cpu_idle_down;
- this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
-
- if (tmp_idle_ticks < idle_ticks)
- idle_ticks = tmp_idle_ticks;
-
- /* Scale idle ticks by 100 and compare with up and down ticks */
- idle_ticks *= 100;
- this_dbs_info->down_skip = 0;
-
- freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
- dbs_tuners_ins.sampling_down_factor;
- down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
- usecs_to_jiffies(freq_down_sampling_rate);
-
- if (idle_ticks > down_idle_ticks) {
- /*
- * if we are already at the lowest speed then break out early
- * or if we 'cannot' reduce the speed as the user might want
- * freq_target to be zero
- */
- if (this_dbs_info->requested_freq == policy->min
- || dbs_tuners_ins.freq_step == 0)
- return;
-
+ /*
+ * The optimal frequency is the lowest frequency that can support
+ * the current CPU usage without triggering the up policy. To be
+ * safe, we stay 10 points under the threshold.
+ */
+ if (load < (dbs_tuners_ins.down_threshold - 10)) {
freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
- /* max freq cannot be less than 100. But who knows.... */
- if (unlikely(freq_target == 0))
- freq_target = 5;
-
this_dbs_info->requested_freq -= freq_target;
if (this_dbs_info->requested_freq < policy->min)
this_dbs_info->requested_freq = policy->min;
+ /*
+ * if we cannot reduce the frequency anymore, break out early
+ */
+ if (policy->cur == policy->min)
+ return;
+
__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
CPUFREQ_RELATION_H);
return;
@@ -449,27 +504,45 @@ static void dbs_check_cpu(int cpu)
static void do_dbs_timer(struct work_struct *work)
{
- int i;
- mutex_lock(&dbs_mutex);
- for_each_online_cpu(i)
- dbs_check_cpu(i);
- schedule_delayed_work(&dbs_work,
- usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
- mutex_unlock(&dbs_mutex);
+ struct cpu_dbs_info_s *dbs_info =
+ container_of(work, struct cpu_dbs_info_s, work.work);
+ unsigned int cpu = dbs_info->cpu;
+
+ /* We want all CPUs to do sampling nearly on the same jiffy */
+ int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+ delay -= jiffies % delay;
+
+ if (lock_policy_rwsem_write(cpu) < 0)
+ return;
+
+ if (!dbs_info->enable) {
+ unlock_policy_rwsem_write(cpu);
+ return;
+ }
+
+ dbs_check_cpu(dbs_info);
+
+ queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
+ unlock_policy_rwsem_write(cpu);
}
-static inline void dbs_timer_init(void)
+static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
- init_timer_deferrable(&dbs_work.timer);
- schedule_delayed_work(&dbs_work,
- usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
- return;
+ /* We want all CPUs to do sampling nearly on the same jiffy */
+ int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+ delay -= jiffies % delay;
+
+ dbs_info->enable = 1;
+ INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
+ queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
+ delay);
}
-static inline void dbs_timer_exit(void)
+static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
- cancel_delayed_work(&dbs_work);
- return;
+ dbs_info->enable = 0;
+ cancel_delayed_work(&dbs_info->work);
}
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -503,11 +576,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
j_dbs_info = &per_cpu(cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;
- j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu);
- j_dbs_info->prev_cpu_idle_down
- = j_dbs_info->prev_cpu_idle_up;
+ j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+ &j_dbs_info->prev_cpu_wall);
+ if (dbs_tuners_ins.ignore_nice) {
+ j_dbs_info->prev_cpu_nice =
+ kstat_cpu(j).cpustat.nice;
+ }
}
- this_dbs_info->enable = 1;
this_dbs_info->down_skip = 0;
this_dbs_info->requested_freq = policy->cur;
@@ -523,38 +598,36 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
if (latency == 0)
latency = 1;
- def_sampling_rate = 10 * latency *
- DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
-
- if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
- def_sampling_rate = MIN_STAT_SAMPLING_RATE;
+ def_sampling_rate =
+ max(latency * LATENCY_MULTIPLIER,
+ MIN_STAT_SAMPLING_RATE);
dbs_tuners_ins.sampling_rate = def_sampling_rate;
- dbs_timer_init();
cpufreq_register_notifier(
&dbs_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
}
+ dbs_timer_init(this_dbs_info);
mutex_unlock(&dbs_mutex);
+
break;
case CPUFREQ_GOV_STOP:
mutex_lock(&dbs_mutex);
- this_dbs_info->enable = 0;
+ dbs_timer_exit(this_dbs_info);
sysfs_remove_group(&policy->kobj, &dbs_attr_group);
dbs_enable--;
+
/*
* Stop the timerschedule work when this governor
* is used for the last time
*/
- if (dbs_enable == 0) {
- dbs_timer_exit();
+ if (dbs_enable == 0)
cpufreq_unregister_notifier(
&dbs_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
- }
mutex_unlock(&dbs_mutex);
@@ -571,6 +644,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
this_dbs_info->cur_policy,
policy->min, CPUFREQ_RELATION_L);
mutex_unlock(&dbs_mutex);
+
break;
}
return 0;
@@ -588,23 +662,33 @@ struct cpufreq_governor cpufreq_gov_conservative = {
static int __init cpufreq_gov_dbs_init(void)
{
- return cpufreq_register_governor(&cpufreq_gov_conservative);
+ int err;
+
+ kconservative_wq = create_workqueue("kconservative");
+ if (!kconservative_wq) {
+ printk(KERN_ERR "Creation of kconservative failed\n");
+ return -EFAULT;
+ }
+
+ err = cpufreq_register_governor(&cpufreq_gov_conservative);
+ if (err)
+ destroy_workqueue(kconservative_wq);
+
+ return err;
}
static void __exit cpufreq_gov_dbs_exit(void)
{
- /* Make sure that the scheduled work is indeed not running */
- flush_scheduled_work();
-
cpufreq_unregister_governor(&cpufreq_gov_conservative);
+ destroy_workqueue(kconservative_wq);
}
-MODULE_AUTHOR ("Alexander Clouter <alex-kernel@digriz.org.uk>");
-MODULE_DESCRIPTION ("'cpufreq_conservative' - A dynamic cpufreq governor for "
+MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
+MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
"Low Latency Frequency Transition capable processors "
"optimised for use in a battery environment");
-MODULE_LICENSE ("GPL");
+MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
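
Taken together, the conservative-governor changes above replace the old
idle-tick bookkeeping with an explicit load percentage and per-CPU
deferrable work. A hedged, simplified model of the resulting decision
logic (plain C; the names and the exact stepping details are
illustrative, not the driver's):

    struct tuners { unsigned int up_threshold, down_threshold, freq_step; };

    /* load as dbs_check_cpu() now derives it from wall/idle deltas */
    static unsigned int load_percent(unsigned int wall_us, unsigned int idle_us)
    {
            if (!wall_us || wall_us < idle_us)
                    return 0;               /* the driver skips such samples */
            return 100 * (wall_us - idle_us) / wall_us;
    }

    static unsigned int next_freq(const struct tuners *t, unsigned int load,
                                  unsigned int cur, unsigned int min,
                                  unsigned int max)
    {
            unsigned int step = t->freq_step * max / 100;

            if (load > t->up_threshold)
                    return (cur + step < max) ? cur + step : max;
            if (load < t->down_threshold - 10)
                    return (cur > min + step) ? cur - step : min;
            return cur;                     /* inside the band: stay put */
    }

With illustrative thresholds of 80/20 and the default 5% step, a load of
90 on a 2 GHz-max CPU steps up by 100 kHz-table increments of 100 MHz per
sample, and a load of 5 steps down the same amount, never past policy->min.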
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 6f45b1658a6..338f428a15b 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -21,6 +21,7 @@
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
+#include <linux/sched.h>
/*
* dbs is used in this file as a shortform for demandbased switching
@@ -51,8 +52,20 @@ static unsigned int def_sampling_rate;
(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE \
(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
+/* The MIN_SAMPLING_RATE above will vanish together with its sysfs file soon.
+ * Define the minimal settable sampling rate as the greater of:
+ * - "HW transition latency" * 100 (same as default sampling rate / 10)
+ * - MIN_STAT_SAMPLING_RATE
+ * so that userspace cannot shoot itself in the foot.
+ */
+static unsigned int minimum_sampling_rate(void)
+{
+ return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
+}
+
+/* This will also vanish soon with removing sampling_rate_max */
#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
-#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
+#define LATENCY_MULTIPLIER (1000)
#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
static void do_dbs_timer(struct work_struct *work);
@@ -65,14 +78,14 @@ struct cpu_dbs_info_s {
cputime64_t prev_cpu_wall;
cputime64_t prev_cpu_nice;
struct cpufreq_policy *cur_policy;
- struct delayed_work work;
+ struct delayed_work work;
struct cpufreq_frequency_table *freq_table;
unsigned int freq_lo;
unsigned int freq_lo_jiffies;
unsigned int freq_hi_jiffies;
int cpu;
unsigned int enable:1,
- sample_type:1;
+ sample_type:1;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
@@ -203,12 +216,28 @@ static void ondemand_powersave_bias_init(void)
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
- return sprintf (buf, "%u\n", MAX_SAMPLING_RATE);
+ static int print_once;
+
+ if (!print_once) {
+ printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
+ "sysfs file is deprecated - used by: %s\n",
+ current->comm);
+ print_once = 1;
+ }
+ return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}
static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
- return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
+ static int print_once;
+
+ if (!print_once) {
+ printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_min "
+ "sysfs file is deprecated - used by: %s\n",
+ current->comm);
+ print_once = 1;
+ }
+ return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}
#define define_one_ro(_name) \
@@ -238,13 +267,11 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
ret = sscanf(buf, "%u", &input);
mutex_lock(&dbs_mutex);
- if (ret != 1 || input > MAX_SAMPLING_RATE
- || input < MIN_SAMPLING_RATE) {
+ if (ret != 1) {
mutex_unlock(&dbs_mutex);
return -EINVAL;
}
-
- dbs_tuners_ins.sampling_rate = input;
+ dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
mutex_unlock(&dbs_mutex);
return count;
@@ -279,14 +306,14 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
unsigned int j;
ret = sscanf(buf, "%u", &input);
- if ( ret != 1 )
+ if (ret != 1)
return -EINVAL;
- if ( input > 1 )
+ if (input > 1)
input = 1;
mutex_lock(&dbs_mutex);
- if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
+ if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
mutex_unlock(&dbs_mutex);
return count;
}
@@ -337,7 +364,7 @@ define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(powersave_bias);
-static struct attribute * dbs_attributes[] = {
+static struct attribute *dbs_attributes[] = {
&sampling_rate_max.attr,
&sampling_rate_min.attr,
&sampling_rate.attr,
@@ -512,8 +539,7 @@ static void do_dbs_timer(struct work_struct *work)
}
} else {
__cpufreq_driver_target(dbs_info->cur_policy,
- dbs_info->freq_lo,
- CPUFREQ_RELATION_H);
+ dbs_info->freq_lo, CPUFREQ_RELATION_H);
}
queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
unlock_policy_rwsem_write(cpu);
@@ -530,7 +556,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
dbs_info->sample_type = DBS_NORMAL_SAMPLE;
INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
- delay);
+ delay);
}
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
@@ -591,11 +617,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
if (latency == 0)
latency = 1;
- def_sampling_rate = latency *
- DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
-
- if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
- def_sampling_rate = MIN_STAT_SAMPLING_RATE;
+ def_sampling_rate =
+ max(latency * LATENCY_MULTIPLIER,
+ MIN_STAT_SAMPLING_RATE);
dbs_tuners_ins.sampling_rate = def_sampling_rate;
}
@@ -617,12 +641,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
mutex_lock(&dbs_mutex);
if (policy->max < this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(this_dbs_info->cur_policy,
- policy->max,
- CPUFREQ_RELATION_H);
+ policy->max, CPUFREQ_RELATION_H);
else if (policy->min > this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(this_dbs_info->cur_policy,
- policy->min,
- CPUFREQ_RELATION_L);
+ policy->min, CPUFREQ_RELATION_L);
mutex_unlock(&dbs_mutex);
break;
}
@@ -677,7 +699,7 @@ static void __exit cpufreq_gov_dbs_exit(void)
MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
- "Low Latency Frequency Transition capable processors");
+ "Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
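
Both governors now share the same clamping policy for user-supplied
sampling rates: instead of rejecting out-of-range writes, the store
handler raises them to minimum_sampling_rate(). A hedged sketch of the
rule in plain C (MAX_OF and the parameter names are illustrative):

    #define MAX_OF(a, b) ((a) > (b) ? (a) : (b))

    static unsigned int clamp_sampling_rate(unsigned int input,
                                            unsigned int def_sampling_rate,
                                            unsigned int min_stat_rate)
    {
            /* floor: default rate / 10, but never below the stats minimum */
            unsigned int floor = MAX_OF(def_sampling_rate / 10, min_stat_rate);

            return MAX_OF(input, floor);
    }

    /* e.g. clamp_sampling_rate(1, 200000, 20000) returns 20000 */

The only way a write can still fail is a parse error (ret != 1), which
keeps the sysfs interface usable while the deprecated min/max files are
phased out.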
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index c0ff97d375d..5a62d678dd1 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -2,7 +2,7 @@
* drivers/cpufreq/cpufreq_stats.c
*
* Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
- * (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
+ * (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -23,7 +23,7 @@
static spinlock_t cpufreq_stats_lock;
-#define CPUFREQ_STATDEVICE_ATTR(_name,_mode,_show) \
+#define CPUFREQ_STATDEVICE_ATTR(_name, _mode, _show) \
static struct freq_attr _attr_##_name = {\
.attr = {.name = __stringify(_name), .mode = _mode, }, \
.show = _show,\
@@ -50,8 +50,7 @@ struct cpufreq_stats_attribute {
ssize_t(*show) (struct cpufreq_stats *, char *);
};
-static int
-cpufreq_stats_update (unsigned int cpu)
+static int cpufreq_stats_update(unsigned int cpu)
{
struct cpufreq_stats *stat;
unsigned long long cur_time;
@@ -68,8 +67,7 @@ cpufreq_stats_update (unsigned int cpu)
return 0;
}
-static ssize_t
-show_total_trans(struct cpufreq_policy *policy, char *buf)
+static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
if (!stat)
@@ -78,8 +76,7 @@ show_total_trans(struct cpufreq_policy *policy, char *buf)
per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);
}
-static ssize_t
-show_time_in_state(struct cpufreq_policy *policy, char *buf)
+static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
ssize_t len = 0;
int i;
@@ -89,14 +86,14 @@ show_time_in_state(struct cpufreq_policy *policy, char *buf)
cpufreq_stats_update(stat->cpu);
for (i = 0; i < stat->state_num; i++) {
len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
- (unsigned long long)cputime64_to_clock_t(stat->time_in_state[i]));
+ (unsigned long long)
+ cputime64_to_clock_t(stat->time_in_state[i]));
}
return len;
}
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
-static ssize_t
-show_trans_table(struct cpufreq_policy *policy, char *buf)
+static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
ssize_t len = 0;
int i, j;
@@ -139,11 +136,11 @@ show_trans_table(struct cpufreq_policy *policy, char *buf)
return PAGE_SIZE;
return len;
}
-CPUFREQ_STATDEVICE_ATTR(trans_table,0444,show_trans_table);
+CPUFREQ_STATDEVICE_ATTR(trans_table, 0444, show_trans_table);
#endif
-CPUFREQ_STATDEVICE_ATTR(total_trans,0444,show_total_trans);
-CPUFREQ_STATDEVICE_ATTR(time_in_state,0444,show_time_in_state);
+CPUFREQ_STATDEVICE_ATTR(total_trans, 0444, show_total_trans);
+CPUFREQ_STATDEVICE_ATTR(time_in_state, 0444, show_time_in_state);
static struct attribute *default_attrs[] = {
&_attr_total_trans.attr,
@@ -158,8 +155,7 @@ static struct attribute_group stats_attr_group = {
.name = "stats"
};
-static int
-freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
+static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
{
int index;
for (index = 0; index < stat->max_state; index++)
@@ -183,8 +179,7 @@ static void cpufreq_stats_free_table(unsigned int cpu)
cpufreq_cpu_put(policy);
}
-static int
-cpufreq_stats_create_table (struct cpufreq_policy *policy,
+static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
{
unsigned int i, j, count = 0, ret = 0;
@@ -194,7 +189,8 @@ cpufreq_stats_create_table (struct cpufreq_policy *policy,
unsigned int cpu = policy->cpu;
if (per_cpu(cpufreq_stats_table, cpu))
return -EBUSY;
- if ((stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL)) == NULL)
+ stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
+ if (stat == NULL)
return -ENOMEM;
data = cpufreq_cpu_get(cpu);
@@ -203,13 +199,14 @@ cpufreq_stats_create_table (struct cpufreq_policy *policy,
goto error_get_fail;
}
- if ((ret = sysfs_create_group(&data->kobj, &stats_attr_group)))
+ ret = sysfs_create_group(&data->kobj, &stats_attr_group);
+ if (ret)
goto error_out;
stat->cpu = cpu;
per_cpu(cpufreq_stats_table, cpu) = stat;
- for (i=0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
unsigned int freq = table[i].frequency;
if (freq == CPUFREQ_ENTRY_INVALID)
continue;
@@ -255,9 +252,8 @@ error_get_fail:
return ret;
}
-static int
-cpufreq_stat_notifier_policy (struct notifier_block *nb, unsigned long val,
- void *data)
+static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
+ unsigned long val, void *data)
{
int ret;
struct cpufreq_policy *policy = data;
@@ -268,14 +264,14 @@ cpufreq_stat_notifier_policy (struct notifier_block *nb, unsigned long val,
table = cpufreq_frequency_get_table(cpu);
if (!table)
return 0;
- if ((ret = cpufreq_stats_create_table(policy, table)))
+ ret = cpufreq_stats_create_table(policy, table);
+ if (ret)
return ret;
return 0;
}
-static int
-cpufreq_stat_notifier_trans (struct notifier_block *nb, unsigned long val,
- void *data)
+static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
+ unsigned long val, void *data)
{
struct cpufreq_freqs *freq = data;
struct cpufreq_stats *stat;
@@ -340,19 +336,20 @@ static struct notifier_block notifier_trans_block = {
.notifier_call = cpufreq_stat_notifier_trans
};
-static int
-__init cpufreq_stats_init(void)
+static int __init cpufreq_stats_init(void)
{
int ret;
unsigned int cpu;
spin_lock_init(&cpufreq_stats_lock);
- if ((ret = cpufreq_register_notifier(&notifier_policy_block,
- CPUFREQ_POLICY_NOTIFIER)))
+ ret = cpufreq_register_notifier(&notifier_policy_block,
+ CPUFREQ_POLICY_NOTIFIER);
+ if (ret)
return ret;
- if ((ret = cpufreq_register_notifier(&notifier_trans_block,
- CPUFREQ_TRANSITION_NOTIFIER))) {
+ ret = cpufreq_register_notifier(&notifier_trans_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (ret) {
cpufreq_unregister_notifier(&notifier_policy_block,
CPUFREQ_POLICY_NOTIFIER);
return ret;
@@ -364,8 +361,7 @@ __init cpufreq_stats_init(void)
}
return 0;
}
-static void
-__exit cpufreq_stats_exit(void)
+static void __exit cpufreq_stats_exit(void)
{
unsigned int cpu;
@@ -379,10 +375,10 @@ __exit cpufreq_stats_exit(void)
}
}
-MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>");
-MODULE_DESCRIPTION ("'cpufreq_stats' - A driver to export cpufreq stats "
+MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
+MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
"through sysfs filesystem");
-MODULE_LICENSE ("GPL");
+MODULE_LICENSE("GPL");
module_init(cpufreq_stats_init);
module_exit(cpufreq_stats_exit);
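
The stats code reindented above keeps a simple credit scheme: on every
update, the time elapsed since the previous update is credited to the
frequency that was current during that interval. A minimal sketch of the
bookkeeping (illustrative types; the driver additionally guards this with
cpufreq_stats_lock):

    struct stats {
            unsigned long long last_time;
            unsigned int last_index;            /* row of the current freq */
            unsigned long long time_in_state[8];
    };

    static void stats_update(struct stats *s, unsigned long long now)
    {
            s->time_in_state[s->last_index] += now - s->last_time;
            s->last_time = now;
    }

    /* on a frequency transition: credit the old state, then switch rows */
    static void stats_transition(struct stats *s, unsigned long long now,
                                 unsigned int new_index)
    {
            stats_update(s, now);
            s->last_index = new_index;
            /* total_trans++ and trans_table[old][new]++ happen here too */
    }

show_time_in_state then only has to run stats_update() once and print the
accumulated rows, which is what the reformatted loop above does.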
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 1442bbada05..66d2d1d6c80 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -24,9 +24,6 @@
#include <linux/sysfs.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
-
-
/**
* A few values needed by the userspace governor
*/
@@ -37,7 +34,7 @@ static DEFINE_PER_CPU(unsigned int, cpu_set_freq); /* CPU freq desired by
userspace */
static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
-static DEFINE_MUTEX (userspace_mutex);
+static DEFINE_MUTEX(userspace_mutex);
static int cpus_using_userspace_governor;
#define dprintk(msg...) \
@@ -46,9 +43,9 @@ static int cpus_using_userspace_governor;
/* keep track of frequency transitions */
static int
userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
- void *data)
+ void *data)
{
- struct cpufreq_freqs *freq = data;
+ struct cpufreq_freqs *freq = data;
if (!per_cpu(cpu_is_managed, freq->cpu))
return 0;
@@ -57,11 +54,11 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
freq->cpu, freq->new);
per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
- return 0;
+ return 0;
}
static struct notifier_block userspace_cpufreq_notifier_block = {
- .notifier_call = userspace_cpufreq_notifier
+ .notifier_call = userspace_cpufreq_notifier
};
@@ -93,8 +90,11 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
* We're safe from concurrent calls to ->target() here
* as we hold the userspace_mutex lock. If we were calling
* cpufreq_driver_target, a deadlock situation might occur:
- * A: cpufreq_set (lock userspace_mutex) -> cpufreq_driver_target(lock policy->lock)
- * B: cpufreq_set_policy(lock policy->lock) -> __cpufreq_governor -> cpufreq_governor_userspace (lock userspace_mutex)
+ * A: cpufreq_set (lock userspace_mutex) ->
+ * cpufreq_driver_target(lock policy->lock)
+ * B: cpufreq_set_policy(lock policy->lock) ->
+ * __cpufreq_governor ->
+ * cpufreq_governor_userspace (lock userspace_mutex)
*/
ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
@@ -210,9 +210,10 @@ static void __exit cpufreq_gov_userspace_exit(void)
}
-MODULE_AUTHOR ("Dominik Brodowski <linux@brodo.de>, Russell King <rmk@arm.linux.org.uk>");
-MODULE_DESCRIPTION ("CPUfreq policy governor 'userspace'");
-MODULE_LICENSE ("GPL");
+MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>, "
+ "Russell King <rmk@arm.linux.org.uk>");
+MODULE_DESCRIPTION("CPUfreq policy governor 'userspace'");
+MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE
fs_initcall(cpufreq_gov_userspace_init);
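
The deadlock comment reflowed above describes a classic AB-BA ordering
problem: one path takes userspace_mutex and then the policy lock, the
other takes them in the opposite order. A minimal pthreads illustration
of why that pairing can wedge (the governor sidesteps it by calling
__cpufreq_driver_target(), which per the comment does not retake the
policy lock):

    #include <pthread.h>

    static pthread_mutex_t userspace_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t policy_lock     = PTHREAD_MUTEX_INITIALIZER;

    static void *path_a(void *arg)          /* cpufreq_set() analogue */
    {
            pthread_mutex_lock(&userspace_mutex);
            pthread_mutex_lock(&policy_lock);     /* blocks if B holds it */
            pthread_mutex_unlock(&policy_lock);
            pthread_mutex_unlock(&userspace_mutex);
            return arg;
    }

    static void *path_b(void *arg)          /* cpufreq_set_policy() analogue */
    {
            pthread_mutex_lock(&policy_lock);
            pthread_mutex_lock(&userspace_mutex); /* blocks if A holds it */
            pthread_mutex_unlock(&userspace_mutex);
            pthread_mutex_unlock(&policy_lock);
            return arg;
    }

Run concurrently, each thread can end up holding one lock while waiting
forever for the other; enforcing a single global lock order (or, as here,
not taking the second lock at all) removes the risk.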
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 9071d80fbba..a9bd3a05a68 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -28,7 +28,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
unsigned int max_freq = 0;
unsigned int i;
- for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
+ for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
unsigned int freq = table[i].frequency;
if (freq == CPUFREQ_ENTRY_INVALID) {
dprintk("table entry %u is invalid, skipping\n", i);
@@ -70,7 +70,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
- for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
+ for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
unsigned int freq = table[i].frequency;
if (freq == CPUFREQ_ENTRY_INVALID)
continue;
@@ -125,13 +125,13 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
if (!cpu_online(policy->cpu))
return -EINVAL;
- for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
+ for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
unsigned int freq = table[i].frequency;
if (freq == CPUFREQ_ENTRY_INVALID)
continue;
if ((freq < policy->min) || (freq > policy->max))
continue;
- switch(relation) {
+ switch (relation) {
case CPUFREQ_RELATION_H:
if (freq <= target_freq) {
if (freq >= optimal.frequency) {
@@ -178,7 +178,7 @@ static DEFINE_PER_CPU(struct cpufreq_frequency_table *, show_table);
/**
* show_available_freqs - show available frequencies for the specified CPU
*/
-static ssize_t show_available_freqs (struct cpufreq_policy *policy, char *buf)
+static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf)
{
unsigned int i = 0;
unsigned int cpu = policy->cpu;
@@ -190,7 +190,7 @@ static ssize_t show_available_freqs (struct cpufreq_policy *policy, char *buf)
table = per_cpu(show_table, cpu);
- for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
+ for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
continue;
count += sprintf(&buf[count], "%d ", table[i].frequency);
@@ -234,6 +234,6 @@ struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
-MODULE_AUTHOR ("Dominik Brodowski <linux@brodo.de>");
-MODULE_DESCRIPTION ("CPUfreq frequency table helpers");
-MODULE_LICENSE ("GPL");
+MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
+MODULE_DESCRIPTION("CPUfreq frequency table helpers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index e522144cba3..01afd758072 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -86,7 +86,7 @@ config ZCRYPT_MONOLITHIC
config CRYPTO_SHA1_S390
tristate "SHA1 digest algorithm"
depends on S390
- select CRYPTO_ALGAPI
+ select CRYPTO_HASH
help
This is the s390 hardware accelerated implementation of the
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
@@ -94,7 +94,7 @@ config CRYPTO_SHA1_S390
config CRYPTO_SHA256_S390
tristate "SHA256 digest algorithm"
depends on S390
- select CRYPTO_ALGAPI
+ select CRYPTO_HASH
help
This is the s390 hardware accelerated implementation of the
SHA256 secure hash standard (DFIPS 180-2).
@@ -105,7 +105,7 @@ config CRYPTO_SHA256_S390
config CRYPTO_SHA512_S390
tristate "SHA384 and SHA512 digest algorithm"
depends on S390
- select CRYPTO_ALGAPI
+ select CRYPTO_HASH
help
This is the s390 hardware accelerated implementation of the
SHA512 secure hash standard.
@@ -200,4 +200,13 @@ config CRYPTO_DEV_IXP4XX
help
Driver for the IXP4xx NPE crypto engine.
+config CRYPTO_DEV_PPC4XX
+ tristate "Driver AMCC PPC4xx crypto accelerator"
+ depends on PPC && 4xx
+ select CRYPTO_HASH
+ select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
+ help
+ This option enables support for the AMCC PPC4xx crypto accelerator.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 73557b2968d..9bf4a2bc884 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
+obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
diff --git a/drivers/crypto/amcc/Makefile b/drivers/crypto/amcc/Makefile
new file mode 100644
index 00000000000..aa376e8d5ed
--- /dev/null
+++ b/drivers/crypto/amcc/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
+crypto4xx-objs := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
new file mode 100644
index 00000000000..61b6e1bec8c
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -0,0 +1,293 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file implements the Linux CryptoAPI algorithm handlers for the engine.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock_types.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <linux/hash.h>
+#include <crypto/internal/hash.h>
+#include <linux/dma-mapping.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include "crypto4xx_reg_def.h"
+#include "crypto4xx_sa.h"
+#include "crypto4xx_core.h"
+
+void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
+ u32 save_iv, u32 ld_h, u32 ld_iv, u32 hdr_proc,
+ u32 h, u32 c, u32 pad_type, u32 op_grp, u32 op,
+ u32 dir)
+{
+ sa->sa_command_0.w = 0;
+ sa->sa_command_0.bf.save_hash_state = save_h;
+ sa->sa_command_0.bf.save_iv = save_iv;
+ sa->sa_command_0.bf.load_hash_state = ld_h;
+ sa->sa_command_0.bf.load_iv = ld_iv;
+ sa->sa_command_0.bf.hdr_proc = hdr_proc;
+ sa->sa_command_0.bf.hash_alg = h;
+ sa->sa_command_0.bf.cipher_alg = c;
+ sa->sa_command_0.bf.pad_type = pad_type & 3;
+ sa->sa_command_0.bf.extend_pad = pad_type >> 2;
+ sa->sa_command_0.bf.op_group = op_grp;
+ sa->sa_command_0.bf.opcode = op;
+ sa->sa_command_0.bf.dir = dir;
+}
+
+void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, u32 hmac_mc,
+ u32 cfb, u32 esn, u32 sn_mask, u32 mute,
+ u32 cp_pad, u32 cp_pay, u32 cp_hdr)
+{
+ sa->sa_command_1.w = 0;
+ sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2;
+ sa->sa_command_1.bf.crypto_mode9_8 = cm & 3;
+ sa->sa_command_1.bf.feedback_mode = cfb;
+ sa->sa_command_1.bf.sa_rev = 1;
+ sa->sa_command_1.bf.extended_seq_num = esn;
+ sa->sa_command_1.bf.seq_num_mask = sn_mask;
+ sa->sa_command_1.bf.mutable_bit_proc = mute;
+ sa->sa_command_1.bf.copy_pad = cp_pad;
+ sa->sa_command_1.bf.copy_payload = cp_pay;
+ sa->sa_command_1.bf.copy_hdr = cp_hdr;
+}
+
+int crypto4xx_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->direction = DIR_OUTBOUND;
+ ctx->hash_final = 0;
+ ctx->is_hash = 0;
+ ctx->pd_ctl = 0x1;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, req->info,
+ get_dynamic_sa_iv_size(ctx));
+}
+
+int crypto4xx_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->direction = DIR_INBOUND;
+ ctx->hash_final = 0;
+ ctx->is_hash = 0;
+ ctx->pd_ctl = 1;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, req->info,
+ get_dynamic_sa_iv_size(ctx));
+}
+
+/**
+ * AES Functions
+ */
+static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
+ const u8 *key,
+ unsigned int keylen,
+ unsigned char cm,
+ u8 fb)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct dynamic_sa_ctl *sa;
+ int rc;
+
+ if (keylen != AES_KEYSIZE_256 &&
+ keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_128) {
+ crypto_ablkcipher_set_flags(cipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ /* Create SA */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4);
+ if (rc)
+ return rc;
+
+ if (ctx->state_record_dma_addr == 0) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc) {
+ crypto4xx_free_sa(ctx);
+ return rc;
+ }
+ }
+ /* Setup SA */
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ ctx->hash_final = 0;
+
+ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
+ SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT,
+ DIR_INBOUND);
+
+ set_dynamic_sa_command_1(sa, cm, SA_HASH_MODE_HASH,
+ fb, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+ crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+ key, keylen);
+ sa->sa_contents = SA_AES_CONTENTS | (keylen << 2);
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+ ctx->is_hash = 0;
+ ctx->direction = DIR_INBOUND;
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void *)&ctx->state_record_dma_addr, 4);
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+
+ return 0;
+}
+
+int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CBC,
+ CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+/**
+ * HASH SHA1 Functions
+ */
+static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
+ unsigned int sa_len,
+ unsigned char ha,
+ unsigned char hm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct dynamic_sa_ctl *sa;
+ struct dynamic_sa_hash160 *sa_in;
+ int rc;
+
+ ctx->dev = my_alg->dev;
+ ctx->is_hash = 1;
+ ctx->hash_final = 0;
+
+ /* Create SA */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, sa_len);
+ if (rc)
+ return rc;
+
+ if (ctx->state_record_dma_addr == 0) {
+ crypto4xx_alloc_state_record(ctx);
+ if (!ctx->state_record_dma_addr) {
+ crypto4xx_free_sa(ctx);
+ return -ENOMEM;
+ }
+ }
+
+ tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
+ SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL,
+ SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+ SA_OPCODE_HASH, DIR_INBOUND);
+ set_dynamic_sa_command_1(sa, 0, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+ ctx->direction = DIR_INBOUND;
+ sa->sa_contents = SA_HASH160_CONTENTS;
+ sa_in = (struct dynamic_sa_hash160 *) ctx->sa_in;
+ /* Need to zero hash digest in SA */
+ memset(sa_in->inner_digest, 0, sizeof(sa_in->inner_digest));
+ memset(sa_in->outer_digest, 0, sizeof(sa_in->outer_digest));
+ sa_in->state_ptr = ctx->state_record_dma_addr;
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+
+ return 0;
+}
+
+int crypto4xx_hash_init(struct ahash_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ int ds;
+ struct dynamic_sa_ctl *sa;
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ ds = crypto_ahash_digestsize(
+ __crypto_ahash_cast(req->base.tfm));
+ sa->sa_command_0.bf.digest_len = ds >> 2;
+ sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA;
+ ctx->is_hash = 1;
+ ctx->direction = DIR_INBOUND;
+
+ return 0;
+}
+
+int crypto4xx_hash_update(struct ahash_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->is_hash = 1;
+ ctx->hash_final = 0;
+ ctx->pd_ctl = 0x11;
+ ctx->direction = DIR_INBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src,
+ (struct scatterlist *) req->result,
+ req->nbytes, NULL, 0);
+}
+
+int crypto4xx_hash_final(struct ahash_request *req)
+{
+ return 0;
+}
+
+int crypto4xx_hash_digest(struct ahash_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->hash_final = 1;
+ ctx->pd_ctl = 0x11;
+ ctx->direction = DIR_INBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src,
+ (struct scatterlist *) req->result,
+ req->nbytes, NULL, 0);
+}
+
+/**
+ * SHA1 Algorithm
+ */
+int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
+{
+ return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1,
+ SA_HASH_MODE_HASH);
+}
+
+
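
Once crypto4xx registers these handlers, kernel users reach them through
the generic CryptoAPI rather than calling the functions above directly. A
hedged sketch of a caller using the then-current ablkcipher interface for
"cbc(aes)" (error handling trimmed; the key, IV and scatterlists are
assumed set up by the caller, and a real asynchronous user would also
install a completion callback with ablkcipher_request_set_callback()):

    #include <linux/crypto.h>
    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/scatterlist.h>
    #include <crypto/aes.h>

    static int demo_cbc_aes_encrypt(const u8 *key, u8 *iv,
                                    struct scatterlist *src,
                                    struct scatterlist *dst,
                                    unsigned int nbytes)
    {
            struct crypto_ablkcipher *tfm;
            struct ablkcipher_request *req;
            int ret;

            tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            ret = crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
            if (ret)
                    goto out_free_tfm;

            req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    ret = -ENOMEM;
                    goto out_free_tfm;
            }

            ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
            ret = crypto_ablkcipher_encrypt(req);   /* may be -EINPROGRESS */

            ablkcipher_request_free(req);
    out_free_tfm:
            crypto_free_ablkcipher(tfm);
            return ret;
    }

On a PPC4xx board with this driver loaded, the allocation above can bind
to the hardware implementation; crypto4xx_encrypt() then builds the
packet descriptor and hands it to the engine.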
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
new file mode 100644
index 00000000000..4c0dfb2b872
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -0,0 +1,1310 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file implements the AMCC crypto offload Linux device driver for use
+ * with the Linux CryptoAPI.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock_types.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+#include <asm/cacheflush.h>
+#include <crypto/internal/hash.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include "crypto4xx_reg_def.h"
+#include "crypto4xx_core.h"
+#include "crypto4xx_sa.h"
+
+#define PPC4XX_SEC_VERSION_STR "0.5"
+
+/**
+ * PPC4xx Crypto Engine Initialization Routine
+ */
+static void crypto4xx_hw_init(struct crypto4xx_device *dev)
+{
+ union ce_ring_size ring_size;
+ union ce_ring_contol ring_ctrl;
+ union ce_part_ring_size part_ring_size;
+ union ce_io_threshold io_threshold;
+ u32 rand_num;
+ union ce_pe_dma_cfg pe_dma_cfg;
+
+ writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
+ /* setup PE DMA; assert reset of sg, pdr and pe, then release it */
+ pe_dma_cfg.w = 0;
+ pe_dma_cfg.bf.bo_sgpd_en = 1;
+ pe_dma_cfg.bf.bo_data_en = 0;
+ pe_dma_cfg.bf.bo_sa_en = 1;
+ pe_dma_cfg.bf.bo_pd_en = 1;
+ pe_dma_cfg.bf.dynamic_sa_en = 1;
+ pe_dma_cfg.bf.reset_sg = 1;
+ pe_dma_cfg.bf.reset_pdr = 1;
+ pe_dma_cfg.bf.reset_pe = 1;
+ writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
+ /* release reset of pe, sg and pdr */
+ pe_dma_cfg.bf.pe_mode = 0;
+ pe_dma_cfg.bf.reset_sg = 0;
+ pe_dma_cfg.bf.reset_pdr = 0;
+ pe_dma_cfg.bf.reset_pe = 0;
+ pe_dma_cfg.bf.bo_td_en = 0;
+ writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
+ writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
+ writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
+ writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
+ get_random_bytes(&rand_num, sizeof(rand_num));
+ writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
+ get_random_bytes(&rand_num, sizeof(rand_num));
+ writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
+ ring_size.w = 0;
+ ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
+ ring_size.bf.ring_size = PPC4XX_NUM_PD;
+ writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
+ ring_ctrl.w = 0;
+ writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
+ writel(PPC4XX_DC_3DES_EN, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+ writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
+ writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
+ part_ring_size.w = 0;
+ part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
+ part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
+ writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
+ writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
+ io_threshold.w = 0;
+ io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
+ io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD;
+ writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
+ writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
+ writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
+ writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
+ writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
+ writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
+ writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
+ writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
+ /* now enable pe; sg and pdr stay out of reset */
+ pe_dma_cfg.bf.pe_mode = 1;
+ pe_dma_cfg.bf.reset_sg = 0;
+ pe_dma_cfg.bf.reset_pdr = 0;
+ pe_dma_cfg.bf.reset_pe = 0;
+ pe_dma_cfg.bf.bo_td_en = 0;
+ writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
+ /* clear all pending interrupts */
+ writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
+ writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
+ writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
+ writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
+ writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+}
+
+int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
+{
+ ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
+ &ctx->sa_in_dma_addr, GFP_ATOMIC);
+ if (ctx->sa_in == NULL)
+ return -ENOMEM;
+
+ ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
+ &ctx->sa_out_dma_addr, GFP_ATOMIC);
+ if (ctx->sa_out == NULL) {
+ dma_free_coherent(ctx->dev->core_dev->device,
+ ctx->sa_len * 4,
+ ctx->sa_in, ctx->sa_in_dma_addr);
+ return -ENOMEM;
+ }
+
+ memset(ctx->sa_in, 0, size * 4);
+ memset(ctx->sa_out, 0, size * 4);
+ ctx->sa_len = size;
+
+ return 0;
+}
+
+void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
+{
+ if (ctx->sa_in != NULL)
+ dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
+ ctx->sa_in, ctx->sa_in_dma_addr);
+ if (ctx->sa_out != NULL)
+ dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
+ ctx->sa_out, ctx->sa_out_dma_addr);
+
+ ctx->sa_in_dma_addr = 0;
+ ctx->sa_out_dma_addr = 0;
+ ctx->sa_len = 0;
+}
+
+u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
+{
+ ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
+ sizeof(struct sa_state_record),
+ &ctx->state_record_dma_addr, GFP_ATOMIC);
+ if (!ctx->state_record_dma_addr)
+ return -ENOMEM;
+ memset(ctx->state_record, 0, sizeof(struct sa_state_record));
+
+ return 0;
+}
+
+void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
+{
+ if (ctx->state_record != NULL)
+ dma_free_coherent(ctx->dev->core_dev->device,
+ sizeof(struct sa_state_record),
+ ctx->state_record,
+ ctx->state_record_dma_addr);
+ ctx->state_record_dma_addr = 0;
+}
+
+/**
+ * alloc memory for the packet descriptor ring and the per-entry
+ * shadow SA and state-record pools
+ * pdr_tail and pdr_head are initialized by this function
+ */
+static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
+{
+ int i;
+ struct pd_uinfo *pd_uinfo;
+ dev->pdr = dma_alloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_pd) * PPC4XX_NUM_PD,
+ &dev->pdr_pa, GFP_ATOMIC);
+ if (!dev->pdr)
+ return -ENOMEM;
+
+ dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD,
+ GFP_KERNEL);
+ if (!dev->pdr_uinfo) {
+ dma_free_coherent(dev->core_dev->device,
+ sizeof(struct ce_pd) * PPC4XX_NUM_PD,
+ dev->pdr,
+ dev->pdr_pa);
+ return -ENOMEM;
+ }
+ memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
+ dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
+ 256 * PPC4XX_NUM_PD,
+ &dev->shadow_sa_pool_pa,
+ GFP_ATOMIC);
+ if (!dev->shadow_sa_pool)
+ return -ENOMEM;
+
+ dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
+ sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
+ &dev->shadow_sr_pool_pa, GFP_ATOMIC);
+ if (!dev->shadow_sr_pool)
+ return -ENOMEM;
+ for (i = 0; i < PPC4XX_NUM_PD; i++) {
+ pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
+ sizeof(struct pd_uinfo) * i);
+
+ /* alloc 256 bytes which is enough for any kind of dynamic sa */
+ pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
+ pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;
+
+ /* alloc state record */
+ pd_uinfo->sr_va = dev->shadow_sr_pool +
+ sizeof(struct sa_state_record) * i;
+ pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
+ sizeof(struct sa_state_record) * i;
+ }
+
+ return 0;
+}
+
+static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
+{
+ if (dev->pdr != NULL)
+ dma_free_coherent(dev->core_dev->device,
+ sizeof(struct ce_pd) * PPC4XX_NUM_PD,
+ dev->pdr, dev->pdr_pa);
+ if (dev->shadow_sa_pool)
+ dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
+ dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
+ if (dev->shadow_sr_pool)
+ dma_free_coherent(dev->core_dev->device,
+ sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
+ dev->shadow_sr_pool, dev->shadow_sr_pool_pa);
+
+ kfree(dev->pdr_uinfo);
+}
+
+static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
+{
+ u32 retval;
+ u32 tmp;
+
+ retval = dev->pdr_head;
+ tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;
+
+ if (tmp == dev->pdr_tail)
+ return ERING_WAS_FULL;
+
+ dev->pdr_head = tmp;
+
+ return retval;
+}
+
+static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
+{
+ struct pd_uinfo *pd_uinfo;
+ unsigned long flags;
+
+ pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
+ sizeof(struct pd_uinfo) * idx);
+ spin_lock_irqsave(&dev->core_dev->lock, flags);
+ if (dev->pdr_tail != PPC4XX_LAST_PD)
+ dev->pdr_tail++;
+ else
+ dev->pdr_tail = 0;
+ pd_uinfo->state = PD_ENTRY_FREE;
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+
+ return 0;
+}
+
+static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
+ dma_addr_t *pd_dma, u32 idx)
+{
+ *pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;
+
+ return dev->pdr + sizeof(struct ce_pd) * idx;
+}
+
+/**
+ * alloc memory for the gather ring
+ * no need to alloc buf for the ring
+ * gdr_tail and gdr_head start out as zero
+ */
+static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
+{
+ dev->gdr = dma_alloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+ &dev->gdr_pa, GFP_ATOMIC);
+ if (!dev->gdr)
+ return -ENOMEM;
+
+ memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);
+
+ return 0;
+}
+
+static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
+{
+ dma_free_coherent(dev->core_dev->device,
+ sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+ dev->gdr, dev->gdr_pa);
+}
+
+/*
+ * when this function is called, preemption and
+ * interrupts must be disabled
+ */
+u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
+{
+ u32 retval;
+ u32 tmp;
+ if (n >= PPC4XX_NUM_GD)
+ return ERING_WAS_FULL;
+
+ retval = dev->gdr_head;
+ tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
+ if (dev->gdr_head > dev->gdr_tail) {
+ if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
+ return ERING_WAS_FULL;
+ } else if (dev->gdr_head < dev->gdr_tail) {
+ if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
+ return ERING_WAS_FULL;
+ }
+ dev->gdr_head = tmp;
+
+ return retval;
+}
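+
+/*
+ * Worked example (illustrative) of the wraparound check above: with
+ * PPC4XX_NUM_GD == 1024, gdr_head == 1020 and gdr_tail == 2, a
+ * request for n == 8 yields tmp == 4. Since head > tail and tmp lies
+ * in [gdr_tail, gdr_head), the new head would sweep past the tail
+ * (slots 1020..1023 and 0..3 include slot 2), so ERING_WAS_FULL is
+ * returned and the ring is left untouched.
+ */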
+
+static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->core_dev->lock, flags);
+ if (dev->gdr_tail == dev->gdr_head) {
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+ return 0;
+ }
+
+ if (dev->gdr_tail != PPC4XX_LAST_GD)
+ dev->gdr_tail++;
+ else
+ dev->gdr_tail = 0;
+
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+
+ return 0;
+}
+
+static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
+ dma_addr_t *gd_dma, u32 idx)
+{
+ *gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
+
+ return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
+}
+
+/**
+ * alloc memory for the scatter ring
+ * need to alloc buf for the ring
+ * sdr_tail and sdr_head start out as zero
+ */
+static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
+{
+ int i;
+ struct ce_sd *sd_array;
+
+ /* alloc memory for scatter descriptor ring */
+ dev->sdr = dma_alloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_sd) * PPC4XX_NUM_SD,
+ &dev->sdr_pa, GFP_ATOMIC);
+ if (!dev->sdr)
+ return -ENOMEM;
+
+ dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
+ dev->scatter_buffer_va =
+ dma_alloc_coherent(dev->core_dev->device,
+ dev->scatter_buffer_size * PPC4XX_NUM_SD,
+ &dev->scatter_buffer_pa, GFP_ATOMIC);
+ if (!dev->scatter_buffer_va) {
+ dma_free_coherent(dev->core_dev->device,
+ sizeof(struct ce_sd) * PPC4XX_NUM_SD,
+ dev->sdr, dev->sdr_pa);
+ return -ENOMEM;
+ }
+
+ sd_array = dev->sdr;
+
+ for (i = 0; i < PPC4XX_NUM_SD; i++) {
+ sd_array[i].ptr = dev->scatter_buffer_pa +
+ dev->scatter_buffer_size * i;
+ }
+
+ return 0;
+}
+
+static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
+{
+ if (dev->sdr != NULL)
+ dma_free_coherent(dev->core_dev->device,
+ sizeof(struct ce_sd) * PPC4XX_NUM_SD,
+ dev->sdr, dev->sdr_pa);
+
+ if (dev->scatter_buffer_va != NULL)
+ dma_free_coherent(dev->core_dev->device,
+ dev->scatter_buffer_size * PPC4XX_NUM_SD,
+ dev->scatter_buffer_va,
+ dev->scatter_buffer_pa);
+}
+
+/*
+ * when this function is called, preemption and
+ * interrupts must be disabled
+ */
+static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
+{
+ u32 retval;
+ u32 tmp;
+
+ if (n >= PPC4XX_NUM_SD)
+ return ERING_WAS_FULL;
+
+ retval = dev->sdr_head;
+ tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
+	if (dev->sdr_head > dev->sdr_tail) {
+ if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
+ return ERING_WAS_FULL;
+ } else if (dev->sdr_head < dev->sdr_tail) {
+ if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
+ return ERING_WAS_FULL;
+	} /* the head == tail (empty) case is already taken care of */
+ dev->sdr_head = tmp;
+
+ return retval;
+}
+
+static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->core_dev->lock, flags);
+ if (dev->sdr_tail == dev->sdr_head) {
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+ return 0;
+ }
+ if (dev->sdr_tail != PPC4XX_LAST_SD)
+ dev->sdr_tail++;
+ else
+ dev->sdr_tail = 0;
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+
+ return 0;
+}
+
+static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
+ dma_addr_t *sd_dma, u32 idx)
+{
+ *sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
+
+ return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
+}
+
+static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
+ dma_addr_t *addr, u32 *length,
+ u32 *idx, u32 *offset, u32 *nbytes)
+{
+ u32 len;
+
+ if (*length > dev->scatter_buffer_size) {
+ memcpy(phys_to_virt(*addr),
+ dev->scatter_buffer_va +
+ *idx * dev->scatter_buffer_size + *offset,
+ dev->scatter_buffer_size);
+ *offset = 0;
+ *length -= dev->scatter_buffer_size;
+ *nbytes -= dev->scatter_buffer_size;
+ if (*idx == PPC4XX_LAST_SD)
+ *idx = 0;
+ else
+ (*idx)++;
+ *addr = *addr + dev->scatter_buffer_size;
+ return 1;
+ } else if (*length < dev->scatter_buffer_size) {
+ memcpy(phys_to_virt(*addr),
+ dev->scatter_buffer_va +
+ *idx * dev->scatter_buffer_size + *offset, *length);
+ if ((*offset + *length) == dev->scatter_buffer_size) {
+ if (*idx == PPC4XX_LAST_SD)
+ *idx = 0;
+ else
+ (*idx)++;
+ *nbytes -= *length;
+ *offset = 0;
+ } else {
+ *nbytes -= *length;
+ *offset += *length;
+ }
+
+ return 0;
+ } else {
+ len = (*nbytes <= dev->scatter_buffer_size) ?
+ (*nbytes) : dev->scatter_buffer_size;
+ memcpy(phys_to_virt(*addr),
+ dev->scatter_buffer_va +
+ *idx * dev->scatter_buffer_size + *offset,
+ len);
+ *offset = 0;
+ *nbytes -= len;
+
+ if (*idx == PPC4XX_LAST_SD)
+ *idx = 0;
+ else
+ (*idx)++;
+
+ return 0;
+ }
+}
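+
+/*
+ * The helper above drains one scatter-buffer page per call and is
+ * meant to be spun in a loop by the caller: a return of 1 means the
+ * current destination segment (*length) still spans more than one
+ * scatter buffer, 0 means the segment has been consumed and the
+ * caller should move on to the next scatterlist entry (or stop once
+ * *nbytes reaches zero).
+ */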
+
+static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
+ struct ce_pd *pd,
+ struct pd_uinfo *pd_uinfo,
+ u32 nbytes,
+ struct scatterlist *dst)
+{
+ dma_addr_t addr;
+ u32 this_sd;
+ u32 offset;
+ u32 len;
+ u32 i;
+ u32 sg_len;
+ struct scatterlist *sg;
+
+ this_sd = pd_uinfo->first_sd;
+ offset = 0;
+ i = 0;
+
+ while (nbytes) {
+ sg = &dst[i];
+ sg_len = sg->length;
+ addr = dma_map_page(dev->core_dev->device, sg_page(sg),
+ sg->offset, sg->length, DMA_TO_DEVICE);
+
+ if (offset == 0) {
+ len = (nbytes <= sg->length) ? nbytes : sg->length;
+ while (crypto4xx_fill_one_page(dev, &addr, &len,
+ &this_sd, &offset, &nbytes))
+ ;
+ if (!nbytes)
+ return;
+ i++;
+ } else {
+ len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
+ nbytes : (dev->scatter_buffer_size - offset);
+ len = (sg->length < len) ? sg->length : len;
+ while (crypto4xx_fill_one_page(dev, &addr, &len,
+ &this_sd, &offset, &nbytes))
+ ;
+ if (!nbytes)
+ return;
+ sg_len -= len;
+ if (sg_len) {
+ addr += len;
+ while (crypto4xx_fill_one_page(dev, &addr,
+ &sg_len, &this_sd, &offset, &nbytes))
+ ;
+ }
+ i++;
+ }
+ }
+}
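+
+/*
+ * Note (illustrative): the loop above walks the destination
+ * scatterlist and the scatter descriptor ring in lock-step, copying
+ * processed data back from the PPC4XX_SD_BUFFER_SIZE scatter buffers
+ * into the caller's sg pages; `offset' tracks how far into the
+ * current scatter buffer the previous sg entry ended.
+ */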
+
+static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
+ struct crypto4xx_ctx *ctx)
+{
+ struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ struct sa_state_record *state_record =
+ (struct sa_state_record *) pd_uinfo->sr_va;
+
+ if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
+ memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
+ SA_HASH_ALG_SHA1_DIGEST_SIZE);
+ }
+
+ return 0;
+}
+
+static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
+ struct pd_uinfo *pd_uinfo)
+{
+ int i;
+ if (pd_uinfo->num_gd) {
+ for (i = 0; i < pd_uinfo->num_gd; i++)
+ crypto4xx_put_gd_to_gdr(dev);
+ pd_uinfo->first_gd = 0xffffffff;
+ pd_uinfo->num_gd = 0;
+ }
+ if (pd_uinfo->num_sd) {
+ for (i = 0; i < pd_uinfo->num_sd; i++)
+ crypto4xx_put_sd_to_sdr(dev);
+
+ pd_uinfo->first_sd = 0xffffffff;
+ pd_uinfo->num_sd = 0;
+ }
+}
+
+static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
+ struct pd_uinfo *pd_uinfo,
+ struct ce_pd *pd)
+{
+ struct crypto4xx_ctx *ctx;
+ struct ablkcipher_request *ablk_req;
+ struct scatterlist *dst;
+ dma_addr_t addr;
+
+ ablk_req = ablkcipher_request_cast(pd_uinfo->async_req);
+ ctx = crypto_tfm_ctx(ablk_req->base.tfm);
+
+ if (pd_uinfo->using_sd) {
+ crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes,
+ ablk_req->dst);
+ } else {
+ dst = pd_uinfo->dest_va;
+ addr = dma_map_page(dev->core_dev->device, sg_page(dst),
+ dst->offset, dst->length, DMA_FROM_DEVICE);
+ }
+ crypto4xx_ret_sg_desc(dev, pd_uinfo);
+ if (ablk_req->base.complete != NULL)
+ ablk_req->base.complete(&ablk_req->base, 0);
+
+ return 0;
+}
+
+static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
+ struct pd_uinfo *pd_uinfo)
+{
+ struct crypto4xx_ctx *ctx;
+ struct ahash_request *ahash_req;
+
+ ahash_req = ahash_request_cast(pd_uinfo->async_req);
+ ctx = crypto_tfm_ctx(ahash_req->base.tfm);
+
+ crypto4xx_copy_digest_to_dst(pd_uinfo,
+ crypto_tfm_ctx(ahash_req->base.tfm));
+ crypto4xx_ret_sg_desc(dev, pd_uinfo);
+	/* call the user-provided callback function */
+ if (ahash_req->base.complete != NULL)
+ ahash_req->base.complete(&ahash_req->base, 0);
+
+ return 0;
+}
+
+static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
+{
+ struct ce_pd *pd;
+ struct pd_uinfo *pd_uinfo;
+
+ pd = dev->pdr + sizeof(struct ce_pd)*idx;
+ pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo)*idx;
+ if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
+ CRYPTO_ALG_TYPE_ABLKCIPHER)
+ return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
+ else
+ return crypto4xx_ahash_done(dev, pd_uinfo);
+}
+
+/**
+ * Note: Only use this function to copy items that are word-aligned.
+ */
+void crypto4xx_memcpy_le(unsigned int *dst,
+ const unsigned char *buf,
+ int len)
+{
+ u8 *tmp;
+ for (; len >= 4; buf += 4, len -= 4)
+ *dst++ = cpu_to_le32(*(unsigned int *) buf);
+
+ tmp = (u8 *)dst;
+ switch (len) {
+ case 3:
+ *tmp++ = 0;
+ *tmp++ = *(buf+2);
+ *tmp++ = *(buf+1);
+ *tmp++ = *buf;
+ break;
+ case 2:
+ *tmp++ = 0;
+ *tmp++ = 0;
+ *tmp++ = *(buf+1);
+ *tmp++ = *buf;
+ break;
+ case 1:
+ *tmp++ = 0;
+ *tmp++ = 0;
+ *tmp++ = 0;
+ *tmp++ = *buf;
+ break;
+ default:
+ break;
+ }
+}
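+
+/*
+ * Byte-level example (illustrative) of the tail handling above:
+ * copying the 3 trailing bytes { 0xaa, 0xbb, 0xcc } produces the
+ * destination bytes { 0x00, 0xcc, 0xbb, 0xaa }, i.e. the partial
+ * word is zero-padded and byte-reversed to match the little-endian
+ * layout the engine expects.
+ */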
+
+static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
+{
+ crypto4xx_destroy_pdr(core_dev->dev);
+ crypto4xx_destroy_gdr(core_dev->dev);
+ crypto4xx_destroy_sdr(core_dev->dev);
+ dev_set_drvdata(core_dev->device, NULL);
+ iounmap(core_dev->dev->ce_base);
+ kfree(core_dev->dev);
+ kfree(core_dev);
+}
+
+void crypto4xx_return_pd(struct crypto4xx_device *dev,
+ u32 pd_entry, struct ce_pd *pd,
+ struct pd_uinfo *pd_uinfo)
+{
+ /* irq should be already disabled */
+ dev->pdr_head = pd_entry;
+ pd->pd_ctl.w = 0;
+ pd->pd_ctl_len.w = 0;
+ pd_uinfo->state = PD_ENTRY_FREE;
+}
+
+/*
+ * derive number of elements in scatterlist
+ * Shamelessly copied from talitos.c
+ */
+static int get_sg_count(struct scatterlist *sg_list, int nbytes)
+{
+ struct scatterlist *sg = sg_list;
+ int sg_nents = 0;
+
+ while (nbytes) {
+ sg_nents++;
+ if (sg->length > nbytes)
+ break;
+ nbytes -= sg->length;
+ sg = sg_next(sg);
+ }
+
+ return sg_nents;
+}
+
+static u32 get_next_gd(u32 current)
+{
+ if (current != PPC4XX_LAST_GD)
+ return current + 1;
+ else
+ return 0;
+}
+
+static u32 get_next_sd(u32 current)
+{
+ if (current != PPC4XX_LAST_SD)
+ return current + 1;
+ else
+ return 0;
+}
+
+u32 crypto4xx_build_pd(struct crypto_async_request *req,
+ struct crypto4xx_ctx *ctx,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ unsigned int datalen,
+ void *iv, u32 iv_len)
+{
+ struct crypto4xx_device *dev = ctx->dev;
+ dma_addr_t addr, pd_dma, sd_dma, gd_dma;
+ struct dynamic_sa_ctl *sa;
+ struct scatterlist *sg;
+ struct ce_gd *gd;
+ struct ce_pd *pd;
+ u32 num_gd, num_sd;
+ u32 fst_gd = 0xffffffff;
+ u32 fst_sd = 0xffffffff;
+ u32 pd_entry;
+ unsigned long flags;
+ struct pd_uinfo *pd_uinfo = NULL;
+ unsigned int nbytes = datalen, idx;
+ unsigned int ivlen = 0;
+ u32 gd_idx = 0;
+
+	/* figure out how many gds are needed */
+ num_gd = get_sg_count(src, datalen);
+ if (num_gd == 1)
+ num_gd = 0;
+
+	/* figure out how many sds are needed */
+ if (sg_is_last(dst) || ctx->is_hash) {
+ num_sd = 0;
+ } else {
+ if (datalen > PPC4XX_SD_BUFFER_SIZE) {
+ num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
+ if (datalen % PPC4XX_SD_BUFFER_SIZE)
+ num_sd++;
+ } else {
+ num_sd = 1;
+ }
+ }
+
+	/*
+	 * The following section of code needs to be protected:
+	 * the gather ring and scatter ring allocations must be
+	 * consecutive. If we run out of any kind of descriptor,
+	 * the descriptors already obtained must be returned to
+	 * their original place.
+	 */
+ spin_lock_irqsave(&dev->core_dev->lock, flags);
+ if (num_gd) {
+ fst_gd = crypto4xx_get_n_gd(dev, num_gd);
+ if (fst_gd == ERING_WAS_FULL) {
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+ return -EAGAIN;
+ }
+ }
+ if (num_sd) {
+ fst_sd = crypto4xx_get_n_sd(dev, num_sd);
+ if (fst_sd == ERING_WAS_FULL) {
+ if (num_gd)
+ dev->gdr_head = fst_gd;
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+ return -EAGAIN;
+ }
+ }
+ pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
+ if (pd_entry == ERING_WAS_FULL) {
+ if (num_gd)
+ dev->gdr_head = fst_gd;
+ if (num_sd)
+ dev->sdr_head = fst_sd;
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+ return -EAGAIN;
+ }
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+
+ pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
+ sizeof(struct pd_uinfo) * pd_entry);
+ pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
+ pd_uinfo->async_req = req;
+ pd_uinfo->num_gd = num_gd;
+ pd_uinfo->num_sd = num_sd;
+
+ if (iv_len || ctx->is_hash) {
+ ivlen = iv_len;
+ pd->sa = pd_uinfo->sa_pa;
+ sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
+ if (ctx->direction == DIR_INBOUND)
+ memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
+ else
+ memcpy(sa, ctx->sa_out, ctx->sa_len * 4);
+
+ memcpy((void *) sa + ctx->offset_to_sr_ptr,
+ &pd_uinfo->sr_pa, 4);
+
+ if (iv_len)
+ crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
+ } else {
+ if (ctx->direction == DIR_INBOUND) {
+ pd->sa = ctx->sa_in_dma_addr;
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ } else {
+ pd->sa = ctx->sa_out_dma_addr;
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ }
+ }
+ pd->sa_len = ctx->sa_len;
+ if (num_gd) {
+ /* get first gd we are going to use */
+ gd_idx = fst_gd;
+ pd_uinfo->first_gd = fst_gd;
+ pd_uinfo->num_gd = num_gd;
+ gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
+ pd->src = gd_dma;
+ /* enable gather */
+ sa->sa_command_0.bf.gather = 1;
+ idx = 0;
+ src = &src[0];
+ /* walk the sg, and setup gather array */
+ while (nbytes) {
+ sg = &src[idx];
+ addr = dma_map_page(dev->core_dev->device, sg_page(sg),
+ sg->offset, sg->length, DMA_TO_DEVICE);
+ gd->ptr = addr;
+ gd->ctl_len.len = sg->length;
+ gd->ctl_len.done = 0;
+ gd->ctl_len.ready = 1;
+ if (sg->length >= nbytes)
+ break;
+ nbytes -= sg->length;
+ gd_idx = get_next_gd(gd_idx);
+ gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
+ idx++;
+ }
+ } else {
+ pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
+ src->offset, src->length, DMA_TO_DEVICE);
+ /*
+ * Disable gather in sa command
+ */
+ sa->sa_command_0.bf.gather = 0;
+ /*
+ * Indicate gather array is not used
+ */
+ pd_uinfo->first_gd = 0xffffffff;
+ pd_uinfo->num_gd = 0;
+ }
+ if (ctx->is_hash || sg_is_last(dst)) {
+		/*
+		 * we know the application gives us dst as a whole piece
+		 * of memory, so there is no need to use the scatter ring.
+		 * in the is_hash case, the icv is always at the end of
+		 * the src data.
+		 */
+ pd_uinfo->using_sd = 0;
+ pd_uinfo->first_sd = 0xffffffff;
+ pd_uinfo->num_sd = 0;
+ pd_uinfo->dest_va = dst;
+ sa->sa_command_0.bf.scatter = 0;
+ if (ctx->is_hash)
+ pd->dest = virt_to_phys((void *)dst);
+ else
+ pd->dest = (u32)dma_map_page(dev->core_dev->device,
+ sg_page(dst), dst->offset,
+ dst->length, DMA_TO_DEVICE);
+ } else {
+ struct ce_sd *sd = NULL;
+ u32 sd_idx = fst_sd;
+ nbytes = datalen;
+ sa->sa_command_0.bf.scatter = 1;
+ pd_uinfo->using_sd = 1;
+ pd_uinfo->dest_va = dst;
+ pd_uinfo->first_sd = fst_sd;
+ pd_uinfo->num_sd = num_sd;
+ sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
+ pd->dest = sd_dma;
+ /* setup scatter descriptor */
+ sd->ctl.done = 0;
+ sd->ctl.rdy = 1;
+		/* sd->ptr should be set up by the sd_init routine */
+ idx = 0;
+ if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
+ nbytes -= PPC4XX_SD_BUFFER_SIZE;
+ else
+ nbytes = 0;
+ while (nbytes) {
+ sd_idx = get_next_sd(sd_idx);
+ sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
+ /* setup scatter descriptor */
+ sd->ctl.done = 0;
+ sd->ctl.rdy = 1;
+ if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
+ nbytes -= PPC4XX_SD_BUFFER_SIZE;
+ else
+ /*
+ * SD entry can hold PPC4XX_SD_BUFFER_SIZE,
+ * which is more than nbytes, so done.
+ */
+ nbytes = 0;
+ }
+ }
+
+ sa->sa_command_1.bf.hash_crypto_offset = 0;
+ pd->pd_ctl.w = ctx->pd_ctl;
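+	/*
+	 * 0x00400000 sets pd_ctl_len.bf.host_ready (bit 22); the bypass
+	 * count lands in bits 31:24 and datalen in the 20-bit pkt_len
+	 * field (see union ce_pd_ctl_len in crypto4xx_reg_def.h).
+	 */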
+ pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
+ pd_uinfo->state = PD_ENTRY_INUSE;
+ wmb();
+ /* write any value to push engine to read a pd */
+ writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
+ return -EINPROGRESS;
+}
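+
+/*
+ * Usage sketch (illustrative, not part of this file): the cipher
+ * entry points in crypto4xx_alg.c are expected to do little more
+ * than set the direction and hand their scatterlists to
+ * crypto4xx_build_pd(), e.g.:
+ *
+ *	int crypto4xx_encrypt(struct ablkcipher_request *req)
+ *	{
+ *		struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ *
+ *		ctx->direction = DIR_OUTBOUND;
+ *		return crypto4xx_build_pd(&req->base, ctx, req->src,
+ *					  req->dst, req->nbytes,
+ *					  req->info, AES_IV_SIZE);
+ *	}
+ *
+ * -EINPROGRESS/-EAGAIN then propagate straight back to the CryptoAPI.
+ */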
+
+/**
+ * Algorithm Registration Functions
+ */
+static int crypto4xx_alg_init(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->dev = amcc_alg->dev;
+ ctx->sa_in = NULL;
+ ctx->sa_out = NULL;
+ ctx->sa_in_dma_addr = 0;
+ ctx->sa_out_dma_addr = 0;
+ ctx->sa_len = 0;
+
+ if (alg->cra_type == &crypto_ablkcipher_type)
+ tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
+ else if (alg->cra_type == &crypto_ahash_type)
+ tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx);
+
+ return 0;
+}
+
+static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto4xx_free_sa(ctx);
+ crypto4xx_free_state_record(ctx);
+}
+
+int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
+ struct crypto_alg *crypto_alg, int array_size)
+{
+ struct crypto4xx_alg *alg;
+ int i;
+ int rc = 0;
+
+ for (i = 0; i < array_size; i++) {
+ alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
+ if (!alg)
+ return -ENOMEM;
+
+ alg->alg = crypto_alg[i];
+ INIT_LIST_HEAD(&alg->alg.cra_list);
+ if (alg->alg.cra_init == NULL)
+ alg->alg.cra_init = crypto4xx_alg_init;
+ if (alg->alg.cra_exit == NULL)
+ alg->alg.cra_exit = crypto4xx_alg_exit;
+ alg->dev = sec_dev;
+ rc = crypto_register_alg(&alg->alg);
+ if (rc) {
+ kfree(alg);
+ } else {
+ list_add_tail(&alg->entry, &sec_dev->alg_list);
+ }
+ }
+
+ return 0;
+}
+
+static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
+{
+ struct crypto4xx_alg *alg, *tmp;
+
+ list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
+ list_del(&alg->entry);
+ crypto_unregister_alg(&alg->alg);
+ kfree(alg);
+ }
+}
+
+static void crypto4xx_bh_tasklet_cb(unsigned long data)
+{
+ struct device *dev = (struct device *)data;
+ struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
+ struct pd_uinfo *pd_uinfo;
+ struct ce_pd *pd;
+ u32 tail;
+
+ while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
+ tail = core_dev->dev->pdr_tail;
+ pd_uinfo = core_dev->dev->pdr_uinfo +
+ sizeof(struct pd_uinfo)*tail;
+ pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
+ if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
+ pd->pd_ctl.bf.pe_done &&
+ !pd->pd_ctl.bf.host_ready) {
+ pd->pd_ctl.bf.pe_done = 0;
+ crypto4xx_pd_done(core_dev->dev, tail);
+ crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
+ pd_uinfo->state = PD_ENTRY_FREE;
+ } else {
+ /* if tail not done, break */
+ break;
+ }
+ }
+}
+
+/**
+ * Top half of the ISR.
+ */
+static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
+{
+ struct device *dev = (struct device *)data;
+ struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
+
+	if (!core_dev->dev->ce_base)
+		return IRQ_NONE;
+
+ writel(PPC4XX_INTERRUPT_CLR,
+ core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
+ tasklet_schedule(&core_dev->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * Supported Crypto Algorithms
+ */
+struct crypto_alg crypto4xx_alg[] = {
+ /* Crypto AES modes */
+ {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_cbc,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ },
+ /* Hash SHA1 */
+ {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_init = crypto4xx_sha1_alg_init,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ahash = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ }
+ }
+ },
+};
+
+/**
+ * Device probe routine: resets the crypto engine, builds the
+ * descriptor rings and registers the supported algorithms.
+ */
+static int __init crypto4xx_probe(struct of_device *ofdev,
+ const struct of_device_id *match)
+{
+ int rc;
+ struct resource res;
+ struct device *dev = &ofdev->dev;
+ struct crypto4xx_core_device *core_dev;
+
+ rc = of_address_to_resource(ofdev->node, 0, &res);
+ if (rc)
+ return -ENODEV;
+
+ if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
+ mtdcri(SDR0, PPC460EX_SDR0_SRST,
+ mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
+ mtdcri(SDR0, PPC460EX_SDR0_SRST,
+ mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
+ } else if (of_find_compatible_node(NULL, NULL,
+ "amcc,ppc405ex-crypto")) {
+ mtdcri(SDR0, PPC405EX_SDR0_SRST,
+ mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
+ mtdcri(SDR0, PPC405EX_SDR0_SRST,
+ mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
+ } else if (of_find_compatible_node(NULL, NULL,
+ "amcc,ppc460sx-crypto")) {
+ mtdcri(SDR0, PPC460SX_SDR0_SRST,
+ mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
+ mtdcri(SDR0, PPC460SX_SDR0_SRST,
+ mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
+ } else {
+		printk(KERN_ERR "Crypto function not supported!\n");
+ return -EINVAL;
+ }
+
+ core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
+ if (!core_dev)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, core_dev);
+ core_dev->ofdev = ofdev;
+ core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
+	if (!core_dev->dev) {
+		rc = -ENOMEM;
+		goto err_alloc_dev;
+	}
+
+ core_dev->dev->core_dev = core_dev;
+ core_dev->device = dev;
+ spin_lock_init(&core_dev->lock);
+ INIT_LIST_HEAD(&core_dev->dev->alg_list);
+ rc = crypto4xx_build_pdr(core_dev->dev);
+ if (rc)
+ goto err_build_pdr;
+
+ rc = crypto4xx_build_gdr(core_dev->dev);
+ if (rc)
+ goto err_build_gdr;
+
+ rc = crypto4xx_build_sdr(core_dev->dev);
+ if (rc)
+ goto err_build_sdr;
+
+ /* Init tasklet for bottom half processing */
+ tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
+ (unsigned long) dev);
+
+ /* Register for Crypto isr, Crypto Engine IRQ */
+ core_dev->irq = irq_of_parse_and_map(ofdev->node, 0);
+ rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
+ core_dev->dev->name, dev);
+ if (rc)
+ goto err_request_irq;
+
+ core_dev->dev->ce_base = of_iomap(ofdev->node, 0);
+ if (!core_dev->dev->ce_base) {
+		dev_err(dev, "failed to of_iomap\n");
+		rc = -ENOMEM;
+		goto err_iomap;
+ }
+
+ /* need to setup pdr, rdr, gdr and sdr before this */
+ crypto4xx_hw_init(core_dev->dev);
+
+ /* Register security algorithms with Linux CryptoAPI */
+ rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
+ ARRAY_SIZE(crypto4xx_alg));
+ if (rc)
+ goto err_start_dev;
+
+ return 0;
+
+err_start_dev:
+ iounmap(core_dev->dev->ce_base);
+err_iomap:
+ free_irq(core_dev->irq, dev);
+ irq_dispose_mapping(core_dev->irq);
+ tasklet_kill(&core_dev->tasklet);
+err_request_irq:
+ crypto4xx_destroy_sdr(core_dev->dev);
+err_build_sdr:
+ crypto4xx_destroy_gdr(core_dev->dev);
+err_build_gdr:
+ crypto4xx_destroy_pdr(core_dev->dev);
+err_build_pdr:
+ kfree(core_dev->dev);
+err_alloc_dev:
+ kfree(core_dev);
+
+ return rc;
+}
+
+static int __exit crypto4xx_remove(struct of_device *ofdev)
+{
+ struct device *dev = &ofdev->dev;
+ struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
+
+ free_irq(core_dev->irq, dev);
+ irq_dispose_mapping(core_dev->irq);
+
+ tasklet_kill(&core_dev->tasklet);
+ /* Un-register with Linux CryptoAPI */
+ crypto4xx_unregister_alg(core_dev->dev);
+ /* Free all allocated memory */
+ crypto4xx_stop_all(core_dev);
+
+ return 0;
+}
+
+static struct of_device_id crypto4xx_match[] = {
+ { .compatible = "amcc,ppc4xx-crypto",},
+ { },
+};
+
+static struct of_platform_driver crypto4xx_driver = {
+ .name = "crypto4xx",
+ .match_table = crypto4xx_match,
+ .probe = crypto4xx_probe,
+ .remove = crypto4xx_remove,
+};
+
+static int __init crypto4xx_init(void)
+{
+ return of_register_platform_driver(&crypto4xx_driver);
+}
+
+static void __exit crypto4xx_exit(void)
+{
+ of_unregister_platform_driver(&crypto4xx_driver);
+}
+
+module_init(crypto4xx_init);
+module_exit(crypto4xx_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
+MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");
+
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
new file mode 100644
index 00000000000..1ef10344936
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -0,0 +1,177 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This is the header file for the AMCC crypto offload Linux device driver
+ * for use with the Linux CryptoAPI.
+ */
+
+#ifndef __CRYPTO4XX_CORE_H__
+#define __CRYPTO4XX_CORE_H__
+
+#define PPC460SX_SDR0_SRST 0x201
+#define PPC405EX_SDR0_SRST 0x200
+#define PPC460EX_SDR0_SRST 0x201
+#define PPC460EX_CE_RESET 0x08000000
+#define PPC460SX_CE_RESET 0x20000000
+#define PPC405EX_CE_RESET 0x00000008
+
+#define CRYPTO4XX_CRYPTO_PRIORITY 300
+#define PPC4XX_LAST_PD 63
+#define PPC4XX_NUM_PD 64
+#define PPC4XX_LAST_GD 1023
+#define PPC4XX_NUM_GD 1024
+#define PPC4XX_LAST_SD 63
+#define PPC4XX_NUM_SD 64
+#define PPC4XX_SD_BUFFER_SIZE 2048
+
+#define PD_ENTRY_INUSE 1
+#define PD_ENTRY_FREE 0
+#define ERING_WAS_FULL 0xffffffff
+
+struct crypto4xx_device;
+
+struct pd_uinfo {
+ struct crypto4xx_device *dev;
+ u32 state;
+ u32 using_sd;
+	u32 first_gd;		/* first gather descriptor
+				used by this packet */
+	u32 num_gd;		/* number of gather descriptors
+				used by this packet */
+	u32 first_sd;		/* first scatter descriptor
+				used by this packet */
+	u32 num_sd;		/* number of scatter descriptors
+				used by this packet */
+	void *sa_va;		/* shadow sa, used when copying from ctx->sa */
+ u32 sa_pa;
+ void *sr_va; /* state record for shadow sa */
+ u32 sr_pa;
+ struct scatterlist *dest_va;
+ struct crypto_async_request *async_req; /* base crypto request
+ for this packet */
+};
+
+struct crypto4xx_device {
+ struct crypto4xx_core_device *core_dev;
+ char *name;
+ u64 ce_phy_address;
+ void __iomem *ce_base;
+
+ void *pdr; /* base address of packet
+ descriptor ring */
+ dma_addr_t pdr_pa; /* physical address used to
+ program ce pdr_base_register */
+ void *gdr; /* gather descriptor ring */
+ dma_addr_t gdr_pa; /* physical address used to
+ program ce gdr_base_register */
+ void *sdr; /* scatter descriptor ring */
+ dma_addr_t sdr_pa; /* physical address used to
+ program ce sdr_base_register */
+ void *scatter_buffer_va;
+ dma_addr_t scatter_buffer_pa;
+ u32 scatter_buffer_size;
+
+ void *shadow_sa_pool; /* pool of memory for sa in pd_uinfo */
+ dma_addr_t shadow_sa_pool_pa;
+ void *shadow_sr_pool; /* pool of memory for sr in pd_uinfo */
+ dma_addr_t shadow_sr_pool_pa;
+ u32 pdr_tail;
+ u32 pdr_head;
+ u32 gdr_tail;
+ u32 gdr_head;
+ u32 sdr_tail;
+ u32 sdr_head;
+ void *pdr_uinfo;
+ struct list_head alg_list; /* List of algorithm supported
+ by this device */
+};
+
+struct crypto4xx_core_device {
+ struct device *device;
+ struct of_device *ofdev;
+ struct crypto4xx_device *dev;
+ u32 int_status;
+ u32 irq;
+ struct tasklet_struct tasklet;
+ spinlock_t lock;
+};
+
+struct crypto4xx_ctx {
+ struct crypto4xx_device *dev;
+ void *sa_in;
+ dma_addr_t sa_in_dma_addr;
+ void *sa_out;
+ dma_addr_t sa_out_dma_addr;
+ void *state_record;
+ dma_addr_t state_record_dma_addr;
+ u32 sa_len;
+ u32 offset_to_sr_ptr; /* offset to state ptr, in dynamic sa */
+ u32 direction;
+ u32 next_hdr;
+ u32 save_iv;
+ u32 pd_ctl_len;
+ u32 pd_ctl;
+ u32 bypass;
+ u32 is_hash;
+ u32 hash_final;
+};
+
+struct crypto4xx_req_ctx {
+	struct crypto4xx_device *dev;	/* device to which the
+					operation is sent */
+ void *sa;
+ u32 sa_dma_addr;
+ u16 sa_len;
+};
+
+struct crypto4xx_alg {
+ struct list_head entry;
+ struct crypto_alg alg;
+ struct crypto4xx_device *dev;
+};
+
+#define crypto_alg_to_crypto4xx_alg(x) \
+ container_of(x, struct crypto4xx_alg, alg)
+
+extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
+extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
+extern u32 crypto4xx_alloc_sa_rctx(struct crypto4xx_ctx *ctx,
+ struct crypto4xx_ctx *rctx);
+extern void crypto4xx_free_sa_rctx(struct crypto4xx_ctx *rctx);
+extern void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
+extern u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
+extern u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx);
+extern u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx);
+extern u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx);
+extern void crypto4xx_memcpy_le(unsigned int *dst,
+ const unsigned char *buf, int len);
+extern u32 crypto4xx_build_pd(struct crypto_async_request *req,
+ struct crypto4xx_ctx *ctx,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ unsigned int datalen,
+ void *iv, u32 iv_len);
+extern int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+extern int crypto4xx_encrypt(struct ablkcipher_request *req);
+extern int crypto4xx_decrypt(struct ablkcipher_request *req);
+extern int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
+extern int crypto4xx_hash_digest(struct ahash_request *req);
+extern int crypto4xx_hash_final(struct ahash_request *req);
+extern int crypto4xx_hash_update(struct ahash_request *req);
+extern int crypto4xx_hash_init(struct ahash_request *req);
+#endif
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
new file mode 100644
index 00000000000..7d4edb00261
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -0,0 +1,284 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file defines the register set for the Security Subsystem.
+ */
+
+#ifndef __CRYPTO4XX_REG_DEF_H__
+#define __CRYPTO4XX_REG_DEF_H__
+
+/* CRYPTO4XX Register offset */
+#define CRYPTO4XX_DESCRIPTOR 0x00000000
+#define CRYPTO4XX_CTRL_STAT 0x00000000
+#define CRYPTO4XX_SOURCE 0x00000004
+#define CRYPTO4XX_DEST 0x00000008
+#define CRYPTO4XX_SA 0x0000000C
+#define CRYPTO4XX_SA_LENGTH 0x00000010
+#define CRYPTO4XX_LENGTH 0x00000014
+
+#define CRYPTO4XX_PE_DMA_CFG 0x00000040
+#define CRYPTO4XX_PE_DMA_STAT 0x00000044
+#define CRYPTO4XX_PDR_BASE 0x00000048
+#define CRYPTO4XX_RDR_BASE 0x0000004c
+#define CRYPTO4XX_RING_SIZE 0x00000050
+#define CRYPTO4XX_RING_CTRL 0x00000054
+#define CRYPTO4XX_INT_RING_STAT 0x00000058
+#define CRYPTO4XX_EXT_RING_STAT 0x0000005c
+#define CRYPTO4XX_IO_THRESHOLD 0x00000060
+#define CRYPTO4XX_GATH_RING_BASE 0x00000064
+#define CRYPTO4XX_SCAT_RING_BASE 0x00000068
+#define CRYPTO4XX_PART_RING_SIZE 0x0000006c
+#define CRYPTO4XX_PART_RING_CFG 0x00000070
+
+#define CRYPTO4XX_PDR_BASE_UADDR 0x00000080
+#define CRYPTO4XX_RDR_BASE_UADDR 0x00000084
+#define CRYPTO4XX_PKT_SRC_UADDR 0x00000088
+#define CRYPTO4XX_PKT_DEST_UADDR 0x0000008c
+#define CRYPTO4XX_SA_UADDR 0x00000090
+#define CRYPTO4XX_GATH_RING_BASE_UADDR 0x000000A0
+#define CRYPTO4XX_SCAT_RING_BASE_UADDR 0x000000A4
+
+#define CRYPTO4XX_SEQ_RD 0x00000408
+#define CRYPTO4XX_SEQ_MASK_RD 0x0000040C
+
+#define CRYPTO4XX_SA_CMD_0 0x00010600
+#define CRYPTO4XX_SA_CMD_1 0x00010604
+
+#define CRYPTO4XX_STATE_PTR 0x000106dc
+#define CRYPTO4XX_STATE_IV 0x00010700
+#define CRYPTO4XX_STATE_HASH_BYTE_CNT_0 0x00010710
+#define CRYPTO4XX_STATE_HASH_BYTE_CNT_1 0x00010714
+
+#define CRYPTO4XX_STATE_IDIGEST_0 0x00010718
+#define CRYPTO4XX_STATE_IDIGEST_1 0x0001071c
+
+#define CRYPTO4XX_DATA_IN 0x00018000
+#define CRYPTO4XX_DATA_OUT 0x0001c000
+
+#define CRYPTO4XX_INT_UNMASK_STAT 0x000500a0
+#define CRYPTO4XX_INT_MASK_STAT 0x000500a4
+#define CRYPTO4XX_INT_CLR 0x000500a4
+#define CRYPTO4XX_INT_EN 0x000500a8
+
+#define CRYPTO4XX_INT_PKA 0x00000002
+#define CRYPTO4XX_INT_PDR_DONE 0x00008000
+#define CRYPTO4XX_INT_MA_WR_ERR 0x00020000
+#define CRYPTO4XX_INT_MA_RD_ERR 0x00010000
+#define CRYPTO4XX_INT_PE_ERR 0x00000200
+#define CRYPTO4XX_INT_USER_DMA_ERR 0x00000040
+#define CRYPTO4XX_INT_SLAVE_ERR 0x00000010
+#define CRYPTO4XX_INT_MASTER_ERR 0x00000008
+#define CRYPTO4XX_INT_ERROR 0x00030258
+
+#define CRYPTO4XX_INT_CFG 0x000500ac
+#define CRYPTO4XX_INT_DESCR_RD 0x000500b0
+#define CRYPTO4XX_INT_DESCR_CNT 0x000500b4
+#define CRYPTO4XX_INT_TIMEOUT_CNT 0x000500b8
+
+#define CRYPTO4XX_DEVICE_CTRL 0x00060080
+#define CRYPTO4XX_DEVICE_ID 0x00060084
+#define CRYPTO4XX_DEVICE_INFO 0x00060088
+#define CRYPTO4XX_DMA_USER_SRC 0x00060094
+#define CRYPTO4XX_DMA_USER_DEST 0x00060098
+#define CRYPTO4XX_DMA_USER_CMD 0x0006009C
+
+#define CRYPTO4XX_DMA_CFG 0x000600d4
+#define CRYPTO4XX_BYTE_ORDER_CFG 0x000600d8
+#define CRYPTO4XX_ENDIAN_CFG 0x000600d8
+
+#define CRYPTO4XX_PRNG_STAT 0x00070000
+#define CRYPTO4XX_PRNG_CTRL 0x00070004
+#define CRYPTO4XX_PRNG_SEED_L 0x00070008
+#define CRYPTO4XX_PRNG_SEED_H 0x0007000c
+
+#define CRYPTO4XX_PRNG_RES_0 0x00070020
+#define CRYPTO4XX_PRNG_RES_1 0x00070024
+#define CRYPTO4XX_PRNG_RES_2 0x00070028
+#define CRYPTO4XX_PRNG_RES_3 0x0007002C
+
+#define CRYPTO4XX_PRNG_LFSR_L 0x00070030
+#define CRYPTO4XX_PRNG_LFSR_H 0x00070034
+
+/**
+ * Initialize CRYPTO ENGINE registers and memory bases.
+ */
+#define PPC4XX_PDR_POLL 0x3ff
+#define PPC4XX_OUTPUT_THRESHOLD 2
+#define PPC4XX_INPUT_THRESHOLD 2
+#define PPC4XX_PD_SIZE 6
+#define PPC4XX_CTX_DONE_INT 0x2000
+#define PPC4XX_PD_DONE_INT 0x8000
+#define PPC4XX_BYTE_ORDER 0x22222
+#define PPC4XX_INTERRUPT_CLR 0x3ffff
+#define PPC4XX_PRNG_CTRL_AUTO_EN 0x3
+#define PPC4XX_DC_3DES_EN 1
+#define PPC4XX_INT_DESCR_CNT 4
+#define PPC4XX_INT_TIMEOUT_CNT 0
+#define PPC4XX_INT_CFG 1
+/**
+ * all of the following defines are ad hoc
+ */
+#define PPC4XX_RING_RETRY 100
+#define PPC4XX_RING_POLL 100
+#define PPC4XX_SDR_SIZE PPC4XX_NUM_SD
+#define PPC4XX_GDR_SIZE PPC4XX_NUM_GD
+
+/**
+ * Generic Security Association (SA) with all possible fields. These will
+ * likely never be used except for reference purposes. The structure format
+ * cannot be changed, as the hardware expects the layout as defined.
+ * Fields can be removed or reduced, but the ordering cannot be changed.
+ */
+#define CRYPTO4XX_DMA_CFG_OFFSET 0x40
+union ce_pe_dma_cfg {
+ struct {
+ u32 rsv:7;
+ u32 dir_host:1;
+ u32 rsv1:2;
+ u32 bo_td_en:1;
+ u32 dis_pdr_upd:1;
+ u32 bo_sgpd_en:1;
+ u32 bo_data_en:1;
+ u32 bo_sa_en:1;
+ u32 bo_pd_en:1;
+ u32 rsv2:4;
+ u32 dynamic_sa_en:1;
+ u32 pdr_mode:2;
+ u32 pe_mode:1;
+ u32 rsv3:5;
+ u32 reset_sg:1;
+ u32 reset_pdr:1;
+ u32 reset_pe:1;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define CRYPTO4XX_PDR_BASE_OFFSET 0x48
+#define CRYPTO4XX_RDR_BASE_OFFSET 0x4c
+#define CRYPTO4XX_RING_SIZE_OFFSET 0x50
+union ce_ring_size {
+ struct {
+ u32 ring_offset:16;
+ u32 rsv:6;
+ u32 ring_size:10;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define CRYPTO4XX_RING_CONTROL_OFFSET 0x54
+union ce_ring_contol {
+ struct {
+ u32 continuous:1;
+ u32 rsv:5;
+ u32 ring_retry_divisor:10;
+ u32 rsv1:4;
+ u32 ring_poll_divisor:10;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define CRYPTO4XX_IO_THRESHOLD_OFFSET 0x60
+union ce_io_threshold {
+ struct {
+ u32 rsv:6;
+ u32 output_threshold:10;
+ u32 rsv1:6;
+ u32 input_threshold:10;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define CRYPTO4XX_GATHER_RING_BASE_OFFSET 0x64
+#define CRYPTO4XX_SCATTER_RING_BASE_OFFSET 0x68
+
+union ce_part_ring_size {
+ struct {
+ u32 sdr_size:16;
+ u32 gdr_size:16;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define MAX_BURST_SIZE_32 0
+#define MAX_BURST_SIZE_64 1
+#define MAX_BURST_SIZE_128 2
+#define MAX_BURST_SIZE_256 3
+
+/* gather descriptor control length */
+struct gd_ctl_len {
+ u32 len:16;
+ u32 rsv:14;
+ u32 done:1;
+ u32 ready:1;
+} __attribute__((packed));
+
+struct ce_gd {
+ u32 ptr;
+ struct gd_ctl_len ctl_len;
+} __attribute__((packed));
+
+struct sd_ctl {
+ u32 ctl:30;
+ u32 done:1;
+ u32 rdy:1;
+} __attribute__((packed));
+
+struct ce_sd {
+ u32 ptr;
+ struct sd_ctl ctl;
+} __attribute__((packed));
+
+#define PD_PAD_CTL_32 0x10
+#define PD_PAD_CTL_64 0x20
+#define PD_PAD_CTL_128 0x40
+#define PD_PAD_CTL_256 0x80
+union ce_pd_ctl {
+ struct {
+ u32 pd_pad_ctl:8;
+ u32 status:8;
+ u32 next_hdr:8;
+ u32 rsv:2;
+ u32 cached_sa:1;
+ u32 hash_final:1;
+ u32 init_arc4:1;
+ u32 rsv1:1;
+ u32 pe_done:1;
+ u32 host_ready:1;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+union ce_pd_ctl_len {
+ struct {
+ u32 bypass:8;
+ u32 pe_done:1;
+ u32 host_ready:1;
+ u32 rsv:2;
+ u32 pkt_len:20;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+struct ce_pd {
+ union ce_pd_ctl pd_ctl;
+ u32 src;
+ u32 dest;
+ u32 sa; /* get from ctx->sa_dma_addr */
+ u32 sa_len; /* only if dynamic sa is used */
+ union ce_pd_ctl_len pd_ctl_len;
+
+} __attribute__((packed));
+#endif
diff --git a/drivers/crypto/amcc/crypto4xx_sa.c b/drivers/crypto/amcc/crypto4xx_sa.c
new file mode 100644
index 00000000000..466fd94cd4a
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_sa.c
@@ -0,0 +1,108 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * @file crypto4xx_sa.c
+ *
+ * This file implements the security
+ * association (SA) context format.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mod_devicetable.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock_types.h>
+#include <linux/highmem.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+#include "crypto4xx_reg_def.h"
+#include "crypto4xx_sa.h"
+#include "crypto4xx_core.h"
+
+u32 get_dynamic_sa_offset_iv_field(struct crypto4xx_ctx *ctx)
+{
+ u32 offset;
+ union dynamic_sa_contents cts;
+
+ if (ctx->direction == DIR_INBOUND)
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_in))->sa_contents;
+ else
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_out))->sa_contents;
+ offset = cts.bf.key_size
+ + cts.bf.inner_size
+ + cts.bf.outer_size
+ + cts.bf.spi
+ + cts.bf.seq_num0
+ + cts.bf.seq_num1
+ + cts.bf.seq_num_mask0
+ + cts.bf.seq_num_mask1
+ + cts.bf.seq_num_mask2
+ + cts.bf.seq_num_mask3;
+
+ return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
+u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx)
+{
+ u32 offset;
+ union dynamic_sa_contents cts;
+
+ if (ctx->direction == DIR_INBOUND)
+ cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
+ else
+ cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
+ offset = cts.bf.key_size
+ + cts.bf.inner_size
+ + cts.bf.outer_size
+ + cts.bf.spi
+ + cts.bf.seq_num0
+ + cts.bf.seq_num1
+ + cts.bf.seq_num_mask0
+ + cts.bf.seq_num_mask1
+ + cts.bf.seq_num_mask2
+ + cts.bf.seq_num_mask3
+ + cts.bf.iv0
+ + cts.bf.iv1
+ + cts.bf.iv2
+ + cts.bf.iv3;
+
+ return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
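+
+/*
+ * Worked example (illustrative): for an AES-128 SA (see
+ * struct dynamic_sa_aes128 in crypto4xx_sa.h) only key_size == 4 and
+ * the four iv words are present, so offset == 8 and the state
+ * pointer lives at sizeof(struct dynamic_sa_ctl) + 32 == 44 bytes,
+ * matching ctrl (12 bytes) + key[4] (16) + iv[4] (16) in the
+ * structure layout.
+ */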
+
+u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx)
+{
+ union dynamic_sa_contents cts;
+
+ if (ctx->direction == DIR_INBOUND)
+ cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
+ else
+ cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
+ return (cts.bf.iv0 + cts.bf.iv1 + cts.bf.iv2 + cts.bf.iv3) * 4;
+}
+
+u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx)
+{
+ union dynamic_sa_contents cts;
+
+ if (ctx->direction == DIR_INBOUND)
+ cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
+ else
+ cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
+
+ return sizeof(struct dynamic_sa_ctl);
+}
diff --git a/drivers/crypto/amcc/crypto4xx_sa.h b/drivers/crypto/amcc/crypto4xx_sa.h
new file mode 100644
index 00000000000..4b83ed7e557
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_sa.h
@@ -0,0 +1,243 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file defines the security
+ * association (SA) context format.
+ */
+
+#ifndef __CRYPTO4XX_SA_H__
+#define __CRYPTO4XX_SA_H__
+
+#define AES_IV_SIZE 16
+
+/**
+ * Contents of Dynamic Security Association (SA) with all possible fields
+ */
+union dynamic_sa_contents {
+ struct {
+ u32 arc4_state_ptr:1;
+ u32 arc4_ij_ptr:1;
+ u32 state_ptr:1;
+ u32 iv3:1;
+ u32 iv2:1;
+ u32 iv1:1;
+ u32 iv0:1;
+ u32 seq_num_mask3:1;
+ u32 seq_num_mask2:1;
+ u32 seq_num_mask1:1;
+ u32 seq_num_mask0:1;
+ u32 seq_num1:1;
+ u32 seq_num0:1;
+ u32 spi:1;
+ u32 outer_size:5;
+ u32 inner_size:5;
+ u32 key_size:4;
+ u32 cmd_size:4;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define DIR_OUTBOUND 0
+#define DIR_INBOUND 1
+#define SA_OP_GROUP_BASIC 0
+#define SA_OPCODE_ENCRYPT 0
+#define SA_OPCODE_DECRYPT 0
+#define SA_OPCODE_HASH 3
+#define SA_CIPHER_ALG_DES 0
+#define SA_CIPHER_ALG_3DES 1
+#define SA_CIPHER_ALG_ARC4 2
+#define SA_CIPHER_ALG_AES 3
+#define SA_CIPHER_ALG_KASUMI 4
+#define SA_CIPHER_ALG_NULL 15
+
+#define SA_HASH_ALG_MD5 0
+#define SA_HASH_ALG_SHA1 1
+#define SA_HASH_ALG_NULL 15
+#define SA_HASH_ALG_SHA1_DIGEST_SIZE 20
+
+#define SA_LOAD_HASH_FROM_SA 0
+#define SA_LOAD_HASH_FROM_STATE 2
+#define SA_NOT_LOAD_HASH 3
+#define SA_LOAD_IV_FROM_SA 0
+#define SA_LOAD_IV_FROM_INPUT 1
+#define SA_LOAD_IV_FROM_STATE 2
+#define SA_LOAD_IV_GEN_IV 3
+
+#define SA_PAD_TYPE_CONSTANT 2
+#define SA_PAD_TYPE_ZERO 3
+#define SA_PAD_TYPE_TLS 5
+#define SA_PAD_TYPE_DTLS 5
+#define SA_NOT_SAVE_HASH 0
+#define SA_SAVE_HASH 1
+#define SA_NOT_SAVE_IV 0
+#define SA_SAVE_IV 1
+#define SA_HEADER_PROC 1
+#define SA_NO_HEADER_PROC 0
+
+union sa_command_0 {
+ struct {
+ u32 scatter:1;
+ u32 gather:1;
+ u32 save_hash_state:1;
+ u32 save_iv:1;
+ u32 load_hash_state:2;
+ u32 load_iv:2;
+ u32 digest_len:4;
+ u32 hdr_proc:1;
+ u32 extend_pad:1;
+ u32 stream_cipher_pad:1;
+ u32 rsv:1;
+ u32 hash_alg:4;
+ u32 cipher_alg:4;
+ u32 pad_type:2;
+ u32 op_group:2;
+ u32 dir:1;
+ u32 opcode:3;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+#define CRYPTO_MODE_ECB 0
+#define CRYPTO_MODE_CBC 1
+
+#define CRYPTO_FEEDBACK_MODE_NO_FB 0
+#define CRYPTO_FEEDBACK_MODE_64BIT_OFB 0
+#define CRYPTO_FEEDBACK_MODE_8BIT_CFB 1
+#define CRYPTO_FEEDBACK_MODE_1BIT_CFB 2
+#define CRYPTO_FEEDBACK_MODE_128BIT_CFB 3
+
+#define SA_AES_KEY_LEN_128 2
+#define SA_AES_KEY_LEN_192 3
+#define SA_AES_KEY_LEN_256 4
+
+#define SA_REV2 1
+/**
+ * The following defines the bits of sa_command_1.
+ * In basic hash mode this bit selects simple hash or hmac.
+ * In IPsec mode, this bit controls muting.
+ */
+#define SA_HASH_MODE_HASH 0
+#define SA_HASH_MODE_HMAC 1
+#define SA_MC_ENABLE 0
+#define SA_MC_DISABLE 1
+#define SA_NOT_COPY_HDR 0
+#define SA_COPY_HDR 1
+#define SA_NOT_COPY_PAD 0
+#define SA_COPY_PAD 1
+#define SA_NOT_COPY_PAYLOAD 0
+#define SA_COPY_PAYLOAD 1
+#define SA_EXTENDED_SN_OFF 0
+#define SA_EXTENDED_SN_ON 1
+#define SA_SEQ_MASK_OFF 0
+#define SA_SEQ_MASK_ON 1
+
+union sa_command_1 {
+ struct {
+ u32 crypto_mode31:1;
+ u32 save_arc4_state:1;
+ u32 arc4_stateful:1;
+ u32 key_len:5;
+ u32 hash_crypto_offset:8;
+ u32 sa_rev:2;
+ u32 byte_offset:1;
+ u32 hmac_muting:1;
+ u32 feedback_mode:2;
+ u32 crypto_mode9_8:2;
+ u32 extended_seq_num:1;
+ u32 seq_num_mask:1;
+ u32 mutable_bit_proc:1;
+ u32 ip_version:1;
+ u32 copy_pad:1;
+ u32 copy_payload:1;
+ u32 copy_hdr:1;
+ u32 rsv1:1;
+ } bf;
+ u32 w;
+} __attribute__((packed));
+
+struct dynamic_sa_ctl {
+ u32 sa_contents;
+ union sa_command_0 sa_command_0;
+ union sa_command_1 sa_command_1;
+} __attribute__((packed));
+
+/**
+ * State Record for Security Association (SA)
+ */
+struct sa_state_record {
+ u32 save_iv[4];
+ u32 save_hash_byte_cnt[2];
+ u32 save_digest[16];
+} __attribute__((packed));
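+
+/*
+ * Note (illustrative): the engine writes running hash state into
+ * save_digest; crypto4xx_copy_digest_to_dst() in crypto4xx_core.c
+ * copies SA_HASH_ALG_SHA1_DIGEST_SIZE (20) bytes of it back to the
+ * caller once a SHA-1 packet descriptor completes.
+ */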
+
+/**
+ * Security Association (SA) for AES128
+ */
+struct dynamic_sa_aes128 {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[4];
+	u32 key[4];
+	u32 iv[4]; /* for CBC, OFB, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_AES128_LEN (sizeof(struct dynamic_sa_aes128)/4)
+#define SA_AES128_CONTENTS 0x3e000042
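+
+/*
+ * Decoding 0x3e000042 against union dynamic_sa_contents
+ * (illustrative): state_ptr and iv0..iv3 are set (0x3e in the top
+ * byte), key_size == 4 words and cmd_size == 2 (low byte 0x42),
+ * i.e. a 128-bit key, a four-word IV and a state-record pointer,
+ * exactly the fields struct dynamic_sa_aes128 carries.
+ */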
+
+/*
+ * Security Association (SA) for AES192
+ */
+struct dynamic_sa_aes192 {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[6];
+	u32 iv[4]; /* for CBC, OFB, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_AES192_LEN (sizeof(struct dynamic_sa_aes192)/4)
+#define SA_AES192_CONTENTS 0x3e000062
+
+/**
+ * Security Association (SA) for AES256
+ */
+struct dynamic_sa_aes256 {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[8];
+	u32 iv[4]; /* for CBC, OFB, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_AES256_LEN (sizeof(struct dynamic_sa_aes256)/4)
+#define SA_AES256_CONTENTS 0x3e000082
+#define SA_AES_CONTENTS 0x3e000002
+
+/**
+ * Security Association (SA) for HASH160: HMAC-SHA1
+ */
+struct dynamic_sa_hash160 {
+ struct dynamic_sa_ctl ctrl;
+ u32 inner_digest[5];
+ u32 outer_digest[5];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_HASH160_LEN (sizeof(struct dynamic_sa_hash160)/4)
+#define SA_HASH160_CONTENTS 0x2000a502
+
+#endif
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 5ea3bfad172..640c9920724 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -56,8 +56,12 @@ if IDE
comment "Please see Documentation/ide/ide.txt for help/info on IDE drives"
+config IDE_XFER_MODE
+ bool
+
config IDE_TIMINGS
bool
+ select IDE_XFER_MODE
config IDE_ATAPI
bool
@@ -698,6 +702,7 @@ config BLK_DEV_IDE_PMAC_ATA100FIRST
config BLK_DEV_IDE_AU1XXX
bool "IDE for AMD Alchemy Au1200"
depends on SOC_AU1200
+ select IDE_XFER_MODE
choice
prompt "IDE Mode for AMD Alchemy Au1200"
default CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
@@ -871,6 +876,7 @@ config BLK_DEV_ALI14XX
config BLK_DEV_DTC2278
tristate "DTC-2278 support"
+ select IDE_XFER_MODE
select IDE_LEGACY
help
This driver is enabled at runtime using the "dtc2278.probe" kernel
@@ -902,6 +908,7 @@ config BLK_DEV_QD65XX
config BLK_DEV_UMC8672
tristate "UMC-8672 support"
+ select IDE_XFER_MODE
select IDE_LEGACY
help
This driver is enabled at runtime using the "umc8672.probe" kernel
@@ -915,5 +922,6 @@ endif
config BLK_DEV_IDEDMA
def_bool BLK_DEV_IDEDMA_SFF || \
BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
+ select IDE_XFER_MODE
endif # IDE
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index 1c326d94aa6..9b4bbe1cdc1 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -5,9 +5,11 @@
EXTRA_CFLAGS += -Idrivers/ide
ide-core-y += ide.o ide-ioctls.o ide-io.o ide-iops.o ide-lib.o ide-probe.o \
- ide-taskfile.o ide-pm.o ide-park.o ide-pio-blacklist.o ide-sysfs.o
+ ide-taskfile.o ide-pm.o ide-park.o ide-sysfs.o ide-devsets.o \
+ ide-io-std.o ide-eh.o
# core IDE code
+ide-core-$(CONFIG_IDE_XFER_MODE) += ide-pio-blacklist.o ide-xfer-mode.o
ide-core-$(CONFIG_IDE_TIMINGS) += ide-timings.o
ide-core-$(CONFIG_IDE_ATAPI) += ide-atapi.o
ide-core-$(CONFIG_BLK_DEV_IDEPCI) += setup-pci.o
diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
index 4485b9c6f0e..878f8ec6dbe 100644
--- a/drivers/ide/aec62xx.c
+++ b/drivers/ide/aec62xx.c
@@ -139,7 +139,7 @@ static void aec_set_pio_mode(ide_drive_t *drive, const u8 pio)
drive->hwif->port_ops->set_dma_mode(drive, pio + XFER_PIO_0);
}
-static unsigned int init_chipset_aec62xx(struct pci_dev *dev)
+static int init_chipset_aec62xx(struct pci_dev *dev)
{
/* These are necessary to get AEC6280 Macintosh cards to work */
if ((dev->device == PCI_DEVICE_ID_ARTOP_ATP865) ||
@@ -156,7 +156,7 @@ static unsigned int init_chipset_aec62xx(struct pci_dev *dev)
pci_write_config_byte(dev, 0x4a, reg4ah | 0x80);
}
- return dev->irq;
+ return 0;
}
static u8 atp86x_cable_detect(ide_hwif_t *hwif)
diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
index 66f43083408..d3513b6b853 100644
--- a/drivers/ide/alim15x3.c
+++ b/drivers/ide/alim15x3.c
@@ -212,7 +212,7 @@ static int ali15x3_dma_setup(ide_drive_t *drive)
* appropriate also sets up the 1533 southbridge.
*/
-static unsigned int init_chipset_ali15x3(struct pci_dev *dev)
+static int init_chipset_ali15x3(struct pci_dev *dev)
{
unsigned long flags;
u8 tmpbyte;
diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
index 77267c85996..628cd2e5fed 100644
--- a/drivers/ide/amd74xx.c
+++ b/drivers/ide/amd74xx.c
@@ -140,7 +140,7 @@ static void amd7411_cable_detect(struct pci_dev *dev)
* The initialization callback. Initialize drive independent registers.
*/
-static unsigned int init_chipset_amd74xx(struct pci_dev *dev)
+static int init_chipset_amd74xx(struct pci_dev *dev)
{
u8 t = 0, offset = amd_offset(dev);
@@ -172,7 +172,7 @@ static unsigned int init_chipset_amd74xx(struct pci_dev *dev)
t |= 0xf0;
pci_write_config_byte(dev, AMD_IDE_CONFIG + offset, t);
- return dev->irq;
+ return 0;
}
static u8 amd_cable_detect(ide_hwif_t *hwif)
@@ -183,14 +183,6 @@ static u8 amd_cable_detect(ide_hwif_t *hwif)
return ATA_CBL_PATA40;
}
-static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
-
- if (hwif->irq == 0) /* 0 is bogus but will do for now */
- hwif->irq = pci_get_legacy_ide_irq(dev, hwif->channel);
-}
-
static const struct ide_port_ops amd_port_ops = {
.set_pio_mode = amd_set_pio_mode,
.set_dma_mode = amd_set_drive,
@@ -207,7 +199,6 @@ static const struct ide_port_ops amd_port_ops = {
{ \
.name = DRV_NAME, \
.init_chipset = init_chipset_amd74xx, \
- .init_hwif = init_hwif_amd74xx, \
.enablebits = {{0x40,0x02,0x02}, {0x40,0x01,0x01}}, \
.port_ops = &amd_port_ops, \
.host_flags = IDE_HFLAGS_AMD, \
@@ -221,7 +212,6 @@ static const struct ide_port_ops amd_port_ops = {
{ \
.name = DRV_NAME, \
.init_chipset = init_chipset_amd74xx, \
- .init_hwif = init_hwif_amd74xx, \
.enablebits = {{0x50,0x02,0x02}, {0x50,0x01,0x01}}, \
.port_ops = &amd_port_ops, \
.host_flags = IDE_HFLAGS_AMD, \
diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
index ecd1e62ca91..923cbfe259d 100644
--- a/drivers/ide/atiixp.c
+++ b/drivers/ide/atiixp.c
@@ -142,7 +142,6 @@ static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
.name = DRV_NAME,
.enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
.port_ops = &atiixp_port_ops,
- .host_flags = IDE_HFLAG_LEGACY_IRQS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
@@ -151,7 +150,7 @@ static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
.name = DRV_NAME,
.enablebits = {{0x48,0x01,0x00}, {0x00,0x00,0x00}},
.port_ops = &atiixp_port_ops,
- .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_LEGACY_IRQS,
+ .host_flags = IDE_HFLAG_SINGLE,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
index 2f9688d87ec..aeee036b150 100644
--- a/drivers/ide/cmd64x.c
+++ b/drivers/ide/cmd64x.c
@@ -333,7 +333,7 @@ static int cmd646_1_dma_end(ide_drive_t *drive)
return (dma_stat & 7) != 4;
}
-static unsigned int init_chipset_cmd64x(struct pci_dev *dev)
+static int init_chipset_cmd64x(struct pci_dev *dev)
{
u8 mrdmode = 0;
diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
index d003bec56ff..58fb90e5b76 100644
--- a/drivers/ide/cs5520.c
+++ b/drivers/ide/cs5520.c
@@ -133,7 +133,8 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
* do all the device setup for us
*/
- ide_pci_setup_ports(dev, d, 14, &hw[0], &hws[0]);
+ ide_pci_setup_ports(dev, d, &hw[0], &hws[0]);
+ hw[0].irq = 14;
return ide_host_add(d, hws, NULL);
}
diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
index d8ede85fe17..8e8b35a8990 100644
--- a/drivers/ide/cs5530.c
+++ b/drivers/ide/cs5530.c
@@ -135,7 +135,7 @@ static void cs5530_set_dma_mode(ide_drive_t *drive, const u8 mode)
* Initialize the cs5530 bridge for reliable IDE DMA operation.
*/
-static unsigned int init_chipset_cs5530(struct pci_dev *dev)
+static int init_chipset_cs5530(struct pci_dev *dev)
{
struct pci_dev *master_0 = NULL, *cs5530_0 = NULL;
diff --git a/drivers/ide/delkin_cb.c b/drivers/ide/delkin_cb.c
index 8f1b2d9f051..bacb1194c9c 100644
--- a/drivers/ide/delkin_cb.c
+++ b/drivers/ide/delkin_cb.c
@@ -46,7 +46,7 @@ static const struct ide_port_ops delkin_cb_port_ops = {
.quirkproc = ide_undecoded_slave,
};
-static unsigned int delkin_cb_init_chipset(struct pci_dev *dev)
+static int delkin_cb_init_chipset(struct pci_dev *dev)
{
unsigned long base = pci_resource_start(dev, 0);
int i;
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index 3eb9b5c63a0..d3b3e824f44 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -995,7 +995,7 @@ static void hpt3xx_disable_fast_irq(struct pci_dev *dev, u8 mcr_addr)
pci_write_config_byte(dev, mcr_addr + 1, new_mcr);
}
-static unsigned int init_chipset_hpt366(struct pci_dev *dev)
+static int init_chipset_hpt366(struct pci_dev *dev)
{
unsigned long io_base = pci_resource_start(dev, 4);
struct hpt_info *info = hpt3xx_get_info(&dev->dev);
@@ -1237,7 +1237,7 @@ static unsigned int init_chipset_hpt366(struct pci_dev *dev)
hpt3xx_disable_fast_irq(dev, 0x50);
hpt3xx_disable_fast_irq(dev, 0x54);
- return dev->irq;
+ return 0;
}
static u8 hpt3xx_cable_detect(ide_hwif_t *hwif)
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index ec7d07fa570..5b704f1ea90 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -20,9 +20,6 @@
#include <acpi/acpi_bus.h>
#define REGS_PER_GTF 7
-struct taskfile_array {
- u8 tfa[REGS_PER_GTF]; /* regs. 0x1f1 - 0x1f7 */
-};
struct GTM_buffer {
u32 PIO_speed0;
@@ -89,12 +86,8 @@ static const struct dmi_system_id ide_acpi_dmi_table[] = {
{ } /* terminate list */
};
-static int ide_acpi_blacklist(void)
+int ide_acpi_init(void)
{
- static int done;
- if (done)
- return 0;
- done = 1;
dmi_check_system(ide_acpi_dmi_table);
return 0;
}
@@ -202,40 +195,6 @@ static acpi_handle ide_acpi_hwif_get_handle(ide_hwif_t *hwif)
}
/**
- * ide_acpi_drive_get_handle - Get ACPI object handle for a given drive
- * @drive: device to locate
- *
- * Retrieves the object handle of a given drive. According to the ACPI
- * spec the drive is a child of the hwif.
- *
- * Returns handle on success, 0 on error.
- */
-static acpi_handle ide_acpi_drive_get_handle(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- int port;
- acpi_handle drive_handle;
-
- if (!hwif->acpidata)
- return NULL;
-
- if (!hwif->acpidata->obj_handle)
- return NULL;
-
- port = hwif->channel ? drive->dn - 2: drive->dn;
-
- DEBPRINT("ENTER: %s at channel#: %d port#: %d\n",
- drive->name, hwif->channel, port);
-
-
- /* TBD: could also check ACPI object VALID bits */
- drive_handle = acpi_get_child(hwif->acpidata->obj_handle, port);
- DEBPRINT("drive %s handle 0x%p\n", drive->name, drive_handle);
-
- return drive_handle;
-}
-
-/**
* do_drive_get_GTF - get the drive bootup default taskfile settings
* @drive: the drive for which the taskfile settings should be retrieved
* @gtf_length: number of bytes of _GTF data returned at @gtf_address
@@ -257,47 +216,15 @@ static int do_drive_get_GTF(ide_drive_t *drive,
acpi_status status;
struct acpi_buffer output;
union acpi_object *out_obj;
- ide_hwif_t *hwif = drive->hwif;
- struct device *dev = hwif->gendev.parent;
int err = -ENODEV;
- int port;
*gtf_length = 0;
*gtf_address = 0UL;
*obj_loc = 0UL;
- if (ide_noacpi)
- return 0;
-
- if (!dev) {
- DEBPRINT("no PCI device for %s\n", hwif->name);
- goto out;
- }
-
- if (!hwif->acpidata) {
- DEBPRINT("no ACPI data for %s\n", hwif->name);
- goto out;
- }
-
- port = hwif->channel ? drive->dn - 2: drive->dn;
-
- DEBPRINT("ENTER: %s at %s, port#: %d, hard_port#: %d\n",
- hwif->name, dev_name(dev), port, hwif->channel);
-
- if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0) {
- DEBPRINT("%s drive %d:%d not present\n",
- hwif->name, hwif->channel, port);
- goto out;
- }
-
- /* Get this drive's _ADR info. if not already known. */
if (!drive->acpidata->obj_handle) {
- drive->acpidata->obj_handle = ide_acpi_drive_get_handle(drive);
- if (!drive->acpidata->obj_handle) {
- DEBPRINT("No ACPI object found for %s\n",
- drive->name);
- goto out;
- }
+ DEBPRINT("No ACPI object found for %s\n", drive->name);
+ goto out;
}
/* Setting up output buffer */
@@ -355,43 +282,6 @@ out:
}
/**
- * taskfile_load_raw - send taskfile registers to drive
- * @drive: drive to which output is sent
- * @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7)
- *
- * Outputs IDE taskfile to the drive.
- */
-static int taskfile_load_raw(ide_drive_t *drive,
- const struct taskfile_array *gtf)
-{
- ide_task_t args;
- int err = 0;
-
- DEBPRINT("(0x1f1-1f7): hex: "
- "%02x %02x %02x %02x %02x %02x %02x\n",
- gtf->tfa[0], gtf->tfa[1], gtf->tfa[2],
- gtf->tfa[3], gtf->tfa[4], gtf->tfa[5], gtf->tfa[6]);
-
- memset(&args, 0, sizeof(ide_task_t));
-
- /* convert gtf to IDE Taskfile */
- memcpy(&args.tf_array[7], &gtf->tfa, 7);
- args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
-
- if (!ide_acpigtf) {
- DEBPRINT("_GTF execution disabled\n");
- return err;
- }
-
- err = ide_no_data_taskfile(drive, &args);
- if (err)
- printk(KERN_ERR "%s: ide_no_data_taskfile failed: %u\n",
- __func__, err);
-
- return err;
-}
-
-/**
* do_drive_set_taskfiles - write the drive taskfile settings from _GTF
* @drive: the drive to which the taskfile command should be sent
* @gtf_length: total number of bytes of _GTF taskfiles
@@ -404,43 +294,41 @@ static int do_drive_set_taskfiles(ide_drive_t *drive,
unsigned int gtf_length,
unsigned long gtf_address)
{
- int rc = -ENODEV, err;
+ int rc = 0, err;
int gtf_count = gtf_length / REGS_PER_GTF;
int ix;
- struct taskfile_array *gtf;
-
- if (ide_noacpi)
- return 0;
-
- DEBPRINT("ENTER: %s, hard_port#: %d\n", drive->name, drive->dn);
-
- if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
- goto out;
-
- if (!gtf_count) /* shouldn't be here */
- goto out;
DEBPRINT("total GTF bytes=%u (0x%x), gtf_count=%d, addr=0x%lx\n",
gtf_length, gtf_length, gtf_count, gtf_address);
- if (gtf_length % REGS_PER_GTF) {
- printk(KERN_ERR "%s: unexpected GTF length (%d)\n",
- __func__, gtf_length);
- goto out;
- }
-
- rc = 0;
+ /* send all taskfile registers (0x1f1-0x1f7) *in*that*order* */
for (ix = 0; ix < gtf_count; ix++) {
- gtf = (struct taskfile_array *)
- (gtf_address + ix * REGS_PER_GTF);
+ u8 *gtf = (u8 *)(gtf_address + ix * REGS_PER_GTF);
+ ide_task_t task;
- /* send all TaskFile registers (0x1f1-0x1f7) *in*that*order* */
- err = taskfile_load_raw(drive, gtf);
- if (err)
+ DEBPRINT("(0x1f1-1f7): "
+ "hex: %02x %02x %02x %02x %02x %02x %02x\n",
+ gtf[0], gtf[1], gtf[2],
+ gtf[3], gtf[4], gtf[5], gtf[6]);
+
+ if (!ide_acpigtf) {
+ DEBPRINT("_GTF execution disabled\n");
+ continue;
+ }
+
+ /* convert GTF to taskfile */
+ memset(&task, 0, sizeof(ide_task_t));
+ memcpy(&task.tf_array[7], gtf, REGS_PER_GTF);
+ task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+
+ err = ide_no_data_taskfile(drive, &task);
+ if (err) {
+ printk(KERN_ERR "%s: ide_no_data_taskfile failed: %u\n",
+ __func__, err);
rc = err;
+ }
}
-out:
return rc;
}
@@ -647,26 +535,23 @@ void ide_acpi_set_state(ide_hwif_t *hwif, int on)
DEBPRINT("no ACPI data for %s\n", hwif->name);
return;
}
+
	/* channel first and then drives for power on and vice versa for power off */
if (on)
acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D0);
- ide_port_for_each_dev(i, drive, hwif) {
- if (!drive->acpidata->obj_handle)
- drive->acpidata->obj_handle = ide_acpi_drive_get_handle(drive);
-
- if (drive->acpidata->obj_handle &&
- (drive->dev_flags & IDE_DFLAG_PRESENT)) {
+ ide_port_for_each_present_dev(i, drive, hwif) {
+ if (drive->acpidata->obj_handle)
acpi_bus_set_power(drive->acpidata->obj_handle,
- on? ACPI_STATE_D0: ACPI_STATE_D3);
- }
+ on ? ACPI_STATE_D0 : ACPI_STATE_D3);
}
+
if (!on)
acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D3);
}
/**
- * ide_acpi_init - initialize the ACPI link for an IDE interface
+ * ide_acpi_init_port - initialize the ACPI link for an IDE interface
* @hwif: target IDE interface (channel)
*
* The ACPI spec is not quite clear when the drive identify buffer
@@ -676,10 +561,8 @@ void ide_acpi_set_state(ide_hwif_t *hwif, int on)
* So we get the information during startup; but this means that
* any changes during run-time will be lost after resume.
*/
-void ide_acpi_init(ide_hwif_t *hwif)
+void ide_acpi_init_port(ide_hwif_t *hwif)
{
- ide_acpi_blacklist();
-
hwif->acpidata = kzalloc(sizeof(struct ide_acpi_hwif_link), GFP_KERNEL);
if (!hwif->acpidata)
return;
@@ -708,15 +591,24 @@ void ide_acpi_port_init_devices(ide_hwif_t *hwif)
hwif->devices[0]->acpidata = &hwif->acpidata->master;
hwif->devices[1]->acpidata = &hwif->acpidata->slave;
- /*
- * Send IDENTIFY for each drive
- */
- ide_port_for_each_dev(i, drive, hwif) {
- memset(drive->acpidata, 0, sizeof(*drive->acpidata));
+ /* get _ADR info for each device */
+ ide_port_for_each_present_dev(i, drive, hwif) {
+ acpi_handle dev_handle;
- if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
- continue;
+ DEBPRINT("ENTER: %s at channel#: %d port#: %d\n",
+ drive->name, hwif->channel, drive->dn & 1);
+
+ /* TBD: could also check ACPI object VALID bits */
+ dev_handle = acpi_get_child(hwif->acpidata->obj_handle,
+ drive->dn & 1);
+
+ DEBPRINT("drive %s handle 0x%p\n", drive->name, dev_handle);
+
+ drive->acpidata->obj_handle = dev_handle;
+ }
+ /* send IDENTIFY for each device */
+ ide_port_for_each_present_dev(i, drive, hwif) {
err = taskfile_lib_get_identify(drive, drive->acpidata->idbuff);
if (err)
DEBPRINT("identify device %s failed (%d)\n",
@@ -736,9 +628,7 @@ void ide_acpi_port_init_devices(ide_hwif_t *hwif)
ide_acpi_get_timing(hwif);
ide_acpi_push_timing(hwif);
- ide_port_for_each_dev(i, drive, hwif) {
- if (drive->dev_flags & IDE_DFLAG_PRESENT)
- /* Execute ACPI startup code */
- ide_acpi_exec_tfs(drive);
+ ide_port_for_each_present_dev(i, drive, hwif) {
+ ide_acpi_exec_tfs(drive);
}
}
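With taskfile_load_raw() folded into do_drive_set_taskfiles(), the _GTF buffer is now walked directly as consecutive 7-byte register blocks instead of through the removed struct taskfile_array. A standalone sketch of that iteration; the buffer contents here are made-up example data:

#include <stdio.h>

#define REGS_PER_GTF 7

int main(void)
{
	/* two fake _GTF entries, 7 taskfile bytes each (regs 0x1f1-0x1f7) */
	unsigned char gtf_buf[2 * REGS_PER_GTF] = {
		0x03, 0x0c, 0x00, 0x00, 0x00, 0xa0, 0xef,
		0x00, 0x10, 0x00, 0x00, 0x00, 0xa0, 0xc6,
	};
	unsigned int gtf_length = sizeof(gtf_buf);
	int gtf_count = gtf_length / REGS_PER_GTF;
	int ix;

	for (ix = 0; ix < gtf_count; ix++) {
		unsigned char *gtf = gtf_buf + ix * REGS_PER_GTF;

		/* the kernel copies these 7 bytes into a taskfile,
		 * sets IDE_TFLAG_TF | IDE_TFLAG_DEVICE and issues it */
		printf("GTF %d: %02x %02x %02x %02x %02x %02x %02x\n", ix,
		       gtf[0], gtf[1], gtf[2], gtf[3], gtf[4], gtf[5], gtf[6]);
	}
	return 0;
}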
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index e9d042dba0e..6adc5b4a440 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -149,7 +149,10 @@ static void ide_queue_pc_head(ide_drive_t *drive, struct gendisk *disk,
memcpy(rq->cmd, pc->c, 12);
if (drive->media == ide_tape)
rq->cmd[13] = REQ_IDETAPE_PC1;
- ide_do_drive_cmd(drive, rq);
+
+ drive->hwif->rq = NULL;
+
+ elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
}
/*
@@ -297,6 +300,21 @@ int ide_cd_get_xferlen(struct request *rq)
}
EXPORT_SYMBOL_GPL(ide_cd_get_xferlen);
+void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason)
+{
+ ide_task_t task;
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_IN_LBAH | IDE_TFLAG_IN_LBAM |
+ IDE_TFLAG_IN_NSECT;
+
+ drive->hwif->tp_ops->tf_read(drive, &task);
+
+ *bcount = (task.tf.lbah << 8) | task.tf.lbam;
+ *ireason = task.tf.nsect & 3;
+}
+EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason);
+
/*
* This is the usual interrupt handler which will be called during a packet
* command. We will transfer some of the data (as requested by the drive)
@@ -456,6 +474,25 @@ next_irq:
return ide_started;
}
+static void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ ide_task_t task;
+ u8 dma = drive->dma;
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM |
+ IDE_TFLAG_OUT_FEATURE | tf_flags;
+ task.tf.feature = dma; /* Use PIO/DMA */
+ task.tf.lbam = bcount & 0xff;
+ task.tf.lbah = (bcount >> 8) & 0xff;
+
+ ide_tf_dump(drive->name, &task.tf);
+ hwif->tp_ops->set_irq(hwif, 1);
+ SELECT_MASK(drive, 0);
+ hwif->tp_ops->tf_load(drive, &task);
+}
+
static u8 ide_read_ireason(ide_drive_t *drive)
{
ide_task_t task;
@@ -629,7 +666,7 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive)
: WAIT_TAPE_CMD;
}
- ide_pktcmd_tf_load(drive, tf_flags, bcount, drive->dma);
+ ide_pktcmd_tf_load(drive, tf_flags, bcount);
/* Issue the packet command */
if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
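The newly exported ide_read_bcount_and_ireason() above derives the byte count from the LBA-mid/LBA-high register pair and the interrupt reason from the low two bits of the sector count register. The decoding itself is plain bit arithmetic, shown standalone here with hypothetical register values:

#include <assert.h>
#include <stdint.h>

/* decode as ide_read_bcount_and_ireason() does, from raw register values */
static void decode(uint8_t lbah, uint8_t lbam, uint8_t nsect,
		   uint16_t *bcount, uint8_t *ireason)
{
	*bcount = (uint16_t)(lbah << 8) | lbam;
	*ireason = nsect & 3;
}

int main(void)
{
	uint16_t bcount;
	uint8_t ireason;

	decode(0x02, 0x00, 0x02, &bcount, &ireason);
	assert(bcount == 512);	/* 0x0200 bytes to transfer */
	assert(ireason == 2);	/* I/O=1, C/D=0: data from device */
	return 0;
}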
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index ddfbea41d29..2177cd11664 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -242,7 +242,9 @@ static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
ide_debug_log(IDE_DBG_SENSE, "failed_cmd: 0x%x\n",
failed_command->cmd[0]);
- ide_do_drive_cmd(drive, rq);
+ drive->hwif->rq = NULL;
+
+ elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
}
static void cdrom_end_request(ide_drive_t *drive, int uptodate)
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
new file mode 100644
index 00000000000..7c3953414d4
--- /dev/null
+++ b/drivers/ide/ide-devsets.c
@@ -0,0 +1,190 @@
+
+#include <linux/kernel.h>
+#include <linux/ide.h>
+
+DEFINE_MUTEX(ide_setting_mtx);
+
+ide_devset_get(io_32bit, io_32bit);
+
+static int set_io_32bit(ide_drive_t *drive, int arg)
+{
+ if (drive->dev_flags & IDE_DFLAG_NO_IO_32BIT)
+ return -EPERM;
+
+ if (arg < 0 || arg > 1 + (SUPPORT_VLB_SYNC << 1))
+ return -EINVAL;
+
+ drive->io_32bit = arg;
+
+ return 0;
+}
+
+ide_devset_get_flag(ksettings, IDE_DFLAG_KEEP_SETTINGS);
+
+static int set_ksettings(ide_drive_t *drive, int arg)
+{
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
+ if (arg)
+ drive->dev_flags |= IDE_DFLAG_KEEP_SETTINGS;
+ else
+ drive->dev_flags &= ~IDE_DFLAG_KEEP_SETTINGS;
+
+ return 0;
+}
+
+ide_devset_get_flag(using_dma, IDE_DFLAG_USING_DMA);
+
+static int set_using_dma(ide_drive_t *drive, int arg)
+{
+#ifdef CONFIG_BLK_DEV_IDEDMA
+ int err = -EPERM;
+
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
+ if (ata_id_has_dma(drive->id) == 0)
+ goto out;
+
+ if (drive->hwif->dma_ops == NULL)
+ goto out;
+
+ err = 0;
+
+ if (arg) {
+ if (ide_set_dma(drive))
+ err = -EIO;
+ } else
+ ide_dma_off(drive);
+
+out:
+ return err;
+#else
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
+ return -EPERM;
+#endif
+}
+
+/*
+ * handle HDIO_SET_PIO_MODE ioctl abusers here, eventually it will go away
+ */
+static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio)
+{
+ switch (req_pio) {
+ case 202:
+ case 201:
+ case 200:
+ case 102:
+ case 101:
+ case 100:
+ return (hwif->host_flags & IDE_HFLAG_ABUSE_DMA_MODES) ? 1 : 0;
+ case 9:
+ case 8:
+ return (hwif->host_flags & IDE_HFLAG_ABUSE_PREFETCH) ? 1 : 0;
+ case 7:
+ case 6:
+ return (hwif->host_flags & IDE_HFLAG_ABUSE_FAST_DEVSEL) ? 1 : 0;
+ default:
+ return 0;
+ }
+}
+
+static int set_pio_mode(ide_drive_t *drive, int arg)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+
+ if (arg < 0 || arg > 255)
+ return -EINVAL;
+
+ if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
+ (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
+ return -ENOSYS;
+
+ if (set_pio_mode_abuse(drive->hwif, arg)) {
+ if (arg == 8 || arg == 9) {
+ unsigned long flags;
+
+ /* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */
+ spin_lock_irqsave(&hwif->lock, flags);
+ port_ops->set_pio_mode(drive, arg);
+ spin_unlock_irqrestore(&hwif->lock, flags);
+ } else
+ port_ops->set_pio_mode(drive, arg);
+ } else {
+ int keep_dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
+
+ ide_set_pio(drive, arg);
+
+ if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) {
+ if (keep_dma)
+ ide_dma_on(drive);
+ }
+ }
+
+ return 0;
+}
+
+ide_devset_get_flag(unmaskirq, IDE_DFLAG_UNMASK);
+
+static int set_unmaskirq(ide_drive_t *drive, int arg)
+{
+ if (drive->dev_flags & IDE_DFLAG_NO_UNMASK)
+ return -EPERM;
+
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
+ if (arg)
+ drive->dev_flags |= IDE_DFLAG_UNMASK;
+ else
+ drive->dev_flags &= ~IDE_DFLAG_UNMASK;
+
+ return 0;
+}
+
+ide_ext_devset_rw_sync(io_32bit, io_32bit);
+ide_ext_devset_rw_sync(keepsettings, ksettings);
+ide_ext_devset_rw_sync(unmaskirq, unmaskirq);
+ide_ext_devset_rw_sync(using_dma, using_dma);
+__IDE_DEVSET(pio_mode, DS_SYNC, NULL, set_pio_mode);
+
+int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
+ int arg)
+{
+ struct request_queue *q = drive->queue;
+ struct request *rq;
+ int ret = 0;
+
+ if (!(setting->flags & DS_SYNC))
+ return setting->set(drive, arg);
+
+ rq = blk_get_request(q, READ, __GFP_WAIT);
+ rq->cmd_type = REQ_TYPE_SPECIAL;
+ rq->cmd_len = 5;
+ rq->cmd[0] = REQ_DEVSET_EXEC;
+ *(int *)&rq->cmd[1] = arg;
+ rq->special = setting->set;
+
+ if (blk_execute_rq(q, NULL, rq, 0))
+ ret = rq->errors;
+ blk_put_request(rq);
+
+ return ret;
+}
+
+ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq)
+{
+ int err, (*setfunc)(ide_drive_t *, int) = rq->special;
+
+ err = setfunc(drive, *(int *)&rq->cmd[1]);
+ if (err)
+ rq->errors = err;
+ else
+ err = 1;
+ ide_end_request(drive, err, 0);
+ return ide_stopped;
+}
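ide_devset_execute() above ferries a synchronous setting through the block layer by packing the integer argument into bytes 1-4 of rq->cmd[] and the setter pointer into rq->special; ide_do_devset() unpacks both on the other side. A self-contained model of that round-trip — the request type is a stub, and where the kernel uses a direct *(int *) cast the sketch uses memcpy for alignment safety:

#include <assert.h>
#include <string.h>

#define REQ_DEVSET_EXEC 0x20	/* illustrative opcode value */

struct fake_request {		/* stand-in for struct request */
	unsigned char cmd[16];
	unsigned short cmd_len;
};

int main(void)
{
	struct fake_request rq = { .cmd_len = 5 };
	int arg = 3, out;

	rq.cmd[0] = REQ_DEVSET_EXEC;
	/* kernel: *(int *)&rq->cmd[1] = arg; */
	memcpy(&rq.cmd[1], &arg, sizeof(arg));

	/* ... request travels through the block layer ... */

	/* kernel: setfunc(drive, *(int *)&rq->cmd[1]); */
	memcpy(&out, &rq.cmd[1], sizeof(out));
	assert(out == 3);	/* ide_do_devset() recovers the argument */
	return 0;
}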
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 059c90bb5ad..a878f4734f8 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -470,6 +470,63 @@ void ide_dma_timeout(ide_drive_t *drive)
}
EXPORT_SYMBOL_GPL(ide_dma_timeout);
+/*
+ * Un-busy the port etc. and clear any pending DMA status. We want to
+ * retry the current request in PIO mode instead of risking tossing it
+ * all away.
+ */
+ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct request *rq;
+ ide_startstop_t ret = ide_stopped;
+
+ /*
+ * end current dma transaction
+ */
+
+ if (error < 0) {
+ printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
+ (void)hwif->dma_ops->dma_end(drive);
+ ret = ide_error(drive, "dma timeout error",
+ hwif->tp_ops->read_status(hwif));
+ } else {
+ printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
+ hwif->dma_ops->dma_timeout(drive);
+ }
+
+ /*
+ * disable dma for now, but remember that we did so because of
+ * a timeout -- we'll reenable after we finish this next request
+ * (or rather the first chunk of it) in pio.
+ */
+ drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
+ drive->retry_pio++;
+ ide_dma_off_quietly(drive);
+
+ /*
+ * un-busy drive etc and make sure request is sane
+ */
+
+ rq = hwif->rq;
+ if (!rq)
+ goto out;
+
+ hwif->rq = NULL;
+
+ rq->errors = 0;
+
+ if (!rq->bio)
+ goto out;
+
+ rq->sector = rq->bio->bi_sector;
+ rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
+ rq->hard_cur_sectors = rq->current_nr_sectors;
+ rq->buffer = bio_data(rq->bio);
+out:
+ return ret;
+}
+
void ide_release_dma_engine(ide_hwif_t *hwif)
{
if (hwif->dmatable_cpu) {
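ide_dma_timeout_retry() records why DMA was switched off by setting IDE_DFLAG_DMA_PIO_RETRY, so the port can transparently re-enable DMA once the retried request has gone through in PIO. A toy model of that flag handshake — the bit values are illustrative and the re-enable step is an assumption about the request-completion path, which lives elsewhere in ide-io.c:

#include <assert.h>

#define IDE_DFLAG_USING_DMA	(1u << 0)	/* illustrative bit values */
#define IDE_DFLAG_DMA_PIO_RETRY	(1u << 1)

struct fake_drive {
	unsigned int dev_flags;
	int retry_pio;
};

static void dma_timeout_retry(struct fake_drive *d)
{
	d->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
	d->retry_pio++;
	d->dev_flags &= ~IDE_DFLAG_USING_DMA;	/* ide_dma_off_quietly() */
}

static void request_done(struct fake_drive *d)
{
	if (d->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) {
		d->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
		d->dev_flags |= IDE_DFLAG_USING_DMA;	/* re-enable DMA */
	}
}

int main(void)
{
	struct fake_drive d = { .dev_flags = IDE_DFLAG_USING_DMA };

	dma_timeout_retry(&d);
	assert(!(d.dev_flags & IDE_DFLAG_USING_DMA));	/* retried in PIO */
	request_done(&d);
	assert(d.dev_flags & IDE_DFLAG_USING_DMA);	/* DMA back on */
	return 0;
}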
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c
new file mode 100644
index 00000000000..1231b5e486f
--- /dev/null
+++ b/drivers/ide/ide-eh.c
@@ -0,0 +1,428 @@
+
+#include <linux/kernel.h>
+#include <linux/ide.h>
+#include <linux/delay.h>
+
+static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq,
+ u8 stat, u8 err)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ if ((stat & ATA_BUSY) ||
+ ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
+ /* other bits are useless when BUSY */
+ rq->errors |= ERROR_RESET;
+ } else if (stat & ATA_ERR) {
+ /* err has different meaning on cdrom and tape */
+ if (err == ATA_ABORTED) {
+ if ((drive->dev_flags & IDE_DFLAG_LBA) &&
+ /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */
+ hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS)
+ return ide_stopped;
+ } else if ((err & BAD_CRC) == BAD_CRC) {
+ /* UDMA crc error, just retry the operation */
+ drive->crc_count++;
+ } else if (err & (ATA_BBK | ATA_UNC)) {
+ /* retries won't help these */
+ rq->errors = ERROR_MAX;
+ } else if (err & ATA_TRK0NF) {
+ /* help it find track zero */
+ rq->errors |= ERROR_RECAL;
+ }
+ }
+
+ if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ &&
+ (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
+ int nsect = drive->mult_count ? drive->mult_count : 1;
+
+ ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
+ }
+
+ if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
+ ide_kill_rq(drive, rq);
+ return ide_stopped;
+ }
+
+ if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
+ rq->errors |= ERROR_RESET;
+
+ if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
+ ++rq->errors;
+ return ide_do_reset(drive);
+ }
+
+ if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
+ drive->special.b.recalibrate = 1;
+
+ ++rq->errors;
+
+ return ide_stopped;
+}
+
+static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq,
+ u8 stat, u8 err)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ if ((stat & ATA_BUSY) ||
+ ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
+ /* other bits are useless when BUSY */
+ rq->errors |= ERROR_RESET;
+ } else {
+ /* add decoding error stuff */
+ }
+
+ if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
+ /* force an abort */
+ hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE);
+
+ if (rq->errors >= ERROR_MAX) {
+ ide_kill_rq(drive, rq);
+ } else {
+ if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
+ ++rq->errors;
+ return ide_do_reset(drive);
+ }
+ ++rq->errors;
+ }
+
+ return ide_stopped;
+}
+
+static ide_startstop_t __ide_error(ide_drive_t *drive, struct request *rq,
+ u8 stat, u8 err)
+{
+ if (drive->media == ide_disk)
+ return ide_ata_error(drive, rq, stat, err);
+ return ide_atapi_error(drive, rq, stat, err);
+}
+
+/**
+ * ide_error - handle an error on the IDE
+ * @drive: drive the error occurred on
+ * @msg: message to report
+ * @stat: status bits
+ *
+ * ide_error() takes action based on the error returned by the drive.
+ * For normal I/O that may well include retries. We deal with
+ * both new-style (taskfile) and old style command handling here.
+ * In the case of taskfile command handling there is work left to
+ * do
+ */
+
+ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
+{
+ struct request *rq;
+ u8 err;
+
+ err = ide_dump_status(drive, msg, stat);
+
+ rq = drive->hwif->rq;
+ if (rq == NULL)
+ return ide_stopped;
+
+ /* retry only "normal" I/O: */
+ if (!blk_fs_request(rq)) {
+ rq->errors = 1;
+ ide_end_drive_cmd(drive, stat, err);
+ return ide_stopped;
+ }
+
+ return __ide_error(drive, rq, stat, err);
+}
+EXPORT_SYMBOL_GPL(ide_error);
+
+static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
+{
+ struct request *rq = drive->hwif->rq;
+
+ if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET)
+ ide_end_request(drive, err ? err : 1, 0);
+}
+
+/* needed below */
+static ide_startstop_t do_reset1(ide_drive_t *, int);
+
+/*
+ * atapi_reset_pollfunc() gets invoked to poll the interface for completion
+ * every 50ms during an atapi drive reset operation. If the drive has not yet
+ * responded, and we have not yet hit our maximum waiting time, then the timer
+ * is restarted for another 50ms.
+ */
+static ide_startstop_t atapi_reset_pollfunc(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u8 stat;
+
+ SELECT_DRIVE(drive);
+ udelay(10);
+ stat = hwif->tp_ops->read_status(hwif);
+
+ if (OK_STAT(stat, 0, ATA_BUSY))
+ printk(KERN_INFO "%s: ATAPI reset complete\n", drive->name);
+ else {
+ if (time_before(jiffies, hwif->poll_timeout)) {
+ ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20,
+ NULL);
+ /* continue polling */
+ return ide_started;
+ }
+ /* end of polling */
+ hwif->polling = 0;
+ printk(KERN_ERR "%s: ATAPI reset timed-out, status=0x%02x\n",
+ drive->name, stat);
+ /* do it the old fashioned way */
+ return do_reset1(drive, 1);
+ }
+ /* done polling */
+ hwif->polling = 0;
+ ide_complete_drive_reset(drive, 0);
+ return ide_stopped;
+}
+
+static void ide_reset_report_error(ide_hwif_t *hwif, u8 err)
+{
+ static const char *err_master_vals[] =
+ { NULL, "passed", "formatter device error",
+ "sector buffer error", "ECC circuitry error",
+ "controlling MPU error" };
+
+ u8 err_master = err & 0x7f;
+
+ printk(KERN_ERR "%s: reset: master: ", hwif->name);
+ if (err_master && err_master < 6)
+ printk(KERN_CONT "%s", err_master_vals[err_master]);
+ else
+ printk(KERN_CONT "error (0x%02x?)", err);
+ if (err & 0x80)
+ printk(KERN_CONT "; slave: failed");
+ printk(KERN_CONT "\n");
+}
+
+/*
+ * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
+ * during an ide reset operation. If the drives have not yet responded,
+ * and we have not yet hit our maximum waiting time, then the timer is restarted
+ * for another 50ms.
+ */
+static ide_startstop_t reset_pollfunc(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+ u8 tmp;
+ int err = 0;
+
+ if (port_ops && port_ops->reset_poll) {
+ err = port_ops->reset_poll(drive);
+ if (err) {
+ printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
+ hwif->name, drive->name);
+ goto out;
+ }
+ }
+
+ tmp = hwif->tp_ops->read_status(hwif);
+
+ if (!OK_STAT(tmp, 0, ATA_BUSY)) {
+ if (time_before(jiffies, hwif->poll_timeout)) {
+ ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
+ /* continue polling */
+ return ide_started;
+ }
+ printk(KERN_ERR "%s: reset timed-out, status=0x%02x\n",
+ hwif->name, tmp);
+ drive->failures++;
+ err = -EIO;
+ } else {
+ tmp = ide_read_error(drive);
+
+ if (tmp == 1) {
+ printk(KERN_INFO "%s: reset: success\n", hwif->name);
+ drive->failures = 0;
+ } else {
+ ide_reset_report_error(hwif, tmp);
+ drive->failures++;
+ err = -EIO;
+ }
+ }
+out:
+ hwif->polling = 0; /* done polling */
+ ide_complete_drive_reset(drive, err);
+ return ide_stopped;
+}
+
+static void ide_disk_pre_reset(ide_drive_t *drive)
+{
+ int legacy = (drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400) ? 0 : 1;
+
+ drive->special.all = 0;
+ drive->special.b.set_geometry = legacy;
+ drive->special.b.recalibrate = legacy;
+
+ drive->mult_count = 0;
+ drive->dev_flags &= ~IDE_DFLAG_PARKED;
+
+ if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0 &&
+ (drive->dev_flags & IDE_DFLAG_USING_DMA) == 0)
+ drive->mult_req = 0;
+
+ if (drive->mult_req != drive->mult_count)
+ drive->special.b.set_multmode = 1;
+}
+
+static void pre_reset(ide_drive_t *drive)
+{
+ const struct ide_port_ops *port_ops = drive->hwif->port_ops;
+
+ if (drive->media == ide_disk)
+ ide_disk_pre_reset(drive);
+ else
+ drive->dev_flags |= IDE_DFLAG_POST_RESET;
+
+ if (drive->dev_flags & IDE_DFLAG_USING_DMA) {
+ if (drive->crc_count)
+ ide_check_dma_crc(drive);
+ else
+ ide_dma_off(drive);
+ }
+
+ if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0) {
+ if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0) {
+ drive->dev_flags &= ~IDE_DFLAG_UNMASK;
+ drive->io_32bit = 0;
+ }
+ return;
+ }
+
+ if (port_ops && port_ops->pre_reset)
+ port_ops->pre_reset(drive);
+
+ if (drive->current_speed != 0xff)
+ drive->desired_speed = drive->current_speed;
+ drive->current_speed = 0xff;
+}
+
+/*
+ * do_reset1() attempts to recover a confused drive by resetting it.
+ * Unfortunately, resetting a disk drive actually resets all devices on
+ * the same interface, so it can really be thought of as resetting the
+ * interface rather than resetting the drive.
+ *
+ * ATAPI devices have their own reset mechanism which allows them to be
+ * individually reset without clobbering other devices on the same interface.
+ *
+ * Unfortunately, the IDE interface does not generate an interrupt to let
+ * us know when the reset operation has finished, so we must poll for this.
+ * Equally poor, though, is the fact that this may take a very long time
+ * to complete (up to 30 seconds worst case). So, instead of busy-waiting,
+ * we set a timer to poll at 50ms intervals.
+ */
+static ide_startstop_t do_reset1(ide_drive_t *drive, int do_not_try_atapi)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ const struct ide_port_ops *port_ops;
+ ide_drive_t *tdrive;
+ unsigned long flags, timeout;
+ int i;
+ DEFINE_WAIT(wait);
+
+ spin_lock_irqsave(&hwif->lock, flags);
+
+ /* We must not reset with running handlers */
+ BUG_ON(hwif->handler != NULL);
+
+ /* For an ATAPI device, first try an ATAPI SRST. */
+ if (drive->media != ide_disk && !do_not_try_atapi) {
+ pre_reset(drive);
+ SELECT_DRIVE(drive);
+ udelay(20);
+ tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
+ ndelay(400);
+ hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
+ hwif->polling = 1;
+ __ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
+ spin_unlock_irqrestore(&hwif->lock, flags);
+ return ide_started;
+ }
+
+ /* We must not disturb devices in the IDE_DFLAG_PARKED state. */
+ do {
+ unsigned long now;
+
+ prepare_to_wait(&ide_park_wq, &wait, TASK_UNINTERRUPTIBLE);
+ timeout = jiffies;
+ ide_port_for_each_present_dev(i, tdrive, hwif) {
+ if ((tdrive->dev_flags & IDE_DFLAG_PARKED) &&
+ time_after(tdrive->sleep, timeout))
+ timeout = tdrive->sleep;
+ }
+
+ now = jiffies;
+ if (time_before_eq(timeout, now))
+ break;
+
+ spin_unlock_irqrestore(&hwif->lock, flags);
+ timeout = schedule_timeout_uninterruptible(timeout - now);
+ spin_lock_irqsave(&hwif->lock, flags);
+ } while (timeout);
+ finish_wait(&ide_park_wq, &wait);
+
+ /*
+ * First, reset any device state data we were maintaining
+ * for any of the drives on this interface.
+ */
+ ide_port_for_each_dev(i, tdrive, hwif)
+ pre_reset(tdrive);
+
+ if (io_ports->ctl_addr == 0) {
+ spin_unlock_irqrestore(&hwif->lock, flags);
+ ide_complete_drive_reset(drive, -ENXIO);
+ return ide_stopped;
+ }
+
+ /*
+ * Note that we also set nIEN while resetting the device,
+ * to mask unwanted interrupts from the interface during the reset.
+ * However, due to the design of PC hardware, this will cause an
+ * immediate interrupt due to the edge transition it produces.
+ * This single interrupt gives us a "fast poll" for drives that
+ * recover from reset very quickly, saving us the first 50ms wait time.
+ *
+ * TODO: add ->softreset method and stop abusing ->set_irq
+ */
+ /* set SRST and nIEN */
+ tp_ops->set_irq(hwif, 4);
+ /* more than enough time */
+ udelay(10);
+ /* clear SRST, leave nIEN (unless device is on the quirk list) */
+ tp_ops->set_irq(hwif, drive->quirk_list == 2);
+ /* more than enough time */
+ udelay(10);
+ hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
+ hwif->polling = 1;
+ __ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
+
+ /*
+ * Some weird controllers like to reset themselves to a strange
+ * state when the disks are reset this way. At least, the Winbond
+ * 553 documentation says so.
+ */
+ port_ops = hwif->port_ops;
+ if (port_ops && port_ops->resetproc)
+ port_ops->resetproc(drive);
+
+ spin_unlock_irqrestore(&hwif->lock, flags);
+ return ide_started;
+}
+
+/*
+ * ide_do_reset() is the entry point to the drive/interface reset code.
+ */
+
+ide_startstop_t ide_do_reset(ide_drive_t *drive)
+{
+ return do_reset1(drive, 0);
+}
+EXPORT_SYMBOL(ide_do_reset);
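ide_ata_error() above is essentially a decision table over the ATA status and error registers. A simplified standalone restatement of its main branches — the ABORTED and bad-block special cases and the IDE_DFLAG_NOWERR check are omitted, and the action names are invented here; the driver encodes them into rq->errors instead:

#include <stdio.h>

/* ATA status register bits (per the ATA spec) */
#define ATA_BUSY	0x80
#define ATA_DF		0x20
#define ATA_ERR		0x01
/* ATA error register bits */
#define ATA_ICRC	0x80
#define ATA_UNC		0x40
#define ATA_ABORTED	0x04
#define ATA_TRK0NF	0x02
#define BAD_CRC		(ATA_ICRC | ATA_ABORTED)

enum action { RETRY, RESET, KILL, RECAL };	/* names are mine */

static enum action classify(unsigned char stat, unsigned char err)
{
	if (stat & (ATA_BUSY | ATA_DF))
		return RESET;		/* other bits are useless when BUSY */
	if (stat & ATA_ERR) {
		if ((err & BAD_CRC) == BAD_CRC)
			return RETRY;	/* UDMA CRC error: just retry */
		if (err & ATA_UNC)
			return KILL;	/* retries won't help */
		if (err & ATA_TRK0NF)
			return RECAL;	/* help it find track zero */
	}
	return RETRY;
}

int main(void)
{
	printf("%d\n", classify(ATA_ERR, BAD_CRC));	/* 0 = RETRY */
	printf("%d\n", classify(ATA_BUSY, 0));		/* 1 = RESET */
	return 0;
}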
diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c
new file mode 100644
index 00000000000..45b43dd49cd
--- /dev/null
+++ b/drivers/ide/ide-io-std.c
@@ -0,0 +1,316 @@
+
+#include <linux/kernel.h>
+#include <linux/ide.h>
+
+/*
+ * Conventional PIO operations for ATA devices
+ */
+
+static u8 ide_inb(unsigned long port)
+{
+ return (u8) inb(port);
+}
+
+static void ide_outb(u8 val, unsigned long port)
+{
+ outb(val, port);
+}
+
+/*
+ * MMIO operations, typically used for SATA controllers
+ */
+
+static u8 ide_mm_inb(unsigned long port)
+{
+ return (u8) readb((void __iomem *) port);
+}
+
+static void ide_mm_outb(u8 value, unsigned long port)
+{
+ writeb(value, (void __iomem *) port);
+}
+
+void ide_exec_command(ide_hwif_t *hwif, u8 cmd)
+{
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
+ else
+ outb(cmd, hwif->io_ports.command_addr);
+}
+EXPORT_SYMBOL_GPL(ide_exec_command);
+
+u8 ide_read_status(ide_hwif_t *hwif)
+{
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ return readb((void __iomem *)hwif->io_ports.status_addr);
+ else
+ return inb(hwif->io_ports.status_addr);
+}
+EXPORT_SYMBOL_GPL(ide_read_status);
+
+u8 ide_read_altstatus(ide_hwif_t *hwif)
+{
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ return readb((void __iomem *)hwif->io_ports.ctl_addr);
+ else
+ return inb(hwif->io_ports.ctl_addr);
+}
+EXPORT_SYMBOL_GPL(ide_read_altstatus);
+
+void ide_set_irq(ide_hwif_t *hwif, int on)
+{
+ u8 ctl = ATA_DEVCTL_OBS;
+
+ if (on == 4) { /* hack for SRST */
+ ctl |= 4;
+ on &= ~4;
+ }
+
+ ctl |= on ? 0 : 2;
+
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
+ else
+ outb(ctl, hwif->io_ports.ctl_addr);
+}
+EXPORT_SYMBOL_GPL(ide_set_irq);
+
+void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ struct ide_taskfile *tf = &task->tf;
+ void (*tf_outb)(u8 addr, unsigned long port);
+ u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
+ u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
+
+ if (mmio)
+ tf_outb = ide_mm_outb;
+ else
+ tf_outb = ide_outb;
+
+ if (task->tf_flags & IDE_TFLAG_FLAGGED)
+ HIHI = 0xFF;
+
+ if (task->tf_flags & IDE_TFLAG_OUT_DATA) {
+ u16 data = (tf->hob_data << 8) | tf->data;
+
+ if (mmio)
+ writew(data, (void __iomem *)io_ports->data_addr);
+ else
+ outw(data, io_ports->data_addr);
+ }
+
+ if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
+ tf_outb(tf->hob_feature, io_ports->feature_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
+ tf_outb(tf->hob_nsect, io_ports->nsect_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
+ tf_outb(tf->hob_lbal, io_ports->lbal_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
+ tf_outb(tf->hob_lbam, io_ports->lbam_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
+ tf_outb(tf->hob_lbah, io_ports->lbah_addr);
+
+ if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
+ tf_outb(tf->feature, io_ports->feature_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
+ tf_outb(tf->nsect, io_ports->nsect_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
+ tf_outb(tf->lbal, io_ports->lbal_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
+ tf_outb(tf->lbam, io_ports->lbam_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
+ tf_outb(tf->lbah, io_ports->lbah_addr);
+
+ if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
+ tf_outb((tf->device & HIHI) | drive->select,
+ io_ports->device_addr);
+}
+EXPORT_SYMBOL_GPL(ide_tf_load);
+
+void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ struct ide_taskfile *tf = &task->tf;
+ void (*tf_outb)(u8 addr, unsigned long port);
+ u8 (*tf_inb)(unsigned long port);
+ u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
+
+ if (mmio) {
+ tf_outb = ide_mm_outb;
+ tf_inb = ide_mm_inb;
+ } else {
+ tf_outb = ide_outb;
+ tf_inb = ide_inb;
+ }
+
+ if (task->tf_flags & IDE_TFLAG_IN_DATA) {
+ u16 data;
+
+ if (mmio)
+ data = readw((void __iomem *)io_ports->data_addr);
+ else
+ data = inw(io_ports->data_addr);
+
+ tf->data = data & 0xff;
+ tf->hob_data = (data >> 8) & 0xff;
+ }
+
+ /* be sure we're looking at the low order bits */
+ tf_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
+
+ if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
+ tf->feature = tf_inb(io_ports->feature_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_NSECT)
+ tf->nsect = tf_inb(io_ports->nsect_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_LBAL)
+ tf->lbal = tf_inb(io_ports->lbal_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_LBAM)
+ tf->lbam = tf_inb(io_ports->lbam_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_LBAH)
+ tf->lbah = tf_inb(io_ports->lbah_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
+ tf->device = tf_inb(io_ports->device_addr);
+
+ if (task->tf_flags & IDE_TFLAG_LBA48) {
+ tf_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr);
+
+ if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
+ tf->hob_feature = tf_inb(io_ports->feature_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
+ tf->hob_nsect = tf_inb(io_ports->nsect_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
+ tf->hob_lbal = tf_inb(io_ports->lbal_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
+ tf->hob_lbam = tf_inb(io_ports->lbam_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
+ tf->hob_lbah = tf_inb(io_ports->lbah_addr);
+ }
+}
+EXPORT_SYMBOL_GPL(ide_tf_read);
+
+/*
+ * Some localbus EIDE interfaces require a special access sequence
+ * when using 32-bit I/O instructions to transfer data. We call this
+ * the "vlb_sync" sequence, which consists of three successive reads
+ * of the sector count register location, with interrupts disabled
+ * to ensure that the reads all happen together.
+ */
+static void ata_vlb_sync(unsigned long port)
+{
+ (void)inb(port);
+ (void)inb(port);
+ (void)inb(port);
+}
+
+/*
+ * This is used for most PIO data transfers *from* the IDE interface
+ *
+ * These routines will round up any request for an odd number of bytes,
+ * so if an odd len is specified, be sure that there's at least one
+ * extra byte allocated for the buffer.
+ */
+void ide_input_data(ide_drive_t *drive, struct request *rq, void *buf,
+ unsigned int len)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ unsigned long data_addr = io_ports->data_addr;
+ u8 io_32bit = drive->io_32bit;
+ u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
+
+ len++;
+
+ if (io_32bit) {
+ unsigned long uninitialized_var(flags);
+
+ if ((io_32bit & 2) && !mmio) {
+ local_irq_save(flags);
+ ata_vlb_sync(io_ports->nsect_addr);
+ }
+
+ if (mmio)
+ __ide_mm_insl((void __iomem *)data_addr, buf, len / 4);
+ else
+ insl(data_addr, buf, len / 4);
+
+ if ((io_32bit & 2) && !mmio)
+ local_irq_restore(flags);
+
+ if ((len & 3) >= 2) {
+ if (mmio)
+ __ide_mm_insw((void __iomem *)data_addr,
+ (u8 *)buf + (len & ~3), 1);
+ else
+ insw(data_addr, (u8 *)buf + (len & ~3), 1);
+ }
+ } else {
+ if (mmio)
+ __ide_mm_insw((void __iomem *)data_addr, buf, len / 2);
+ else
+ insw(data_addr, buf, len / 2);
+ }
+}
+EXPORT_SYMBOL_GPL(ide_input_data);
+
+/*
+ * This is used for most PIO data transfers *to* the IDE interface
+ */
+void ide_output_data(ide_drive_t *drive, struct request *rq, void *buf,
+ unsigned int len)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ unsigned long data_addr = io_ports->data_addr;
+ u8 io_32bit = drive->io_32bit;
+ u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
+
+ len++;
+
+ if (io_32bit) {
+ unsigned long uninitialized_var(flags);
+
+ if ((io_32bit & 2) && !mmio) {
+ local_irq_save(flags);
+ ata_vlb_sync(io_ports->nsect_addr);
+ }
+
+ if (mmio)
+ __ide_mm_outsl((void __iomem *)data_addr, buf, len / 4);
+ else
+ outsl(data_addr, buf, len / 4);
+
+ if ((io_32bit & 2) && !mmio)
+ local_irq_restore(flags);
+
+ if ((len & 3) >= 2) {
+ if (mmio)
+ __ide_mm_outsw((void __iomem *)data_addr,
+ (u8 *)buf + (len & ~3), 1);
+ else
+ outsw(data_addr, (u8 *)buf + (len & ~3), 1);
+ }
+ } else {
+ if (mmio)
+ __ide_mm_outsw((void __iomem *)data_addr, buf, len / 2);
+ else
+ outsw(data_addr, buf, len / 2);
+ }
+}
+EXPORT_SYMBOL_GPL(ide_output_data);
+
+const struct ide_tp_ops default_tp_ops = {
+ .exec_command = ide_exec_command,
+ .read_status = ide_read_status,
+ .read_altstatus = ide_read_altstatus,
+
+ .set_irq = ide_set_irq,
+
+ .tf_load = ide_tf_load,
+ .tf_read = ide_tf_read,
+
+ .input_data = ide_input_data,
+ .output_data = ide_output_data,
+};
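ide_input_data()/ide_output_data() above round an odd byte count up, move len/4 longwords in 32-bit PIO mode, and finish with a single 16-bit transfer when two or three bytes remain. The length arithmetic in isolation:

#include <assert.h>

/* how ide_input_data() splits a transfer when io_32bit is set */
static void split(unsigned int len, unsigned int *longs,
		  unsigned int *tail_words)
{
	len++;				/* round odd lengths up */
	*longs = len / 4;		/* 32-bit transfers */
	*tail_words = ((len & 3) >= 2);	/* one trailing 16-bit transfer */
}

int main(void)
{
	unsigned int l, w;

	split(512, &l, &w);		/* 128 longs, no tail */
	assert(l == 128 && w == 0);

	split(510, &l, &w);		/* 127 longs + 1 trailing word */
	assert(l == 127 && w == 1);

	split(511, &l, &w);		/* odd: rounded up to 512 bytes */
	assert(l == 128 && w == 0);
	return 0;
}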
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index a9a6c208288..2e92497b58a 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -196,7 +196,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
}
EXPORT_SYMBOL(ide_end_drive_cmd);
-static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
+void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
if (rq->rq_disk) {
struct ide_driver *drv;
@@ -207,133 +207,6 @@ static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
ide_end_request(drive, 0, 0);
}
-static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
-{
- ide_hwif_t *hwif = drive->hwif;
-
- if ((stat & ATA_BUSY) ||
- ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
- /* other bits are useless when BUSY */
- rq->errors |= ERROR_RESET;
- } else if (stat & ATA_ERR) {
- /* err has different meaning on cdrom and tape */
- if (err == ATA_ABORTED) {
- if ((drive->dev_flags & IDE_DFLAG_LBA) &&
- /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */
- hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS)
- return ide_stopped;
- } else if ((err & BAD_CRC) == BAD_CRC) {
- /* UDMA crc error, just retry the operation */
- drive->crc_count++;
- } else if (err & (ATA_BBK | ATA_UNC)) {
- /* retries won't help these */
- rq->errors = ERROR_MAX;
- } else if (err & ATA_TRK0NF) {
- /* help it find track zero */
- rq->errors |= ERROR_RECAL;
- }
- }
-
- if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ &&
- (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
- int nsect = drive->mult_count ? drive->mult_count : 1;
-
- ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
- }
-
- if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
- ide_kill_rq(drive, rq);
- return ide_stopped;
- }
-
- if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
- rq->errors |= ERROR_RESET;
-
- if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
- ++rq->errors;
- return ide_do_reset(drive);
- }
-
- if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
- drive->special.b.recalibrate = 1;
-
- ++rq->errors;
-
- return ide_stopped;
-}
-
-static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
-{
- ide_hwif_t *hwif = drive->hwif;
-
- if ((stat & ATA_BUSY) ||
- ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
- /* other bits are useless when BUSY */
- rq->errors |= ERROR_RESET;
- } else {
- /* add decoding error stuff */
- }
-
- if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
- /* force an abort */
- hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE);
-
- if (rq->errors >= ERROR_MAX) {
- ide_kill_rq(drive, rq);
- } else {
- if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
- ++rq->errors;
- return ide_do_reset(drive);
- }
- ++rq->errors;
- }
-
- return ide_stopped;
-}
-
-static ide_startstop_t
-__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
-{
- if (drive->media == ide_disk)
- return ide_ata_error(drive, rq, stat, err);
- return ide_atapi_error(drive, rq, stat, err);
-}
-
-/**
- * ide_error - handle an error on the IDE
- * @drive: drive the error occurred on
- * @msg: message to report
- * @stat: status bits
- *
- * ide_error() takes action based on the error returned by the drive.
- * For normal I/O that may well include retries. We deal with
- * both new-style (taskfile) and old style command handling here.
- * In the case of taskfile command handling there is work left to
- * do
- */
-
-ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
-{
- struct request *rq;
- u8 err;
-
- err = ide_dump_status(drive, msg, stat);
-
- rq = drive->hwif->rq;
- if (rq == NULL)
- return ide_stopped;
-
- /* retry only "normal" I/O: */
- if (!blk_fs_request(rq)) {
- rq->errors = 1;
- ide_end_drive_cmd(drive, stat, err);
- return ide_stopped;
- }
-
- return __ide_error(drive, rq, stat, err);
-}
-EXPORT_SYMBOL_GPL(ide_error);
-
static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
tf->nsect = drive->sect;
@@ -490,71 +363,16 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
return ide_stopped;
}
-int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
- int arg)
-{
- struct request_queue *q = drive->queue;
- struct request *rq;
- int ret = 0;
-
- if (!(setting->flags & DS_SYNC))
- return setting->set(drive, arg);
-
- rq = blk_get_request(q, READ, __GFP_WAIT);
- rq->cmd_type = REQ_TYPE_SPECIAL;
- rq->cmd_len = 5;
- rq->cmd[0] = REQ_DEVSET_EXEC;
- *(int *)&rq->cmd[1] = arg;
- rq->special = setting->set;
-
- if (blk_execute_rq(q, NULL, rq, 0))
- ret = rq->errors;
- blk_put_request(rq);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ide_devset_execute);
-
static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
u8 cmd = rq->cmd[0];
- if (cmd == REQ_PARK_HEADS || cmd == REQ_UNPARK_HEADS) {
- ide_task_t task;
- struct ide_taskfile *tf = &task.tf;
-
- memset(&task, 0, sizeof(task));
- if (cmd == REQ_PARK_HEADS) {
- drive->sleep = *(unsigned long *)rq->special;
- drive->dev_flags |= IDE_DFLAG_SLEEPING;
- tf->command = ATA_CMD_IDLEIMMEDIATE;
- tf->feature = 0x44;
- tf->lbal = 0x4c;
- tf->lbam = 0x4e;
- tf->lbah = 0x55;
- task.tf_flags |= IDE_TFLAG_CUSTOM_HANDLER;
- } else /* cmd == REQ_UNPARK_HEADS */
- tf->command = ATA_CMD_CHK_POWER;
-
- task.tf_flags |= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
- task.rq = rq;
- drive->hwif->data_phase = task.data_phase = TASKFILE_NO_DATA;
- return do_rw_taskfile(drive, &task);
- }
-
switch (cmd) {
+ case REQ_PARK_HEADS:
+ case REQ_UNPARK_HEADS:
+ return ide_do_park_unpark(drive, rq);
case REQ_DEVSET_EXEC:
- {
- int err, (*setfunc)(ide_drive_t *, int) = rq->special;
-
- err = setfunc(drive, *(int *)&rq->cmd[1]);
- if (err)
- rq->errors = err;
- else
- err = 1;
- ide_end_request(drive, err, 0);
- return ide_stopped;
- }
+ return ide_do_devset(drive, rq);
case REQ_DRIVE_RESET:
return ide_do_reset(drive);
default:
@@ -820,63 +638,6 @@ plug_device_2:
blk_plug_device(q);
}
-/*
- * un-busy the port etc, and clear any pending DMA status. we want to
- * retry the current request in pio mode instead of risking tossing it
- * all away
- */
-static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct request *rq;
- ide_startstop_t ret = ide_stopped;
-
- /*
- * end current dma transaction
- */
-
- if (error < 0) {
- printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
- (void)hwif->dma_ops->dma_end(drive);
- ret = ide_error(drive, "dma timeout error",
- hwif->tp_ops->read_status(hwif));
- } else {
- printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
- hwif->dma_ops->dma_timeout(drive);
- }
-
- /*
- * disable dma for now, but remember that we did so because of
- * a timeout -- we'll reenable after we finish this next request
- * (or rather the first chunk of it) in pio.
- */
- drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
- drive->retry_pio++;
- ide_dma_off_quietly(drive);
-
- /*
- * un-busy drive etc and make sure request is sane
- */
-
- rq = hwif->rq;
- if (!rq)
- goto out;
-
- hwif->rq = NULL;
-
- rq->errors = 0;
-
- if (!rq->bio)
- goto out;
-
- rq->sector = rq->bio->bi_sector;
- rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
- rq->hard_cur_sectors = rq->current_nr_sectors;
- rq->buffer = bio_data(rq->bio);
-out:
- return ret;
-}
-
static void ide_plug_device(ide_drive_t *drive)
{
struct request_queue *q = drive->queue;
@@ -888,6 +649,29 @@ static void ide_plug_device(ide_drive_t *drive)
spin_unlock_irqrestore(q->queue_lock, flags);
}
+static int drive_is_ready(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u8 stat = 0;
+
+ if (drive->waiting_for_dma)
+ return hwif->dma_ops->dma_test_irq(drive);
+
+ if (hwif->io_ports.ctl_addr &&
+ (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
+ stat = hwif->tp_ops->read_altstatus(hwif);
+ else
+ /* Note: this may clear a pending IRQ!! */
+ stat = hwif->tp_ops->read_status(hwif);
+
+ if (stat & ATA_BUSY)
+ /* drive busy: definitely not interrupting */
+ return 0;
+
+ /* drive ready: *might* be interrupting */
+ return 1;
+}
+
/**
* ide_timer_expiry - handle lack of an IDE interrupt
* @data: timer callback magic (hwif)
@@ -1164,54 +948,6 @@ out_early:
}
EXPORT_SYMBOL_GPL(ide_intr);
-/**
- * ide_do_drive_cmd - issue IDE special command
- * @drive: device to issue command
- * @rq: request to issue
- *
- * This function issues a special IDE device request
- * onto the request queue.
- *
- * the rq is queued at the head of the request queue, displacing
- * the currently-being-processed request and this function
- * returns immediately without waiting for the new rq to be
- * completed. This is VERY DANGEROUS, and is intended for
- * careful use by the ATAPI tape/cdrom driver code.
- */
-
-void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
-{
- struct request_queue *q = drive->queue;
- unsigned long flags;
-
- drive->hwif->rq = NULL;
-
- spin_lock_irqsave(q->queue_lock, flags);
- __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(ide_do_drive_cmd);
-
-void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
-{
- ide_hwif_t *hwif = drive->hwif;
- ide_task_t task;
-
- memset(&task, 0, sizeof(task));
- task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM |
- IDE_TFLAG_OUT_FEATURE | tf_flags;
- task.tf.feature = dma; /* Use PIO/DMA */
- task.tf.lbam = bcount & 0xff;
- task.tf.lbah = (bcount >> 8) & 0xff;
-
- ide_tf_dump(drive->name, &task.tf);
- hwif->tp_ops->set_irq(hwif, 1);
- SELECT_MASK(drive, 0);
- hwif->tp_ops->tf_load(drive, &task);
-}
-
-EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
-
void ide_pad_transfer(ide_drive_t *drive, int write, int len)
{
ide_hwif_t *hwif = drive->hwif;
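With ide_do_drive_cmd() removed, callers such as ide_queue_pc_head() and cdrom_queue_request_sense() (earlier in this series) clear hwif->rq and insert the request at the head of the elevator themselves via elv_add_request(..., ELEVATOR_INSERT_FRONT, 0). A toy front-insert list showing why the sense request preempts the command that failed:

#include <assert.h>
#include <stddef.h>

struct req {
	const char *name;
	struct req *next;
};

/* model of ELEVATOR_INSERT_FRONT: new request preempts queued work */
static void insert_front(struct req **q, struct req *rq)
{
	rq->next = *q;
	*q = rq;
}

int main(void)
{
	struct req failed = { "failed READ", NULL };
	struct req sense  = { "REQUEST SENSE", NULL };
	struct req *queue = NULL;

	insert_front(&queue, &failed);	/* the command that errored */
	insert_front(&queue, &sense);	/* queued ahead of it */

	assert(queue == &sense);	/* sense runs first */
	assert(queue->next == &failed);
	return 0;
}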
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index b1892bd95c6..317c5dadd7c 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -27,35 +27,7 @@
#include <asm/uaccess.h>
#include <asm/io.h>
-/*
- * Conventional PIO operations for ATA devices
- */
-
-static u8 ide_inb (unsigned long port)
-{
- return (u8) inb(port);
-}
-
-static void ide_outb (u8 val, unsigned long port)
-{
- outb(val, port);
-}
-
-/*
- * MMIO operations, typically used for SATA controllers
- */
-
-static u8 ide_mm_inb (unsigned long port)
-{
- return (u8) readb((void __iomem *) port);
-}
-
-static void ide_mm_outb (u8 value, unsigned long port)
-{
- writeb(value, (void __iomem *) port);
-}
-
-void SELECT_DRIVE (ide_drive_t *drive)
+void SELECT_DRIVE(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
const struct ide_port_ops *port_ops = hwif->port_ops;
@@ -78,277 +50,6 @@ void SELECT_MASK(ide_drive_t *drive, int mask)
port_ops->maskproc(drive, mask);
}
-void ide_exec_command(ide_hwif_t *hwif, u8 cmd)
-{
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
- else
- outb(cmd, hwif->io_ports.command_addr);
-}
-EXPORT_SYMBOL_GPL(ide_exec_command);
-
-u8 ide_read_status(ide_hwif_t *hwif)
-{
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- return readb((void __iomem *)hwif->io_ports.status_addr);
- else
- return inb(hwif->io_ports.status_addr);
-}
-EXPORT_SYMBOL_GPL(ide_read_status);
-
-u8 ide_read_altstatus(ide_hwif_t *hwif)
-{
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- return readb((void __iomem *)hwif->io_ports.ctl_addr);
- else
- return inb(hwif->io_ports.ctl_addr);
-}
-EXPORT_SYMBOL_GPL(ide_read_altstatus);
-
-void ide_set_irq(ide_hwif_t *hwif, int on)
-{
- u8 ctl = ATA_DEVCTL_OBS;
-
- if (on == 4) { /* hack for SRST */
- ctl |= 4;
- on &= ~4;
- }
-
- ctl |= on ? 0 : 2;
-
- if (hwif->host_flags & IDE_HFLAG_MMIO)
- writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
- else
- outb(ctl, hwif->io_ports.ctl_addr);
-}
-EXPORT_SYMBOL_GPL(ide_set_irq);
-
-void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- struct ide_taskfile *tf = &task->tf;
- void (*tf_outb)(u8 addr, unsigned long port);
- u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
- u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
-
- if (mmio)
- tf_outb = ide_mm_outb;
- else
- tf_outb = ide_outb;
-
- if (task->tf_flags & IDE_TFLAG_FLAGGED)
- HIHI = 0xFF;
-
- if (task->tf_flags & IDE_TFLAG_OUT_DATA) {
- u16 data = (tf->hob_data << 8) | tf->data;
-
- if (mmio)
- writew(data, (void __iomem *)io_ports->data_addr);
- else
- outw(data, io_ports->data_addr);
- }
-
- if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
- tf_outb(tf->hob_feature, io_ports->feature_addr);
- if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
- tf_outb(tf->hob_nsect, io_ports->nsect_addr);
- if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
- tf_outb(tf->hob_lbal, io_ports->lbal_addr);
- if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
- tf_outb(tf->hob_lbam, io_ports->lbam_addr);
- if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
- tf_outb(tf->hob_lbah, io_ports->lbah_addr);
-
- if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
- tf_outb(tf->feature, io_ports->feature_addr);
- if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
- tf_outb(tf->nsect, io_ports->nsect_addr);
- if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
- tf_outb(tf->lbal, io_ports->lbal_addr);
- if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
- tf_outb(tf->lbam, io_ports->lbam_addr);
- if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
- tf_outb(tf->lbah, io_ports->lbah_addr);
-
- if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
- tf_outb((tf->device & HIHI) | drive->select,
- io_ports->device_addr);
-}
-EXPORT_SYMBOL_GPL(ide_tf_load);
-
-void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- struct ide_taskfile *tf = &task->tf;
- void (*tf_outb)(u8 addr, unsigned long port);
- u8 (*tf_inb)(unsigned long port);
- u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
-
- if (mmio) {
- tf_outb = ide_mm_outb;
- tf_inb = ide_mm_inb;
- } else {
- tf_outb = ide_outb;
- tf_inb = ide_inb;
- }
-
- if (task->tf_flags & IDE_TFLAG_IN_DATA) {
- u16 data;
-
- if (mmio)
- data = readw((void __iomem *)io_ports->data_addr);
- else
- data = inw(io_ports->data_addr);
-
- tf->data = data & 0xff;
- tf->hob_data = (data >> 8) & 0xff;
- }
-
- /* be sure we're looking at the low order bits */
- tf_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
-
- if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
- tf->feature = tf_inb(io_ports->feature_addr);
- if (task->tf_flags & IDE_TFLAG_IN_NSECT)
- tf->nsect = tf_inb(io_ports->nsect_addr);
- if (task->tf_flags & IDE_TFLAG_IN_LBAL)
- tf->lbal = tf_inb(io_ports->lbal_addr);
- if (task->tf_flags & IDE_TFLAG_IN_LBAM)
- tf->lbam = tf_inb(io_ports->lbam_addr);
- if (task->tf_flags & IDE_TFLAG_IN_LBAH)
- tf->lbah = tf_inb(io_ports->lbah_addr);
- if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
- tf->device = tf_inb(io_ports->device_addr);
-
- if (task->tf_flags & IDE_TFLAG_LBA48) {
- tf_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr);
-
- if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
- tf->hob_feature = tf_inb(io_ports->feature_addr);
- if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
- tf->hob_nsect = tf_inb(io_ports->nsect_addr);
- if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
- tf->hob_lbal = tf_inb(io_ports->lbal_addr);
- if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
- tf->hob_lbam = tf_inb(io_ports->lbam_addr);
- if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
- tf->hob_lbah = tf_inb(io_ports->lbah_addr);
- }
-}
-EXPORT_SYMBOL_GPL(ide_tf_read);
-
-/*
- * Some localbus EIDE interfaces require a special access sequence
- * when using 32-bit I/O instructions to transfer data. We call this
- * the "vlb_sync" sequence, which consists of three successive reads
- * of the sector count register location, with interrupts disabled
- * to ensure that the reads all happen together.
- */
-static void ata_vlb_sync(unsigned long port)
-{
- (void)inb(port);
- (void)inb(port);
- (void)inb(port);
-}
-
-/*
- * This is used for most PIO data transfers *from* the IDE interface
- *
- * These routines will round up any request for an odd number of bytes,
- * so if an odd len is specified, be sure that there's at least one
- * extra byte allocated for the buffer.
- */
-void ide_input_data(ide_drive_t *drive, struct request *rq, void *buf,
- unsigned int len)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- unsigned long data_addr = io_ports->data_addr;
- u8 io_32bit = drive->io_32bit;
- u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
-
- len++;
-
- if (io_32bit) {
- unsigned long uninitialized_var(flags);
-
- if ((io_32bit & 2) && !mmio) {
- local_irq_save(flags);
- ata_vlb_sync(io_ports->nsect_addr);
- }
-
- if (mmio)
- __ide_mm_insl((void __iomem *)data_addr, buf, len / 4);
- else
- insl(data_addr, buf, len / 4);
-
- if ((io_32bit & 2) && !mmio)
- local_irq_restore(flags);
-
- if ((len & 3) >= 2) {
- if (mmio)
- __ide_mm_insw((void __iomem *)data_addr,
- (u8 *)buf + (len & ~3), 1);
- else
- insw(data_addr, (u8 *)buf + (len & ~3), 1);
- }
- } else {
- if (mmio)
- __ide_mm_insw((void __iomem *)data_addr, buf, len / 2);
- else
- insw(data_addr, buf, len / 2);
- }
-}
-EXPORT_SYMBOL_GPL(ide_input_data);
-
-/*
- * This is used for most PIO data transfers *to* the IDE interface
- */
-void ide_output_data(ide_drive_t *drive, struct request *rq, void *buf,
- unsigned int len)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- unsigned long data_addr = io_ports->data_addr;
- u8 io_32bit = drive->io_32bit;
- u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
-
- len++;
-
- if (io_32bit) {
- unsigned long uninitialized_var(flags);
-
- if ((io_32bit & 2) && !mmio) {
- local_irq_save(flags);
- ata_vlb_sync(io_ports->nsect_addr);
- }
-
- if (mmio)
- __ide_mm_outsl((void __iomem *)data_addr, buf, len / 4);
- else
- outsl(data_addr, buf, len / 4);
-
- if ((io_32bit & 2) && !mmio)
- local_irq_restore(flags);
-
- if ((len & 3) >= 2) {
- if (mmio)
- __ide_mm_outsw((void __iomem *)data_addr,
- (u8 *)buf + (len & ~3), 1);
- else
- outsw(data_addr, (u8 *)buf + (len & ~3), 1);
- }
- } else {
- if (mmio)
- __ide_mm_outsw((void __iomem *)data_addr, buf, len / 2);
- else
- outsw(data_addr, buf, len / 2);
- }
-}
-EXPORT_SYMBOL_GPL(ide_output_data);
-
u8 ide_read_error(ide_drive_t *drive)
{
ide_task_t task;
@@ -362,35 +63,6 @@ u8 ide_read_error(ide_drive_t *drive)
}
EXPORT_SYMBOL_GPL(ide_read_error);
-void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason)
-{
- ide_task_t task;
-
- memset(&task, 0, sizeof(task));
- task.tf_flags = IDE_TFLAG_IN_LBAH | IDE_TFLAG_IN_LBAM |
- IDE_TFLAG_IN_NSECT;
-
- drive->hwif->tp_ops->tf_read(drive, &task);
-
- *bcount = (task.tf.lbah << 8) | task.tf.lbam;
- *ireason = task.tf.nsect & 3;
-}
-EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason);
-
-const struct ide_tp_ops default_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
-
- .set_irq = ide_set_irq,
-
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
-
void ide_fix_driveid(u16 *id)
{
#ifndef __LITTLE_ENDIAN
@@ -412,7 +84,7 @@ void ide_fix_driveid(u16 *id)
* returned by the ATA_CMD_ID_ATA[PI] commands.
*/
-void ide_fixstring (u8 *s, const int bytecount, const int byteswap)
+void ide_fixstring(u8 *s, const int bytecount, const int byteswap)
{
u8 *p, *end = &s[bytecount & ~1]; /* bytecount must be even */
@@ -435,44 +107,9 @@ void ide_fixstring (u8 *s, const int bytecount, const int byteswap)
while (p != end)
*p++ = '\0';
}
-
EXPORT_SYMBOL(ide_fixstring);
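[Editor's note] A quick illustration of why ide_fixstring() exists: ATA stores ID strings two characters per 16-bit word with the first character in the high byte, so a little-endian host sees each pair swapped. A standalone sketch, not the kernel helper itself:

/* A little-endian read of "hello!" yields "ehll!o" until the byte
 * pairs are swapped back.
 */
#include <stddef.h>

static void fixstring(char *s, size_t n)	/* n must be even */
{
	size_t i;

	for (i = 0; i + 1 < n; i += 2) {
		char t = s[i];

		s[i] = s[i + 1];
		s[i + 1] = t;
	}
}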
/*
- * Needed for PCI irq sharing
- */
-int drive_is_ready (ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 stat = 0;
-
- if (drive->waiting_for_dma)
- return hwif->dma_ops->dma_test_irq(drive);
-
- /*
- * We do a passive status test under shared PCI interrupts on
- * cards that truly share the ATA side interrupt, but may also share
- * an interrupt with another PCI card/device. We make no assumptions
- * about possible ISA-PnP and PCI-PnP issues yet.
- */
- if (hwif->io_ports.ctl_addr &&
- (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
- stat = hwif->tp_ops->read_altstatus(hwif);
- else
- /* Note: this may clear a pending IRQ!! */
- stat = hwif->tp_ops->read_status(hwif);
-
- if (stat & ATA_BUSY)
- /* drive busy: definitely not interrupting */
- return 0;
-
- /* drive ready: *might* be interrupting */
- return 1;
-}
-
-EXPORT_SYMBOL(drive_is_ready);
-
-/*
* This routine busy-waits for the drive status to be not "busy".
* It then checks the status for all of the "good" bits and none
* of the "bad" bits, and if all is okay it returns 0. All other
@@ -483,7 +120,8 @@ EXPORT_SYMBOL(drive_is_ready);
* setting a timer to wake up at half second intervals thereafter,
 * until the timeout is reached.
*/
-static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat)
+static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad,
+ unsigned long timeout, u8 *rstat)
{
ide_hwif_t *hwif = drive->hwif;
const struct ide_tp_ops *tp_ops = hwif->tp_ops;
@@ -541,7 +179,8 @@ static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long ti
* The caller should return the updated value of "startstop" in this case,
* "startstop" is unchanged when the function returns 0.
*/
-int ide_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout)
+int ide_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive, u8 good,
+ u8 bad, unsigned long timeout)
{
int err;
u8 stat;
@@ -561,7 +200,6 @@ int ide_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive, u8 good, u8 ba
return err;
}
-
EXPORT_SYMBOL(ide_wait_stat);
/**
@@ -582,7 +220,6 @@ int ide_in_drive_list(u16 *id, const struct drive_list_entry *table)
return 1;
return 0;
}
-
EXPORT_SYMBOL_GPL(ide_in_drive_list);
/*
@@ -607,7 +244,7 @@ static const struct drive_list_entry ivb_list[] = {
 * All hosts that use the 80c ribbon must use this!
* The name is derived from upper byte of word 93 and the 80c ribbon.
*/
-u8 eighty_ninty_three (ide_drive_t *drive)
+u8 eighty_ninty_three(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
u16 *id = drive->id;
@@ -652,47 +289,19 @@ no_80w:
int ide_driveid_update(ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
u16 *id;
- unsigned long flags;
- u8 stat;
-
- /*
- * Re-read drive->id for possible DMA mode
- * change (copied from ide-probe.c)
- */
-
- SELECT_MASK(drive, 1);
- tp_ops->set_irq(hwif, 0);
- msleep(50);
- tp_ops->exec_command(hwif, ATA_CMD_ID_ATA);
+ int rc;
- if (ide_busy_sleep(hwif, WAIT_WORSTCASE, 1)) {
- SELECT_MASK(drive, 0);
+ id = kmalloc(SECTOR_SIZE, GFP_ATOMIC);
+ if (id == NULL)
return 0;
- }
-
- msleep(50); /* wait for IRQ and ATA_DRQ */
- stat = tp_ops->read_status(hwif);
- if (!OK_STAT(stat, ATA_DRQ, BAD_R_STAT)) {
- SELECT_MASK(drive, 0);
- printk("%s: CHECK for good STATUS\n", drive->name);
- return 0;
- }
- local_irq_save(flags);
+ SELECT_MASK(drive, 1);
+ rc = ide_dev_read_id(drive, ATA_CMD_ID_ATA, id);
SELECT_MASK(drive, 0);
- id = kmalloc(SECTOR_SIZE, GFP_ATOMIC);
- if (!id) {
- local_irq_restore(flags);
- return 0;
- }
- tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
- (void)tp_ops->read_status(hwif); /* clear drive IRQ */
- local_irq_enable();
- local_irq_restore(flags);
- ide_fix_driveid(id);
+
+ if (rc)
+ goto out_err;
drive->id[ATA_ID_UDMA_MODES] = id[ATA_ID_UDMA_MODES];
drive->id[ATA_ID_MWDMA_MODES] = id[ATA_ID_MWDMA_MODES];
@@ -705,6 +314,12 @@ int ide_driveid_update(ide_drive_t *drive)
ide_dma_off(drive);
return 1;
+out_err:
+ SELECT_MASK(drive, 0);
+ if (rc == 2)
+ printk(KERN_ERR "%s: %s: bad status\n", drive->name, __func__);
+ kfree(id);
+ return 0;
}
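[Editor's note] The rewritten function above refreshes only two identify words because those are the ones a transfer-mode change can alter. A sketch of the word-88 (UDMA) layout this assumes; word 63 (MWDMA) follows the same pattern. Standalone C, illustrative helper name:

/* Low byte of word 88: supported modes; high byte: the mode currently
 * selected via SET FEATURES.
 */
static int udma_mode_selected(unsigned short w88)
{
	int i;

	for (i = 7; i >= 0; i--)
		if (w88 & (0x100 << i))
			return i;	/* e.g. 0x043f -> UDMA2 selected */
	return -1;			/* no UDMA mode selected */
}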
int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
@@ -731,18 +346,15 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
* but for some reason these don't work at
* this point (lost interrupt).
*/
- /*
- * Select the drive, and issue the SETFEATURES command
- */
- disable_irq_nosync(hwif->irq);
-
+
/*
* FIXME: we race against the running IRQ here if
 * this is called from non-IRQ context. If we use
* disable_irq() we hang on the error path. Work
* is needed.
*/
-
+ disable_irq_nosync(hwif->irq);
+
udelay(1);
SELECT_DRIVE(drive);
SELECT_MASK(drive, 1);
@@ -812,8 +424,8 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
*
* See also ide_execute_command
*/
-static void __ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
- unsigned int timeout, ide_expiry_t *expiry)
+void __ide_set_handler(ide_drive_t *drive, ide_handler_t *handler,
+ unsigned int timeout, ide_expiry_t *expiry)
{
ide_hwif_t *hwif = drive->hwif;
@@ -835,9 +447,8 @@ void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
__ide_set_handler(drive, handler, timeout, expiry);
spin_unlock_irqrestore(&hwif->lock, flags);
}
-
EXPORT_SYMBOL(ide_set_handler);
-
+
/**
* ide_execute_command - execute an IDE command
* @drive: IDE drive to issue the command against
@@ -847,7 +458,7 @@ EXPORT_SYMBOL(ide_set_handler);
* @expiry: handler to run on timeout
*
* Helper function to issue an IDE command. This handles the
- * atomicity requirements, command timing and ensures that the
+ * atomicity requirements, command timing and ensures that the
* handler and IRQ setup do not race. All IDE command kick off
* should go via this function or do equivalent locking.
*/
@@ -884,301 +495,6 @@ void ide_execute_pkt_cmd(ide_drive_t *drive)
}
EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd);
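[Editor's note] Schematically, the race ide_execute_command() closes is between installing the completion handler and the drive finishing the command; both must happen under hwif->lock. A condensed sketch of that pattern, simplified from the function (which this hunk does not show in full):

/* Arm the handler and timeout before issuing the command, all under
 * hwif->lock, so the completion IRQ cannot win the race.
 */
spin_lock_irqsave(&hwif->lock, flags);
__ide_set_handler(drive, handler, timeout, expiry);
tp_ops->exec_command(hwif, cmd);
ndelay(400);	/* the drive takes 400ns to respond; don't let the
		   IRQ be serviced before that */
spin_unlock_irqrestore(&hwif->lock, flags);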
-static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
-{
- struct request *rq = drive->hwif->rq;
-
- if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET)
- ide_end_request(drive, err ? err : 1, 0);
-}
-
-/* needed below */
-static ide_startstop_t do_reset1 (ide_drive_t *, int);
-
-/*
- * atapi_reset_pollfunc() gets invoked to poll the interface for completion every 50ms
- * during an ATAPI drive reset operation. If the drive has not yet responded,
- * and we have not yet hit our maximum waiting time, then the timer is restarted
- * for another 50ms.
- */
-static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 stat;
-
- SELECT_DRIVE(drive);
- udelay (10);
- stat = hwif->tp_ops->read_status(hwif);
-
- if (OK_STAT(stat, 0, ATA_BUSY))
- printk("%s: ATAPI reset complete\n", drive->name);
- else {
- if (time_before(jiffies, hwif->poll_timeout)) {
- ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
- /* continue polling */
- return ide_started;
- }
- /* end of polling */
- hwif->polling = 0;
- printk("%s: ATAPI reset timed-out, status=0x%02x\n",
- drive->name, stat);
- /* do it the old fashioned way */
- return do_reset1(drive, 1);
- }
- /* done polling */
- hwif->polling = 0;
- ide_complete_drive_reset(drive, 0);
- return ide_stopped;
-}
-
-static void ide_reset_report_error(ide_hwif_t *hwif, u8 err)
-{
- static const char *err_master_vals[] =
- { NULL, "passed", "formatter device error",
- "sector buffer error", "ECC circuitry error",
- "controlling MPU error" };
-
- u8 err_master = err & 0x7f;
-
- printk(KERN_ERR "%s: reset: master: ", hwif->name);
- if (err_master && err_master < 6)
- printk(KERN_CONT "%s", err_master_vals[err_master]);
- else
- printk(KERN_CONT "error (0x%02x?)", err);
- if (err & 0x80)
- printk(KERN_CONT "; slave: failed");
- printk(KERN_CONT "\n");
-}
-
-/*
- * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
- * during an IDE reset operation. If the drives have not yet responded,
- * and we have not yet hit our maximum waiting time, then the timer is restarted
- * for another 50ms.
- */
-static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_port_ops *port_ops = hwif->port_ops;
- u8 tmp;
- int err = 0;
-
- if (port_ops && port_ops->reset_poll) {
- err = port_ops->reset_poll(drive);
- if (err) {
- printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
- hwif->name, drive->name);
- goto out;
- }
- }
-
- tmp = hwif->tp_ops->read_status(hwif);
-
- if (!OK_STAT(tmp, 0, ATA_BUSY)) {
- if (time_before(jiffies, hwif->poll_timeout)) {
- ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
- /* continue polling */
- return ide_started;
- }
- printk("%s: reset timed-out, status=0x%02x\n", hwif->name, tmp);
- drive->failures++;
- err = -EIO;
- } else {
- tmp = ide_read_error(drive);
-
- if (tmp == 1) {
- printk(KERN_INFO "%s: reset: success\n", hwif->name);
- drive->failures = 0;
- } else {
- ide_reset_report_error(hwif, tmp);
- drive->failures++;
- err = -EIO;
- }
- }
-out:
- hwif->polling = 0; /* done polling */
- ide_complete_drive_reset(drive, err);
- return ide_stopped;
-}
-
-static void ide_disk_pre_reset(ide_drive_t *drive)
-{
- int legacy = (drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400) ? 0 : 1;
-
- drive->special.all = 0;
- drive->special.b.set_geometry = legacy;
- drive->special.b.recalibrate = legacy;
-
- drive->mult_count = 0;
- drive->dev_flags &= ~IDE_DFLAG_PARKED;
-
- if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0 &&
- (drive->dev_flags & IDE_DFLAG_USING_DMA) == 0)
- drive->mult_req = 0;
-
- if (drive->mult_req != drive->mult_count)
- drive->special.b.set_multmode = 1;
-}
-
-static void pre_reset(ide_drive_t *drive)
-{
- const struct ide_port_ops *port_ops = drive->hwif->port_ops;
-
- if (drive->media == ide_disk)
- ide_disk_pre_reset(drive);
- else
- drive->dev_flags |= IDE_DFLAG_POST_RESET;
-
- if (drive->dev_flags & IDE_DFLAG_USING_DMA) {
- if (drive->crc_count)
- ide_check_dma_crc(drive);
- else
- ide_dma_off(drive);
- }
-
- if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0) {
- if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0) {
- drive->dev_flags &= ~IDE_DFLAG_UNMASK;
- drive->io_32bit = 0;
- }
- return;
- }
-
- if (port_ops && port_ops->pre_reset)
- port_ops->pre_reset(drive);
-
- if (drive->current_speed != 0xff)
- drive->desired_speed = drive->current_speed;
- drive->current_speed = 0xff;
-}
-
-/*
- * do_reset1() attempts to recover a confused drive by resetting it.
- * Unfortunately, resetting a disk drive actually resets all devices on
- * the same interface, so it can really be thought of as resetting the
- * interface rather than resetting the drive.
- *
- * ATAPI devices have their own reset mechanism which allows them to be
- * individually reset without clobbering other devices on the same interface.
- *
- * Unfortunately, the IDE interface does not generate an interrupt to let
- * us know when the reset operation has finished, so we must poll for this.
- * Equally poor, though, is the fact that this may take a very long time to
- * complete (up to 30 seconds worst case).  So, instead of busy-waiting here for it,
- * we set a timer to poll at 50ms intervals.
- */
-static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- const struct ide_port_ops *port_ops;
- ide_drive_t *tdrive;
- unsigned long flags, timeout;
- int i;
- DEFINE_WAIT(wait);
-
- spin_lock_irqsave(&hwif->lock, flags);
-
- /* We must not reset with running handlers */
- BUG_ON(hwif->handler != NULL);
-
- /* For an ATAPI device, first try an ATAPI SRST. */
- if (drive->media != ide_disk && !do_not_try_atapi) {
- pre_reset(drive);
- SELECT_DRIVE(drive);
- udelay (20);
- tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
- ndelay(400);
- hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
- hwif->polling = 1;
- __ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
- spin_unlock_irqrestore(&hwif->lock, flags);
- return ide_started;
- }
-
- /* We must not disturb devices in the IDE_DFLAG_PARKED state. */
- do {
- unsigned long now;
-
- prepare_to_wait(&ide_park_wq, &wait, TASK_UNINTERRUPTIBLE);
- timeout = jiffies;
- ide_port_for_each_dev(i, tdrive, hwif) {
- if (tdrive->dev_flags & IDE_DFLAG_PRESENT &&
- tdrive->dev_flags & IDE_DFLAG_PARKED &&
- time_after(tdrive->sleep, timeout))
- timeout = tdrive->sleep;
- }
-
- now = jiffies;
- if (time_before_eq(timeout, now))
- break;
-
- spin_unlock_irqrestore(&hwif->lock, flags);
- timeout = schedule_timeout_uninterruptible(timeout - now);
- spin_lock_irqsave(&hwif->lock, flags);
- } while (timeout);
- finish_wait(&ide_park_wq, &wait);
-
- /*
- * First, reset any device state data we were maintaining
- * for any of the drives on this interface.
- */
- ide_port_for_each_dev(i, tdrive, hwif)
- pre_reset(tdrive);
-
- if (io_ports->ctl_addr == 0) {
- spin_unlock_irqrestore(&hwif->lock, flags);
- ide_complete_drive_reset(drive, -ENXIO);
- return ide_stopped;
- }
-
- /*
- * Note that we also set nIEN while resetting the device,
- * to mask unwanted interrupts from the interface during the reset.
- * However, due to the design of PC hardware, this will cause an
- * immediate interrupt due to the edge transition it produces.
- * This single interrupt gives us a "fast poll" for drives that
- * recover from reset very quickly, saving us the first 50ms wait time.
- *
- * TODO: add ->softreset method and stop abusing ->set_irq
- */
- /* set SRST and nIEN */
- tp_ops->set_irq(hwif, 4);
- /* more than enough time */
- udelay(10);
- /* clear SRST, leave nIEN (unless device is on the quirk list) */
- tp_ops->set_irq(hwif, drive->quirk_list == 2);
- /* more than enough time */
- udelay(10);
- hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
- hwif->polling = 1;
- __ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
-
- /*
- * Some weird controllers like resetting themselves to a strange
- * state when the disks are reset this way. At least, the Winbond
- * 553 documentation says so.
- */
- port_ops = hwif->port_ops;
- if (port_ops && port_ops->resetproc)
- port_ops->resetproc(drive);
-
- spin_unlock_irqrestore(&hwif->lock, flags);
- return ide_started;
-}
-
-/*
- * ide_do_reset() is the entry point to the drive/interface reset code.
- */
-
-ide_startstop_t ide_do_reset (ide_drive_t *drive)
-{
- return do_reset1(drive, 0);
-}
-
-EXPORT_SYMBOL(ide_do_reset);
-
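[Editor's note] For readers decoding the ->set_irq abuse in do_reset1() above, the values map onto ATA device control register bits. A reference sketch; the macros are illustrative, not kernel definitions:

#define DEVCTL_NIEN	0x02	/* 1 = mask the drive's INTRQ line */
#define DEVCTL_SRST	0x04	/* 1 = software-reset both devices on the port */
/* set_irq(hwif, 4) asserts SRST (with nIEN); set_irq(hwif, 0 or 1)
 * releases SRST, and 1 additionally clears nIEN, which the code above
 * does only for devices on the quirk list.
 */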
/*
* ide_wait_not_busy() waits for the currently selected device on the hwif
* to report a non-busy status, see comments in ide_probe_port().
@@ -1187,7 +503,7 @@ int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
{
u8 stat = 0;
- while(timeout--) {
+ while (timeout--) {
/*
* Turn this into a schedule() sleep once I'm sure
* about locking issues (2.5 work ?).
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 09526a0de73..f6c683dd298 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -5,163 +5,6 @@
#include <linux/ide.h>
#include <linux/bitops.h>
-static const char *udma_str[] =
- { "UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
- "UDMA/66", "UDMA/100", "UDMA/133", "UDMA7" };
-static const char *mwdma_str[] =
- { "MWDMA0", "MWDMA1", "MWDMA2" };
-static const char *swdma_str[] =
- { "SWDMA0", "SWDMA1", "SWDMA2" };
-static const char *pio_str[] =
- { "PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5" };
-
-/**
- * ide_xfer_verbose - return IDE mode names
- * @mode: transfer mode
- *
- * Returns a constant string giving the name of the mode
- * requested.
- */
-
-const char *ide_xfer_verbose(u8 mode)
-{
- const char *s;
- u8 i = mode & 0xf;
-
- if (mode >= XFER_UDMA_0 && mode <= XFER_UDMA_7)
- s = udma_str[i];
- else if (mode >= XFER_MW_DMA_0 && mode <= XFER_MW_DMA_2)
- s = mwdma_str[i];
- else if (mode >= XFER_SW_DMA_0 && mode <= XFER_SW_DMA_2)
- s = swdma_str[i];
- else if (mode >= XFER_PIO_0 && mode <= XFER_PIO_5)
- s = pio_str[i & 0x7];
- else if (mode == XFER_PIO_SLOW)
- s = "PIO SLOW";
- else
- s = "XFER ERROR";
-
- return s;
-}
-EXPORT_SYMBOL(ide_xfer_verbose);
-
-/**
- * ide_rate_filter - filter transfer mode
- * @drive: IDE device
- * @speed: desired speed
- *
- * Given the available transfer modes this function returns
- * the best available speed at or below the speed requested.
- *
- * TODO: check device PIO capabilities
- */
-
-static u8 ide_rate_filter(ide_drive_t *drive, u8 speed)
-{
- ide_hwif_t *hwif = drive->hwif;
- u8 mode = ide_find_dma_mode(drive, speed);
-
- if (mode == 0) {
- if (hwif->pio_mask)
- mode = fls(hwif->pio_mask) - 1 + XFER_PIO_0;
- else
- mode = XFER_PIO_4;
- }
-
-/* printk("%s: mode 0x%02x, speed 0x%02x\n", __func__, mode, speed); */
-
- return min(speed, mode);
-}
-
-/**
- * ide_get_best_pio_mode - get PIO mode from drive
- * @drive: drive to consider
- * @mode_wanted: preferred mode
- * @max_mode: highest allowed mode
- *
- * This routine returns the recommended PIO settings for a given drive,
- * based on the drive->id information and the ide_pio_blacklist[].
- *
- * Drive PIO mode is auto-selected if 255 is passed as mode_wanted.
- * This is used by most chipset support modules when "auto-tuning".
- */
-
-u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
-{
- u16 *id = drive->id;
- int pio_mode = -1, overridden = 0;
-
- if (mode_wanted != 255)
- return min_t(u8, mode_wanted, max_mode);
-
- if ((drive->hwif->host_flags & IDE_HFLAG_PIO_NO_BLACKLIST) == 0)
- pio_mode = ide_scan_pio_blacklist((char *)&id[ATA_ID_PROD]);
-
- if (pio_mode != -1) {
- printk(KERN_INFO "%s: is on PIO blacklist\n", drive->name);
- } else {
- pio_mode = id[ATA_ID_OLD_PIO_MODES] >> 8;
- if (pio_mode > 2) { /* 2 is maximum allowed tPIO value */
- pio_mode = 2;
- overridden = 1;
- }
-
- if (id[ATA_ID_FIELD_VALID] & 2) { /* ATA2? */
- if (ata_id_has_iordy(id)) {
- if (id[ATA_ID_PIO_MODES] & 7) {
- overridden = 0;
- if (id[ATA_ID_PIO_MODES] & 4)
- pio_mode = 5;
- else if (id[ATA_ID_PIO_MODES] & 2)
- pio_mode = 4;
- else
- pio_mode = 3;
- }
- }
- }
-
- if (overridden)
- printk(KERN_INFO "%s: tPIO > 2, assuming tPIO = 2\n",
- drive->name);
- }
-
- if (pio_mode > max_mode)
- pio_mode = max_mode;
-
- return pio_mode;
-}
-EXPORT_SYMBOL_GPL(ide_get_best_pio_mode);
-
-/* req_pio == "255" for auto-tune */
-void ide_set_pio(ide_drive_t *drive, u8 req_pio)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_port_ops *port_ops = hwif->port_ops;
- u8 host_pio, pio;
-
- if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
- (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
- return;
-
- BUG_ON(hwif->pio_mask == 0x00);
-
- host_pio = fls(hwif->pio_mask) - 1;
-
- pio = ide_get_best_pio_mode(drive, req_pio, host_pio);
-
- /*
- * TODO:
- * - report device max PIO mode
- * - check req_pio != 255 against device max PIO mode
- */
- printk(KERN_DEBUG "%s: host max PIO%d wanted PIO%d%s selected PIO%d\n",
- drive->name, host_pio, req_pio,
- req_pio == 255 ? "(auto-tune)" : "", pio);
-
- (void)ide_set_pio_mode(drive, XFER_PIO_0 + pio);
-}
-EXPORT_SYMBOL_GPL(ide_set_pio);
-
/**
* ide_toggle_bounce - handle bounce buffering
* @drive: drive to update
@@ -188,89 +31,6 @@ void ide_toggle_bounce(ide_drive_t *drive, int on)
blk_queue_bounce_limit(drive->queue, addr);
}
-int ide_set_pio_mode(ide_drive_t *drive, const u8 mode)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_port_ops *port_ops = hwif->port_ops;
-
- if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
- return 0;
-
- if (port_ops == NULL || port_ops->set_pio_mode == NULL)
- return -1;
-
- /*
- * TODO: temporary hack for some legacy host drivers that didn't
- * set transfer mode on the device in ->set_pio_mode method...
- */
- if (port_ops->set_dma_mode == NULL) {
- port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
- return 0;
- }
-
- if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
- if (ide_config_drive_speed(drive, mode))
- return -1;
- port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
- return 0;
- } else {
- port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
- return ide_config_drive_speed(drive, mode);
- }
-}
-
-int ide_set_dma_mode(ide_drive_t *drive, const u8 mode)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_port_ops *port_ops = hwif->port_ops;
-
- if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
- return 0;
-
- if (port_ops == NULL || port_ops->set_dma_mode == NULL)
- return -1;
-
- if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
- if (ide_config_drive_speed(drive, mode))
- return -1;
- port_ops->set_dma_mode(drive, mode);
- return 0;
- } else {
- port_ops->set_dma_mode(drive, mode);
- return ide_config_drive_speed(drive, mode);
- }
-}
-EXPORT_SYMBOL_GPL(ide_set_dma_mode);
-
-/**
- * ide_set_xfer_rate - set transfer rate
- * @drive: drive to set
- * @rate: speed to attempt to set
- *
- * General helper for setting the speed of an IDE device. This
- * function knows about user enforced limits from the configuration
- * which ->set_pio_mode/->set_dma_mode does not.
- */
-
-int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_port_ops *port_ops = hwif->port_ops;
-
- if (port_ops == NULL || port_ops->set_dma_mode == NULL ||
- (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
- return -1;
-
- rate = ide_rate_filter(drive, rate);
-
- BUG_ON(rate < XFER_PIO_0);
-
- if (rate >= XFER_PIO_0 && rate <= XFER_PIO_5)
- return ide_set_pio_mode(drive, rate);
-
- return ide_set_dma_mode(drive, rate);
-}
-
static void ide_dump_opcode(ide_drive_t *drive)
{
struct request *rq = drive->hwif->rq;
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index c875a957596..f30e52152fc 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -1,5 +1,6 @@
#include <linux/kernel.h>
#include <linux/ide.h>
+#include <linux/hdreg.h>
#include <linux/jiffies.h>
#include <linux/blkdev.h>
@@ -60,6 +61,30 @@ out:
return;
}
+ide_startstop_t ide_do_park_unpark(ide_drive_t *drive, struct request *rq)
+{
+ ide_task_t task;
+ struct ide_taskfile *tf = &task.tf;
+
+ memset(&task, 0, sizeof(task));
+ if (rq->cmd[0] == REQ_PARK_HEADS) {
+ drive->sleep = *(unsigned long *)rq->special;
+ drive->dev_flags |= IDE_DFLAG_SLEEPING;
+ tf->command = ATA_CMD_IDLEIMMEDIATE;
+ tf->feature = 0x44;
+ tf->lbal = 0x4c;
+ tf->lbam = 0x4e;
+ tf->lbah = 0x55;
+ task.tf_flags |= IDE_TFLAG_CUSTOM_HANDLER;
+ } else /* cmd == REQ_UNPARK_HEADS */
+ tf->command = ATA_CMD_CHK_POWER;
+
+ task.tf_flags |= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+ task.rq = rq;
+ drive->hwif->data_phase = task.data_phase = TASKFILE_NO_DATA;
+ return do_rw_taskfile(drive, &task);
+}
+
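[Editor's note] The magic numbers in the new park branch are not arbitrary: they are the ATA8-ACS IDLE IMMEDIATE "unload feature" signature. A reference sketch with illustrative constants only:

#define ATA_UNLOAD_FEATURE	0x44
#define ATA_UNLOAD_LBAL		0x4c	/* 'L' */
#define ATA_UNLOAD_LBAM		0x4e	/* 'N' */
#define ATA_UNLOAD_LBAH		0x55	/* 'U' */
/* The unpark path issues CHECK POWER MODE instead: receipt of a new
 * command ends the unload condition, and this one is harmless.
 */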
ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
index bddae2b329a..61111fd2713 100644
--- a/drivers/ide/ide-pci-generic.c
+++ b/drivers/ide/ide-pci-generic.c
@@ -33,8 +33,6 @@ static int ide_generic_all; /* Set to claim all devices */
module_param_named(all_generic_ide, ide_generic_all, bool, 0444);
MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers.");
-#define IDE_HFLAGS_UMC (IDE_HFLAG_NO_DMA | IDE_HFLAG_FORCE_LEGACY_IRQS)
-
#define DECLARE_GENERIC_PCI_DEV(extra_flags) \
{ \
.name = DRV_NAME, \
@@ -61,7 +59,7 @@ static const struct ide_port_info generic_chipsets[] __devinitdata = {
/* 2: SAMURAI / HT6565 / HINT_IDE */
DECLARE_GENERIC_PCI_DEV(0),
/* 3: UM8673F / UM8886A / UM8886BF */
- DECLARE_GENERIC_PCI_DEV(IDE_HFLAGS_UMC),
+ DECLARE_GENERIC_PCI_DEV(IDE_HFLAG_NO_DMA),
/* 4: VIA_IDE / OPTI621V / Piccolo010{2,3,5} */
DECLARE_GENERIC_PCI_DEV(IDE_HFLAG_NO_AUTODMA),
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index ee8e3e7cad5..974067043fb 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -181,16 +181,16 @@ static void ide_classify_atapi_dev(ide_drive_t *drive)
* do_identify - identify a drive
* @drive: drive to identify
* @cmd: command used
+ * @id: buffer for IDENTIFY data
*
* Called when we have issued a drive identify command to
* read and parse the results. This function is run with
* interrupts disabled.
*/
-static void do_identify(ide_drive_t *drive, u8 cmd)
+static void do_identify(ide_drive_t *drive, u8 cmd, u16 *id)
{
ide_hwif_t *hwif = drive->hwif;
- u16 *id = drive->id;
char *m = (char *)&id[ATA_ID_PROD];
unsigned long flags;
int bswap = 1;
@@ -233,16 +233,6 @@ static void do_identify(ide_drive_t *drive, u8 cmd)
drive->dev_flags |= IDE_DFLAG_PRESENT;
drive->dev_flags &= ~IDE_DFLAG_DEAD;
- /*
- * Check for an ATAPI device
- */
- if (cmd == ATA_CMD_ID_ATAPI)
- ide_classify_atapi_dev(drive);
- else
- /*
- * Not an ATAPI device: looks like a "regular" hard disk
- */
- ide_classify_ata_dev(drive);
return;
err_misc:
kfree(id);
@@ -250,21 +240,19 @@ err_misc:
}
/**
- * actual_try_to_identify - send ata/atapi identify
+ * ide_dev_read_id - send ATA/ATAPI IDENTIFY command
* @drive: drive to identify
* @cmd: command to use
+ * @id: buffer for IDENTIFY data
*
- * try_to_identify() sends an ATA(PI) IDENTIFY request to a drive
- * and waits for a response. It also monitors irqs while this is
- * happening, in hope of automatically determining which one is
- * being used by the interface.
+ * Sends an ATA(PI) IDENTIFY request to a drive and waits for a response.
*
* Returns: 0 device was identified
* 1 device timed-out (no response to identify request)
* 2 device aborted the command (refused to identify itself)
*/
-static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
+int ide_dev_read_id(ide_drive_t *drive, u8 cmd, u16 *id)
{
ide_hwif_t *hwif = drive->hwif;
struct ide_io_ports *io_ports = &hwif->io_ports;
@@ -273,6 +261,13 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
unsigned long timeout;
u8 s = 0, a = 0;
+ /*
+ * Disable device IRQ. Otherwise we'll get spurious interrupts
+ * during the identify phase that the IRQ handler isn't expecting.
+ */
+ if (io_ports->ctl_addr)
+ tp_ops->set_irq(hwif, 0);
+
/* take a deep breath */
msleep(50);
@@ -317,7 +312,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
if (OK_STAT(s, ATA_DRQ, BAD_R_STAT)) {
/* drive returned ID */
- do_identify(drive, cmd);
+ do_identify(drive, cmd, id);
/* drive responded with ID */
rc = 0;
/* clear drive IRQ */
@@ -329,63 +324,6 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
return rc;
}
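[Editor's note] A condensed view, not the literal patch code, of how callers consume this return convention (see probe_for_drive() further down in this file):

/* rc >= 2 means the device aborted ATA IDENTIFY, which is how ATAPI
 * devices normally respond, so the probe retries with the packet
 * variant of the command.
 */
rc = do_probe(drive, ATA_CMD_ID_ATA);
if (rc >= 2)
	rc = do_probe(drive, ATA_CMD_ID_ATAPI);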
-/**
- * try_to_identify - try to identify a drive
- * @drive: drive to probe
- * @cmd: command to use
- *
- * Issue the identify command and then do IRQ probing to
- * complete the identification when needed by finding the
- * IRQ the drive is attached to
- */
-
-static int try_to_identify (ide_drive_t *drive, u8 cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- int retval;
- int autoprobe = 0;
- unsigned long cookie = 0;
-
- /*
- * Disable device irq unless we need to
- * probe for it. Otherwise we'll get spurious
- * interrupts during the identify-phase that
- * the irq handler isn't expecting.
- */
- if (hwif->io_ports.ctl_addr) {
- if (!hwif->irq) {
- autoprobe = 1;
- cookie = probe_irq_on();
- }
- tp_ops->set_irq(hwif, autoprobe);
- }
-
- retval = actual_try_to_identify(drive, cmd);
-
- if (autoprobe) {
- int irq;
-
- tp_ops->set_irq(hwif, 0);
- /* clear drive IRQ */
- (void)tp_ops->read_status(hwif);
- udelay(5);
- irq = probe_irq_off(cookie);
- if (!hwif->irq) {
- if (irq > 0) {
- hwif->irq = irq;
- } else {
- /* Mmmm.. multiple IRQs..
- * don't know which was ours
- */
- printk(KERN_ERR "%s: IRQ probe failed (0x%lx)\n",
- drive->name, cookie);
- }
- }
- }
- return retval;
-}
-
int ide_busy_sleep(ide_hwif_t *hwif, unsigned long timeout, int altstatus)
{
u8 stat;
@@ -440,6 +378,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
{
ide_hwif_t *hwif = drive->hwif;
const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ u16 *id = drive->id;
int rc;
u8 present = !!(drive->dev_flags & IDE_DFLAG_PRESENT), stat;
@@ -475,11 +414,10 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
if (OK_STAT(stat, ATA_DRDY, ATA_BUSY) ||
present || cmd == ATA_CMD_ID_ATAPI) {
- /* send cmd and wait */
- if ((rc = try_to_identify(drive, cmd))) {
+ rc = ide_dev_read_id(drive, cmd, id);
+ if (rc)
/* failed: try again */
- rc = try_to_identify(drive,cmd);
- }
+ rc = ide_dev_read_id(drive, cmd, id);
stat = tp_ops->read_status(hwif);
@@ -494,7 +432,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
msleep(50);
tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
(void)ide_busy_sleep(hwif, WAIT_WORSTCASE, 0);
- rc = try_to_identify(drive, cmd);
+ rc = ide_dev_read_id(drive, cmd, id);
}
/* ensure drive IRQ is clear */
@@ -517,37 +455,6 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
return rc;
}
-/*
- *
- */
-static void enable_nest (ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_tp_ops *tp_ops = hwif->tp_ops;
- u8 stat;
-
- printk(KERN_INFO "%s: enabling %s -- ",
- hwif->name, (char *)&drive->id[ATA_ID_PROD]);
-
- SELECT_DRIVE(drive);
- msleep(50);
- tp_ops->exec_command(hwif, ATA_EXABYTE_ENABLE_NEST);
-
- if (ide_busy_sleep(hwif, WAIT_WORSTCASE, 0)) {
- printk(KERN_CONT "failed (timeout)\n");
- return;
- }
-
- msleep(50);
-
- stat = tp_ops->read_status(hwif);
-
- if (!OK_STAT(stat, 0, BAD_STAT))
- printk(KERN_CONT "failed (status = 0x%02x)\n", stat);
- else
- printk(KERN_CONT "success\n");
-}
-
/**
* probe_for_drives - upper level drive probe
* @drive: drive to probe for
@@ -563,6 +470,8 @@ static void enable_nest (ide_drive_t *drive)
static u8 probe_for_drive(ide_drive_t *drive)
{
char *m;
+ int rc;
+ u8 cmd;
/*
* In order to keep things simple we have an id
@@ -586,21 +495,19 @@ static u8 probe_for_drive(ide_drive_t *drive)
/* skip probing? */
if ((drive->dev_flags & IDE_DFLAG_NOPROBE) == 0) {
-retry:
/* if !(success||timed-out) */
- if (do_probe(drive, ATA_CMD_ID_ATA) >= 2)
+ cmd = ATA_CMD_ID_ATA;
+ rc = do_probe(drive, cmd);
+ if (rc >= 2) {
/* look for ATAPI device */
- (void)do_probe(drive, ATA_CMD_ID_ATAPI);
+ cmd = ATA_CMD_ID_ATAPI;
+ rc = do_probe(drive, cmd);
+ }
if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
/* drive not found */
return 0;
- if (strstr(m, "E X A B Y T E N E S T")) {
- enable_nest(drive);
- goto retry;
- }
-
/* identification failed? */
if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
if (drive->media == ide_disk) {
@@ -614,8 +521,12 @@ retry:
printk(KERN_WARNING "%s: Unknown device on bus refused identification. Ignoring.\n", drive->name);
drive->dev_flags &= ~IDE_DFLAG_PRESENT;
}
+ } else {
+ if (cmd == ATA_CMD_ID_ATAPI)
+ ide_classify_atapi_dev(drive);
+ else
+ ide_classify_ata_dev(drive);
}
- /* drive was found */
}
if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
@@ -779,7 +690,6 @@ EXPORT_SYMBOL_GPL(ide_undecoded_slave);
static int ide_probe_port(ide_hwif_t *hwif)
{
ide_drive_t *drive;
- unsigned long flags;
unsigned int irqd;
int i, rc = -ENODEV;
@@ -797,9 +707,6 @@ static int ide_probe_port(ide_hwif_t *hwif)
if (irqd)
disable_irq(hwif->irq);
- local_save_flags(flags);
- local_irq_enable_in_hardirq();
-
if (ide_port_wait_ready(hwif) == -EBUSY)
printk(KERN_DEBUG "%s: Wait for ready failed before probe !\n", hwif->name);
@@ -813,8 +720,6 @@ static int ide_probe_port(ide_hwif_t *hwif)
rc = 0;
}
- local_irq_restore(flags);
-
/*
* Use cached IRQ number. It might be (and is...) changed by probe
* code above
@@ -831,29 +736,18 @@ static void ide_port_tune_devices(ide_hwif_t *hwif)
ide_drive_t *drive;
int i;
- ide_port_for_each_dev(i, drive, hwif) {
- if (drive->dev_flags & IDE_DFLAG_PRESENT) {
- if (port_ops && port_ops->quirkproc)
- port_ops->quirkproc(drive);
- }
+ ide_port_for_each_present_dev(i, drive, hwif) {
+ if (port_ops && port_ops->quirkproc)
+ port_ops->quirkproc(drive);
}
- ide_port_for_each_dev(i, drive, hwif) {
- if (drive->dev_flags & IDE_DFLAG_PRESENT) {
- ide_set_max_pio(drive);
+ ide_port_for_each_present_dev(i, drive, hwif) {
+ ide_set_max_pio(drive);
- drive->dev_flags |= IDE_DFLAG_NICE1;
-
- if (hwif->dma_ops)
- ide_set_dma(drive);
- }
- }
+ drive->dev_flags |= IDE_DFLAG_NICE1;
- ide_port_for_each_dev(i, drive, hwif) {
- if (hwif->host_flags & IDE_HFLAG_NO_IO_32BIT)
- drive->dev_flags |= IDE_DFLAG_NO_IO_32BIT;
- else
- drive->dev_flags &= ~IDE_DFLAG_NO_IO_32BIT;
+ if (hwif->dma_ops)
+ ide_set_dma(drive);
}
}
@@ -924,10 +818,7 @@ static int ide_port_setup_devices(ide_hwif_t *hwif)
int i, j = 0;
mutex_lock(&ide_cfg_mtx);
- ide_port_for_each_dev(i, drive, hwif) {
- if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
- continue;
-
+ ide_port_for_each_present_dev(i, drive, hwif) {
if (ide_init_queue(drive)) {
printk(KERN_ERR "ide: failed to init %s\n",
drive->name);
@@ -953,13 +844,6 @@ static int init_irq (ide_hwif_t *hwif)
irq_handler_t irq_handler;
int sa = 0;
- mutex_lock(&ide_cfg_mtx);
- spin_lock_init(&hwif->lock);
-
- init_timer(&hwif->timer);
- hwif->timer.function = &ide_timer_expiry;
- hwif->timer.data = (unsigned long)hwif;
-
irq_handler = hwif->host->irq_handler;
if (irq_handler == NULL)
irq_handler = ide_intr;
@@ -997,10 +881,8 @@ static int init_irq (ide_hwif_t *hwif)
printk(KERN_CONT " (serialized)");
printk(KERN_CONT "\n");
- mutex_unlock(&ide_cfg_mtx);
return 0;
out_up:
- mutex_unlock(&ide_cfg_mtx);
return 1;
}
@@ -1099,14 +981,9 @@ static void drive_release_dev (struct device *dev)
static int hwif_init(ide_hwif_t *hwif)
{
- int old_irq;
-
if (!hwif->irq) {
- hwif->irq = __ide_default_irq(hwif->io_ports.data_addr);
- if (!hwif->irq) {
- printk(KERN_ERR "%s: disabled, no IRQ\n", hwif->name);
- return 0;
- }
+ printk(KERN_ERR "%s: disabled, no IRQ\n", hwif->name);
+ return 0;
}
if (register_blkdev(hwif->major, hwif->name))
@@ -1124,29 +1001,12 @@ static int hwif_init(ide_hwif_t *hwif)
sg_init_table(hwif->sg_table, hwif->sg_max_nents);
- if (init_irq(hwif) == 0)
- goto done;
-
- old_irq = hwif->irq;
- /*
- * It failed to initialise. Find the default IRQ for
- * this port and try that.
- */
- hwif->irq = __ide_default_irq(hwif->io_ports.data_addr);
- if (!hwif->irq) {
- printk(KERN_ERR "%s: disabled, unable to get IRQ %d\n",
- hwif->name, old_irq);
- goto out;
- }
if (init_irq(hwif)) {
- printk(KERN_ERR "%s: probed IRQ %d and default IRQ %d failed\n",
- hwif->name, old_irq, hwif->irq);
+ printk(KERN_ERR "%s: disabled, unable to get IRQ %d\n",
+ hwif->name, hwif->irq);
goto out;
}
- printk(KERN_WARNING "%s: probed IRQ %d failed, using default\n",
- hwif->name, hwif->irq);
-done:
blk_register_region(MKDEV(hwif->major, 0), MAX_DRIVES << PARTN_BITS,
THIS_MODULE, ata_probe, ata_lock, hwif);
return 1;
@@ -1161,13 +1021,10 @@ static void hwif_register_devices(ide_hwif_t *hwif)
ide_drive_t *drive;
unsigned int i;
- ide_port_for_each_dev(i, drive, hwif) {
+ ide_port_for_each_present_dev(i, drive, hwif) {
struct device *dev = &drive->gendev;
int ret;
- if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
- continue;
-
dev_set_name(dev, "%u.%u", hwif->index, i);
dev->parent = &hwif->gendev;
dev->bus = &ide_bus_type;
@@ -1192,6 +1049,8 @@ static void ide_port_init_devices(ide_hwif_t *hwif)
if (hwif->host_flags & IDE_HFLAG_IO_32BIT)
drive->io_32bit = 1;
+ if (hwif->host_flags & IDE_HFLAG_NO_IO_32BIT)
+ drive->dev_flags |= IDE_DFLAG_NO_IO_32BIT;
if (hwif->host_flags & IDE_HFLAG_UNMASK_IRQS)
drive->dev_flags |= IDE_DFLAG_UNMASK;
if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS)
@@ -1213,10 +1072,6 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
if (d->init_iops)
d->init_iops(hwif);
- if ((!hwif->irq && (d->host_flags & IDE_HFLAG_LEGACY_IRQS)) ||
- (d->host_flags & IDE_HFLAG_FORCE_LEGACY_IRQS))
- hwif->irq = port ? 15 : 14;
-
/* ->host_flags may be set by ->init_iops (or even earlier...) */
hwif->host_flags |= d->host_flags;
hwif->pio_mask = d->pio_mask;
@@ -1317,6 +1172,12 @@ static void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
hwif->name[2] = 'e';
hwif->name[3] = '0' + index;
+ spin_lock_init(&hwif->lock);
+
+ init_timer(&hwif->timer);
+ hwif->timer.function = &ide_timer_expiry;
+ hwif->timer.data = (unsigned long)hwif;
+
init_completion(&hwif->gendev_rel_comp);
hwif->tp_ops = &default_tp_ops;
@@ -1567,7 +1428,7 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
j++;
- ide_acpi_init(hwif);
+ ide_acpi_init_port(hwif);
if (hwif->present)
ide_acpi_port_init_devices(hwif);
@@ -1624,11 +1485,9 @@ static void __ide_port_unregister_devices(ide_hwif_t *hwif)
ide_drive_t *drive;
int i;
- ide_port_for_each_dev(i, drive, hwif) {
- if (drive->dev_flags & IDE_DFLAG_PRESENT) {
- device_unregister(&drive->gendev);
- wait_for_completion(&drive->gendev_rel_comp);
- }
+ ide_port_for_each_present_dev(i, drive, hwif) {
+ device_unregister(&drive->gendev);
+ wait_for_completion(&drive->gendev_rel_comp);
}
}
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index a7b9287ee0d..417cde56eaf 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -600,7 +600,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif)
int i;
ide_port_for_each_dev(i, drive, hwif) {
- if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0 || drive->proc)
+ if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
continue;
drive->proc = proc_mkdir(drive->name, parent);
diff --git a/drivers/ide/ide-xfer-mode.c b/drivers/ide/ide-xfer-mode.c
new file mode 100644
index 00000000000..6910f6a257e
--- /dev/null
+++ b/drivers/ide/ide-xfer-mode.c
@@ -0,0 +1,246 @@
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/ide.h>
+#include <linux/bitops.h>
+
+static const char *udma_str[] =
+ { "UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
+ "UDMA/66", "UDMA/100", "UDMA/133", "UDMA7" };
+static const char *mwdma_str[] =
+ { "MWDMA0", "MWDMA1", "MWDMA2" };
+static const char *swdma_str[] =
+ { "SWDMA0", "SWDMA1", "SWDMA2" };
+static const char *pio_str[] =
+ { "PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5" };
+
+/**
+ * ide_xfer_verbose - return IDE mode names
+ * @mode: transfer mode
+ *
+ * Returns a constant string giving the name of the mode
+ * requested.
+ */
+
+const char *ide_xfer_verbose(u8 mode)
+{
+ const char *s;
+ u8 i = mode & 0xf;
+
+ if (mode >= XFER_UDMA_0 && mode <= XFER_UDMA_7)
+ s = udma_str[i];
+ else if (mode >= XFER_MW_DMA_0 && mode <= XFER_MW_DMA_2)
+ s = mwdma_str[i];
+ else if (mode >= XFER_SW_DMA_0 && mode <= XFER_SW_DMA_2)
+ s = swdma_str[i];
+ else if (mode >= XFER_PIO_0 && mode <= XFER_PIO_5)
+ s = pio_str[i & 0x7];
+ else if (mode == XFER_PIO_SLOW)
+ s = "PIO SLOW";
+ else
+ s = "XFER ERROR";
+
+ return s;
+}
+EXPORT_SYMBOL(ide_xfer_verbose);
+
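[Editor's note] An example of the lookup, assuming the usual XFER_* values from <linux/ata.h>:

/* XFER_UDMA_5 is 0x45; the low nibble indexes udma_str[], giving: */
printk(KERN_INFO "%s: %s\n", drive->name, ide_xfer_verbose(XFER_UDMA_5));
/* -> "hda: UDMA/100" */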
+/**
+ * ide_get_best_pio_mode - get PIO mode from drive
+ * @drive: drive to consider
+ * @mode_wanted: preferred mode
+ * @max_mode: highest allowed mode
+ *
+ * This routine returns the recommended PIO settings for a given drive,
+ * based on the drive->id information and the ide_pio_blacklist[].
+ *
+ * Drive PIO mode is auto-selected if 255 is passed as mode_wanted.
+ * This is used by most chipset support modules when "auto-tuning".
+ */
+
+u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
+{
+ u16 *id = drive->id;
+ int pio_mode = -1, overridden = 0;
+
+ if (mode_wanted != 255)
+ return min_t(u8, mode_wanted, max_mode);
+
+ if ((drive->hwif->host_flags & IDE_HFLAG_PIO_NO_BLACKLIST) == 0)
+ pio_mode = ide_scan_pio_blacklist((char *)&id[ATA_ID_PROD]);
+
+ if (pio_mode != -1) {
+ printk(KERN_INFO "%s: is on PIO blacklist\n", drive->name);
+ } else {
+ pio_mode = id[ATA_ID_OLD_PIO_MODES] >> 8;
+ if (pio_mode > 2) { /* 2 is maximum allowed tPIO value */
+ pio_mode = 2;
+ overridden = 1;
+ }
+
+ if (id[ATA_ID_FIELD_VALID] & 2) { /* ATA2? */
+ if (ata_id_has_iordy(id)) {
+ if (id[ATA_ID_PIO_MODES] & 7) {
+ overridden = 0;
+ if (id[ATA_ID_PIO_MODES] & 4)
+ pio_mode = 5;
+ else if (id[ATA_ID_PIO_MODES] & 2)
+ pio_mode = 4;
+ else
+ pio_mode = 3;
+ }
+ }
+ }
+
+ if (overridden)
+ printk(KERN_INFO "%s: tPIO > 2, assuming tPIO = 2\n",
+ drive->name);
+ }
+
+ if (pio_mode > max_mode)
+ pio_mode = max_mode;
+
+ return pio_mode;
+}
+EXPORT_SYMBOL_GPL(ide_get_best_pio_mode);
+
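[Editor's note] A worked example of the auto-tune branch (mode_wanted == 255), for a drive that is off the PIO blacklist:

/*   id[ATA_ID_OLD_PIO_MODES] >> 8 = 2     old tPIO field says PIO2
 *   id[ATA_ID_FIELD_VALID] & 2    = set   words 64-70 are valid
 *   ata_id_has_iordy(id)          = true
 *   id[ATA_ID_PIO_MODES]          = 0x03  PIO3 (bit 0) + PIO4 (bit 1)
 *
 * The 0x02 bit wins over the old tPIO value, so PIO4 is returned,
 * subject only to the max_mode clamp.
 */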
+int ide_set_pio_mode(ide_drive_t *drive, const u8 mode)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+
+ if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
+ return 0;
+
+ if (port_ops == NULL || port_ops->set_pio_mode == NULL)
+ return -1;
+
+ /*
+ * TODO: temporary hack for some legacy host drivers that didn't
+ * set transfer mode on the device in ->set_pio_mode method...
+ */
+ if (port_ops->set_dma_mode == NULL) {
+ port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
+ return 0;
+ }
+
+ if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
+ if (ide_config_drive_speed(drive, mode))
+ return -1;
+ port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
+ return 0;
+ } else {
+ port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
+ return ide_config_drive_speed(drive, mode);
+ }
+}
+
+int ide_set_dma_mode(ide_drive_t *drive, const u8 mode)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+
+ if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
+ return 0;
+
+ if (port_ops == NULL || port_ops->set_dma_mode == NULL)
+ return -1;
+
+ if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
+ if (ide_config_drive_speed(drive, mode))
+ return -1;
+ port_ops->set_dma_mode(drive, mode);
+ return 0;
+ } else {
+ port_ops->set_dma_mode(drive, mode);
+ return ide_config_drive_speed(drive, mode);
+ }
+}
+EXPORT_SYMBOL_GPL(ide_set_dma_mode);
+
+/* req_pio == "255" for auto-tune */
+void ide_set_pio(ide_drive_t *drive, u8 req_pio)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+ u8 host_pio, pio;
+
+ if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
+ (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
+ return;
+
+ BUG_ON(hwif->pio_mask == 0x00);
+
+ host_pio = fls(hwif->pio_mask) - 1;
+
+ pio = ide_get_best_pio_mode(drive, req_pio, host_pio);
+
+ /*
+ * TODO:
+ * - report device max PIO mode
+ * - check req_pio != 255 against device max PIO mode
+ */
+ printk(KERN_DEBUG "%s: host max PIO%d wanted PIO%d%s selected PIO%d\n",
+ drive->name, host_pio, req_pio,
+ req_pio == 255 ? "(auto-tune)" : "", pio);
+
+ (void)ide_set_pio_mode(drive, XFER_PIO_0 + pio);
+}
+EXPORT_SYMBOL_GPL(ide_set_pio);
+
+/**
+ * ide_rate_filter - filter transfer mode
+ * @drive: IDE device
+ * @speed: desired speed
+ *
+ * Given the available transfer modes this function returns
+ * the best available speed at or below the speed requested.
+ *
+ * TODO: check device PIO capabilities
+ */
+
+static u8 ide_rate_filter(ide_drive_t *drive, u8 speed)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u8 mode = ide_find_dma_mode(drive, speed);
+
+ if (mode == 0) {
+ if (hwif->pio_mask)
+ mode = fls(hwif->pio_mask) - 1 + XFER_PIO_0;
+ else
+ mode = XFER_PIO_4;
+ }
+
+/* printk("%s: mode 0x%02x, speed 0x%02x\n", __func__, mode, speed); */
+
+ return min(speed, mode);
+}
+
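[Editor's note] The filter in numbers:

/* A request for XFER_UDMA_5 (0x45) on a port whose best common DMA
 * mode is XFER_UDMA_2 (0x42) filters down to 0x42; on a PIO-only port
 * with pio_mask == ATA_PIO4 it falls back to XFER_PIO_4 (0x0c).
 */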
+/**
+ * ide_set_xfer_rate - set transfer rate
+ * @drive: drive to set
+ * @rate: speed to attempt to set
+ *
+ * General helper for setting the speed of an IDE device. This
+ * function knows about user enforced limits from the configuration
+ * which ->set_pio_mode/->set_dma_mode does not.
+ */
+
+int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+
+ if (port_ops == NULL || port_ops->set_dma_mode == NULL ||
+ (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
+ return -1;
+
+ rate = ide_rate_filter(drive, rate);
+
+ BUG_ON(rate < XFER_PIO_0);
+
+ if (rate >= XFER_PIO_0 && rate <= XFER_PIO_5)
+ return ide_set_pio_mode(drive, rate);
+
+ return ide_set_dma_mode(drive, rate);
+}
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 0920e3b0c96..92c9b90931e 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -62,160 +62,6 @@
struct class *ide_port_class;
-/*
- * Locks for IDE setting functionality
- */
-
-DEFINE_MUTEX(ide_setting_mtx);
-
-ide_devset_get(io_32bit, io_32bit);
-
-static int set_io_32bit(ide_drive_t *drive, int arg)
-{
- if (drive->dev_flags & IDE_DFLAG_NO_IO_32BIT)
- return -EPERM;
-
- if (arg < 0 || arg > 1 + (SUPPORT_VLB_SYNC << 1))
- return -EINVAL;
-
- drive->io_32bit = arg;
-
- return 0;
-}
-
-ide_devset_get_flag(ksettings, IDE_DFLAG_KEEP_SETTINGS);
-
-static int set_ksettings(ide_drive_t *drive, int arg)
-{
- if (arg < 0 || arg > 1)
- return -EINVAL;
-
- if (arg)
- drive->dev_flags |= IDE_DFLAG_KEEP_SETTINGS;
- else
- drive->dev_flags &= ~IDE_DFLAG_KEEP_SETTINGS;
-
- return 0;
-}
-
-ide_devset_get_flag(using_dma, IDE_DFLAG_USING_DMA);
-
-static int set_using_dma(ide_drive_t *drive, int arg)
-{
-#ifdef CONFIG_BLK_DEV_IDEDMA
- int err = -EPERM;
-
- if (arg < 0 || arg > 1)
- return -EINVAL;
-
- if (ata_id_has_dma(drive->id) == 0)
- goto out;
-
- if (drive->hwif->dma_ops == NULL)
- goto out;
-
- err = 0;
-
- if (arg) {
- if (ide_set_dma(drive))
- err = -EIO;
- } else
- ide_dma_off(drive);
-
-out:
- return err;
-#else
- if (arg < 0 || arg > 1)
- return -EINVAL;
-
- return -EPERM;
-#endif
-}
-
-/*
- * handle HDIO_SET_PIO_MODE ioctl abusers here, eventually it will go away
- */
-static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio)
-{
- switch (req_pio) {
- case 202:
- case 201:
- case 200:
- case 102:
- case 101:
- case 100:
- return (hwif->host_flags & IDE_HFLAG_ABUSE_DMA_MODES) ? 1 : 0;
- case 9:
- case 8:
- return (hwif->host_flags & IDE_HFLAG_ABUSE_PREFETCH) ? 1 : 0;
- case 7:
- case 6:
- return (hwif->host_flags & IDE_HFLAG_ABUSE_FAST_DEVSEL) ? 1 : 0;
- default:
- return 0;
- }
-}
-
-static int set_pio_mode(ide_drive_t *drive, int arg)
-{
- ide_hwif_t *hwif = drive->hwif;
- const struct ide_port_ops *port_ops = hwif->port_ops;
-
- if (arg < 0 || arg > 255)
- return -EINVAL;
-
- if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
- (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
- return -ENOSYS;
-
- if (set_pio_mode_abuse(drive->hwif, arg)) {
- if (arg == 8 || arg == 9) {
- unsigned long flags;
-
- /* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */
- spin_lock_irqsave(&hwif->lock, flags);
- port_ops->set_pio_mode(drive, arg);
- spin_unlock_irqrestore(&hwif->lock, flags);
- } else
- port_ops->set_pio_mode(drive, arg);
- } else {
- int keep_dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
-
- ide_set_pio(drive, arg);
-
- if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) {
- if (keep_dma)
- ide_dma_on(drive);
- }
- }
-
- return 0;
-}
-
-ide_devset_get_flag(unmaskirq, IDE_DFLAG_UNMASK);
-
-static int set_unmaskirq(ide_drive_t *drive, int arg)
-{
- if (drive->dev_flags & IDE_DFLAG_NO_UNMASK)
- return -EPERM;
-
- if (arg < 0 || arg > 1)
- return -EINVAL;
-
- if (arg)
- drive->dev_flags |= IDE_DFLAG_UNMASK;
- else
- drive->dev_flags &= ~IDE_DFLAG_UNMASK;
-
- return 0;
-}
-
-ide_ext_devset_rw_sync(io_32bit, io_32bit);
-ide_ext_devset_rw_sync(keepsettings, ksettings);
-ide_ext_devset_rw_sync(unmaskirq, unmaskirq);
-ide_ext_devset_rw_sync(using_dma, using_dma);
-__IDE_DEVSET(pio_mode, DS_SYNC, NULL, set_pio_mode);
-
/**
* ide_device_get - get an additional reference to a ide_drive_t
* @drive: device to get a reference to
@@ -527,6 +373,8 @@ static int __init ide_init(void)
goto out_port_class;
}
+ ide_acpi_init();
+
proc_ide_create();
return 0;
diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
index 13b8153112e..6b9fc950b4a 100644
--- a/drivers/ide/it821x.c
+++ b/drivers/ide/it821x.c
@@ -603,7 +603,7 @@ static void it8212_disable_raid(struct pci_dev *dev)
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
}
-static unsigned int init_chipset_it821x(struct pci_dev *dev)
+static int init_chipset_it821x(struct pci_dev *dev)
{
u8 conf;
static char *mode[2] = { "pass through", "smart" };
diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
index 83643ed9a42..ea48a3ee806 100644
--- a/drivers/ide/ns87415.c
+++ b/drivers/ide/ns87415.c
@@ -286,9 +286,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
}
if (!using_inta)
- hwif->irq = __ide_default_irq(hwif->io_ports.data_addr);
- else if (!hwif->irq && hwif->mate && hwif->mate->irq)
- hwif->irq = hwif->mate->irq; /* share IRQ with mate */
+ hwif->irq = pci_get_legacy_ide_irq(dev, hwif->channel);
if (!hwif->dma_base)
return;
diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
index f21290c4b44..b68906c3c17 100644
--- a/drivers/ide/pdc202xx_new.c
+++ b/drivers/ide/pdc202xx_new.c
@@ -325,7 +325,7 @@ static void apple_kiwi_init(struct pci_dev *pdev)
}
#endif /* CONFIG_PPC_PMAC */
-static unsigned int init_chipset_pdcnew(struct pci_dev *dev)
+static int init_chipset_pdcnew(struct pci_dev *dev)
{
const char *name = DRV_NAME;
unsigned long dma_base = pci_resource_start(dev, 4);
@@ -444,7 +444,7 @@ static unsigned int init_chipset_pdcnew(struct pci_dev *dev)
#endif
out:
- return dev->irq;
+ return 0;
}
static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev)
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index 97193323aeb..cba66ebce4e 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -264,7 +264,7 @@ static void pdc202xx_dma_timeout(ide_drive_t *drive)
ide_dma_timeout(drive);
}
-static unsigned int init_chipset_pdc202xx(struct pci_dev *dev)
+static int init_chipset_pdc202xx(struct pci_dev *dev)
{
unsigned long dmabase = pci_resource_start(dev, 4);
u8 udma_speed_flag = 0, primary_mode = 0, secondary_mode = 0;
@@ -290,7 +290,7 @@ static unsigned int init_chipset_pdc202xx(struct pci_dev *dev)
printk("%sACTIVE\n", (inb(dmabase | 0x1f) & 1) ? "" : "IN");
}
out:
- return dev->irq;
+ return 0;
}
static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
index f1e2e4ef0d7..2aa69993306 100644
--- a/drivers/ide/piix.c
+++ b/drivers/ide/piix.c
@@ -204,7 +204,7 @@ static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed)
* out to be nice and simple.
*/
-static unsigned int init_chipset_ich(struct pci_dev *dev)
+static int init_chipset_ich(struct pci_dev *dev)
{
u32 extra = 0;
@@ -318,19 +318,12 @@ static const struct ide_port_ops ich_port_ops = {
.cable_detect = piix_cable_detect,
};
-#ifndef CONFIG_IA64
- #define IDE_HFLAGS_PIIX IDE_HFLAG_LEGACY_IRQS
-#else
- #define IDE_HFLAGS_PIIX 0
-#endif
-
#define DECLARE_PIIX_DEV(udma) \
{ \
.name = DRV_NAME, \
.init_hwif = init_hwif_piix, \
.enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
.port_ops = &piix_port_ops, \
- .host_flags = IDE_HFLAGS_PIIX, \
.pio_mask = ATA_PIO4, \
.swdma_mask = ATA_SWDMA2_ONLY, \
.mwdma_mask = ATA_MWDMA12_ONLY, \
@@ -344,7 +337,6 @@ static const struct ide_port_ops ich_port_ops = {
.init_hwif = init_hwif_piix, \
.enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
.port_ops = &ich_port_ops, \
- .host_flags = IDE_HFLAGS_PIIX, \
.pio_mask = ATA_PIO4, \
.swdma_mask = ATA_SWDMA2_ONLY, \
.mwdma_mask = ATA_MWDMA12_ONLY, \
@@ -360,8 +352,7 @@ static const struct ide_port_info piix_pci_info[] __devinitdata = {
*/
.name = DRV_NAME,
.enablebits = {{0x6d,0xc0,0x80}, {0x6d,0xc0,0xc0}},
- .host_flags = IDE_HFLAG_ISA_PORTS | IDE_HFLAG_NO_DMA |
- IDE_HFLAGS_PIIX,
+ .host_flags = IDE_HFLAG_ISA_PORTS | IDE_HFLAG_NO_DMA,
.pio_mask = ATA_PIO4,
	/* This is a painful system; best to let it self-tune for now */
},
diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
index 382102ba467..b6554ef9271 100644
--- a/drivers/ide/serverworks.c
+++ b/drivers/ide/serverworks.c
@@ -175,7 +175,7 @@ static void svwks_set_dma_mode(ide_drive_t *drive, const u8 speed)
pci_write_config_byte(dev, 0x54, ultra_enable);
}
-static unsigned int init_chipset_svwks(struct pci_dev *dev)
+static int init_chipset_svwks(struct pci_dev *dev)
{
unsigned int reg;
u8 btr;
@@ -270,7 +270,7 @@ static unsigned int init_chipset_svwks(struct pci_dev *dev)
pci_write_config_byte(dev, 0x5A, btr);
}
- return dev->irq;
+ return 0;
}
static u8 ata66_svwks_svwks(ide_hwif_t *hwif)
@@ -353,14 +353,11 @@ static const struct ide_port_ops svwks_port_ops = {
.cable_detect = svwks_cable_detect,
};
-#define IDE_HFLAGS_SVWKS IDE_HFLAG_LEGACY_IRQS
-
static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
{ /* 0: OSB4 */
.name = DRV_NAME,
.init_chipset = init_chipset_svwks,
.port_ops = &osb4_port_ops,
- .host_flags = IDE_HFLAGS_SVWKS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = 0x00, /* UDMA is problematic on OSB4 */
@@ -369,7 +366,6 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
.name = DRV_NAME,
.init_chipset = init_chipset_svwks,
.port_ops = &svwks_port_ops,
- .host_flags = IDE_HFLAGS_SVWKS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
@@ -378,7 +374,6 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
.name = DRV_NAME,
.init_chipset = init_chipset_svwks,
.port_ops = &svwks_port_ops,
- .host_flags = IDE_HFLAGS_SVWKS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
@@ -387,7 +382,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
.name = DRV_NAME,
.init_chipset = init_chipset_svwks,
.port_ops = &svwks_port_ops,
- .host_flags = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE,
+ .host_flags = IDE_HFLAG_SINGLE,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
@@ -396,7 +391,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
.name = DRV_NAME,
.init_chipset = init_chipset_svwks,
.port_ops = &svwks_port_ops,
- .host_flags = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE,
+ .host_flags = IDE_HFLAG_SINGLE,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index e85d1ed29c2..24bc884826f 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -305,7 +305,6 @@ static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *
* @dev: PCI device holding interface
* @d: IDE port info
* @port: port number
- * @irq: PCI IRQ
* @hw: hw_regs_t instance corresponding to this port
*
* Perform the initial set up for the hardware interface structure. This
@@ -316,7 +315,7 @@ static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *
*/
static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
- unsigned int port, int irq, hw_regs_t *hw)
+ unsigned int port, hw_regs_t *hw)
{
unsigned long ctl = 0, base = 0;
@@ -344,7 +343,6 @@ static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
}
memset(hw, 0, sizeof(*hw));
- hw->irq = irq;
hw->dev = &dev->dev;
hw->chipset = d->chipset ? d->chipset : ide_pci;
ide_std_init_ports(hw, base, ctl | 2);
@@ -448,7 +446,6 @@ out:
* ide_pci_setup_ports - configure ports/devices on PCI IDE
* @dev: PCI device
* @d: IDE port info
- * @pciirq: IRQ line
* @hw: hw_regs_t instances corresponding to this PCI IDE device
* @hws: hw_regs_t pointers table to update
*
@@ -462,7 +459,7 @@ out:
*/
void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d,
- int pciirq, hw_regs_t *hw, hw_regs_t **hws)
+ hw_regs_t *hw, hw_regs_t **hws)
{
int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port;
u8 tmp;
@@ -481,7 +478,7 @@ void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d,
continue; /* port not enabled */
}
- if (ide_hw_configure(dev, d, port, pciirq, hw + port))
+ if (ide_hw_configure(dev, d, port, hw + port))
continue;
*(hws + port) = hw + port;
@@ -524,7 +521,7 @@ static int do_ide_setup_pci_device(struct pci_dev *dev,
if (noisy)
printk(KERN_INFO "%s %s: not 100%% native mode: will "
"probe irqs later\n", d->name, pci_name(dev));
- pciirq = ret;
+ pciirq = 0;
} else if (!pciirq && noisy) {
printk(KERN_WARNING "%s %s: bad irq (%d): will probe later\n",
d->name, pci_name(dev), pciirq);
@@ -549,7 +546,7 @@ int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d,
if (ret < 0)
goto out;
- ide_pci_setup_ports(dev, d, 0, &hw[0], &hws[0]);
+ ide_pci_setup_ports(dev, d, &hw[0], &hws[0]);
host = ide_host_alloc(d, hws);
if (host == NULL) {
@@ -568,7 +565,11 @@ int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d,
goto out;
/* fixup IRQ */
- hw[1].irq = hw[0].irq = ret;
+ if (ide_pci_is_in_compatibility_mode(dev)) {
+ hw[0].irq = pci_get_legacy_ide_irq(dev, 0);
+ hw[1].irq = pci_get_legacy_ide_irq(dev, 1);
+ } else
+ hw[1].irq = hw[0].irq = ret;
ret = ide_host_register(host, d, hws);
if (ret)
@@ -591,7 +592,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
if (ret < 0)
goto out;
- ide_pci_setup_ports(pdev[i], d, 0, &hw[i*2], &hws[i*2]);
+ ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]);
}
host = ide_host_alloc(d, hws);
@@ -619,7 +620,11 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
goto out;
/* fixup IRQ */
- hw[i*2 + 1].irq = hw[i*2].irq = ret;
+ if (ide_pci_is_in_compatibility_mode(pdev[i])) {
+ hw[i*2].irq = pci_get_legacy_ide_irq(pdev[i], 0);
+ hw[i*2 + 1].irq = pci_get_legacy_ide_irq(pdev[i], 1);
+ } else
+ hw[i*2 + 1].irq = hw[i*2].irq = ret;
}
ret = ide_host_register(host, d, hws);
diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
index cb2b352b876..1811ae9cd84 100644
--- a/drivers/ide/siimage.c
+++ b/drivers/ide/siimage.c
@@ -464,7 +464,7 @@ static void sil_sata_pre_reset(ide_drive_t *drive)
* to 133 MHz clocking if the system isn't already set up to do it.
*/
-static unsigned int init_chipset_siimage(struct pci_dev *dev)
+static int init_chipset_siimage(struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
void __iomem *ioaddr = host->host_priv;
diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
index 9ec1a4a4432..afca22beaad 100644
--- a/drivers/ide/sis5513.c
+++ b/drivers/ide/sis5513.c
@@ -447,7 +447,7 @@ static int __devinit sis_find_family(struct pci_dev *dev)
return chipset_family;
}
-static unsigned int init_chipset_sis5513(struct pci_dev *dev)
+static int init_chipset_sis5513(struct pci_dev *dev)
{
/* Make general config ops here
1/ tell IDE channels to operate in Compatibility mode only
@@ -563,7 +563,7 @@ static const struct ide_port_info sis5513_chipset __devinitdata = {
.name = DRV_NAME,
.init_chipset = init_chipset_sis5513,
.enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
- .host_flags = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_NO_AUTODMA,
+ .host_flags = IDE_HFLAG_NO_AUTODMA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
};
diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
index 6297956507c..dba213c51ba 100644
--- a/drivers/ide/sl82c105.c
+++ b/drivers/ide/sl82c105.c
@@ -271,7 +271,7 @@ static u8 sl82c105_bridge_revision(struct pci_dev *dev)
* channel 0 here at least, but channel 1 has to be enabled by
* firmware or arch code. We still set both to 16 bits mode.
*/
-static unsigned int init_chipset_sl82c105(struct pci_dev *dev)
+static int init_chipset_sl82c105(struct pci_dev *dev)
{
u32 val;
@@ -281,7 +281,7 @@ static unsigned int init_chipset_sl82c105(struct pci_dev *dev)
val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
pci_write_config_dword(dev, 0x40, val);
- return dev->irq;
+ return 0;
}
static const struct ide_port_ops sl82c105_port_ops = {
diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
index 40b4b94a428..f55d7d6313e 100644
--- a/drivers/ide/slc90e66.c
+++ b/drivers/ide/slc90e66.c
@@ -136,7 +136,6 @@ static const struct ide_port_info slc90e66_chipset __devinitdata = {
.name = DRV_NAME,
.enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
.port_ops = &slc90e66_port_ops,
- .host_flags = IDE_HFLAG_LEGACY_IRQS,
.pio_mask = ATA_PIO4,
.swdma_mask = ATA_SWDMA2_ONLY,
.mwdma_mask = ATA_MWDMA12_ONLY,
diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
index b6a1285a402..1c09e549c42 100644
--- a/drivers/ide/trm290.c
+++ b/drivers/ide/trm290.c
@@ -277,9 +277,6 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
if (reg & 0x10)
/* legacy mode */
hwif->irq = hwif->channel ? 15 : 14;
- else if (!hwif->irq && hwif->mate && hwif->mate->irq)
- /* sharing IRQ with mate */
- hwif->irq = hwif->mate->irq;
#if 1
{
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
index 6092fe3f409..3ff7231e485 100644
--- a/drivers/ide/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
@@ -267,7 +267,7 @@ static void via_cable_detect(struct via82cxxx_dev *vdev, u32 u)
* and initialize its drive independent registers.
*/
-static unsigned int init_chipset_via82cxxx(struct pci_dev *dev)
+static int init_chipset_via82cxxx(struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
struct via82cxxx_dev *vdev = host->host_priv;
@@ -443,16 +443,6 @@ static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_i
if ((via_config->flags & VIA_NO_UNMASK) == 0)
d.host_flags |= IDE_HFLAG_UNMASK_IRQS;
-#ifdef CONFIG_PPC_CHRP
- if (machine_is(chrp) && _chrp_type == _CHRP_Pegasos)
- d.host_flags |= IDE_HFLAG_FORCE_LEGACY_IRQS;
-#endif
-
-#ifdef CONFIG_AMIGAONE
- if (machine_is(amigaone))
- d.host_flags |= IDE_HFLAG_FORCE_LEGACY_IRQS;
-#endif
-
d.udma_mask = via_config->udma_mask;
vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
diff --git a/drivers/input/joystick/maplecontrol.c b/drivers/input/joystick/maplecontrol.c
index e50047bfe93..77cfde571bd 100644
--- a/drivers/input/joystick/maplecontrol.c
+++ b/drivers/input/joystick/maplecontrol.c
@@ -3,7 +3,7 @@
* Based on drivers/usb/iforce.c
*
* Copyright Yaegashi Takeshi, 2001
- * Adrian McMenamin, 2008
+ * Adrian McMenamin, 2008 - 2009
*/
#include <linux/kernel.h>
@@ -29,7 +29,7 @@ static void dc_pad_callback(struct mapleq *mq)
struct maple_device *mapledev = mq->dev;
struct dc_pad *pad = maple_get_drvdata(mapledev);
struct input_dev *dev = pad->dev;
- unsigned char *res = mq->recvbuf;
+ unsigned char *res = mq->recvbuf->buf;
buttons = ~le16_to_cpup((__le16 *)(res + 8));
diff --git a/drivers/input/keyboard/maple_keyb.c b/drivers/input/keyboard/maple_keyb.c
index 22f17a593be..5aa2361aef9 100644
--- a/drivers/input/keyboard/maple_keyb.c
+++ b/drivers/input/keyboard/maple_keyb.c
@@ -1,8 +1,8 @@
/*
* SEGA Dreamcast keyboard driver
* Based on drivers/usb/usbkbd.c
- * Copyright YAEGASHI Takeshi, 2001
- * Porting to 2.6 Copyright Adrian McMenamin, 2007, 2008
+ * Copyright (c) YAEGASHI Takeshi, 2001
+ * Porting to 2.6 Copyright (c) Adrian McMenamin, 2007 - 2009
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -33,7 +33,7 @@ static DEFINE_MUTEX(maple_keyb_mutex);
#define NR_SCANCODES 256
-MODULE_AUTHOR("YAEGASHI Takeshi, Adrian McMenamin");
+MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk");
MODULE_DESCRIPTION("SEGA Dreamcast keyboard driver");
MODULE_LICENSE("GPL");
@@ -115,7 +115,7 @@ static void dc_scan_kbd(struct dc_kbd *kbd)
input_event(dev, EV_MSC, MSC_SCAN, code);
input_report_key(dev, keycode, 0);
} else
- printk(KERN_DEBUG "maple_keyb: "
+ dev_dbg(&dev->dev,
"Unknown key (scancode %#x) released.",
code);
}
@@ -127,7 +127,7 @@ static void dc_scan_kbd(struct dc_kbd *kbd)
input_event(dev, EV_MSC, MSC_SCAN, code);
input_report_key(dev, keycode, 1);
} else
- printk(KERN_DEBUG "maple_keyb: "
+ dev_dbg(&dev->dev,
"Unknown key (scancode %#x) pressed.",
code);
}
@@ -140,7 +140,7 @@ static void dc_kbd_callback(struct mapleq *mq)
{
struct maple_device *mapledev = mq->dev;
struct dc_kbd *kbd = maple_get_drvdata(mapledev);
- unsigned long *buf = mq->recvbuf;
+ unsigned long *buf = (unsigned long *)(mq->recvbuf->buf);
/*
* We should always get the lock because the only
@@ -159,22 +159,27 @@ static void dc_kbd_callback(struct mapleq *mq)
static int probe_maple_kbd(struct device *dev)
{
- struct maple_device *mdev = to_maple_dev(dev);
- struct maple_driver *mdrv = to_maple_driver(dev->driver);
+ struct maple_device *mdev;
+ struct maple_driver *mdrv;
int i, error;
struct dc_kbd *kbd;
struct input_dev *idev;
- if (!(mdev->function & MAPLE_FUNC_KEYBOARD))
- return -EINVAL;
+ mdev = to_maple_dev(dev);
+ mdrv = to_maple_driver(dev->driver);
kbd = kzalloc(sizeof(struct dc_kbd), GFP_KERNEL);
- idev = input_allocate_device();
- if (!kbd || !idev) {
+ if (!kbd) {
error = -ENOMEM;
goto fail;
}
+ idev = input_allocate_device();
+ if (!idev) {
+ error = -ENOMEM;
+ goto fail_idev_alloc;
+ }
+
kbd->dev = idev;
memcpy(kbd->keycode, dc_kbd_keycode, sizeof(kbd->keycode));
@@ -195,7 +200,7 @@ static int probe_maple_kbd(struct device *dev)
error = input_register_device(idev);
if (error)
- goto fail;
+ goto fail_register;
/* Maple polling is locked to VBLANK - which may be just 50/s */
maple_getcond_callback(mdev, dc_kbd_callback, HZ/50,
@@ -207,10 +212,12 @@ static int probe_maple_kbd(struct device *dev)
return error;
-fail:
+fail_register:
+ maple_set_drvdata(mdev, NULL);
input_free_device(idev);
+fail_idev_alloc:
kfree(kbd);
- maple_set_drvdata(mdev, NULL);
+fail:
return error;
}
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 5c8a1bcf7ca..e1480fb11de 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -219,6 +219,8 @@ static int __devinit sh_keysc_probe(struct platform_device *pdev)
pdata->scan_timing, priv->iomem_base + KYCR1_OFFS);
iowrite16(0, priv->iomem_base + KYOUTDR_OFFS);
iowrite16(KYCR2_IRQ_LEVEL, priv->iomem_base + KYCR2_OFFS);
+
+ device_init_wakeup(&pdev->dev, 1);
return 0;
err5:
free_irq(irq, pdev);
@@ -253,17 +255,33 @@ static int __devexit sh_keysc_remove(struct platform_device *pdev)
return 0;
}
-#define sh_keysc_suspend NULL
-#define sh_keysc_resume NULL
+static int sh_keysc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sh_keysc_priv *priv = platform_get_drvdata(pdev);
+ unsigned short value;
+
+ value = ioread16(priv->iomem_base + KYCR1_OFFS);
+
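+	/* bit 7 of KYCR1 appears to gate the key-scan wakeup source
+	 * (inferred from this driver's use, not from a datasheet) */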
+ if (device_may_wakeup(dev))
+ value |= 0x80;
+ else
+ value &= ~0x80;
+
+	iowrite16(value, priv->iomem_base + KYCR1_OFFS);
+ return 0;
+}
+
+static struct dev_pm_ops sh_keysc_dev_pm_ops = {
+ .suspend = sh_keysc_suspend,
+};
struct platform_driver sh_keysc_device_driver = {
.probe = sh_keysc_probe,
.remove = __devexit_p(sh_keysc_remove),
- .suspend = sh_keysc_suspend,
- .resume = sh_keysc_resume,
.driver = {
.name = "sh_keysc",
+ .pm = &sh_keysc_dev_pm_ops,
}
};
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
index 66c755c116d..ce98d955231 100644
--- a/drivers/media/video/cx88/cx88-alsa.c
+++ b/drivers/media/video/cx88/cx88-alsa.c
@@ -803,9 +803,10 @@ static int __devinit cx88_audio_initdev(struct pci_dev *pci,
return (-ENOENT);
}
- card = snd_card_new(index[devno], id[devno], THIS_MODULE, sizeof(snd_cx88_card_t));
- if (!card)
- return (-ENOMEM);
+ err = snd_card_create(index[devno], id[devno], THIS_MODULE,
+ sizeof(snd_cx88_card_t), &card);
+ if (err < 0)
+ return err;
card->private_free = snd_cx88_dev_free;
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index 2ac738fa6a0..f132e31f6ed 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -448,9 +448,10 @@ static int em28xx_audio_init(struct em28xx *dev)
printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2006 Markus "
"Rechberger\n");
- card = snd_card_new(index[devnr], "Em28xx Audio", THIS_MODULE, 0);
- if (card == NULL)
- return -ENOMEM;
+ err = snd_card_create(index[devnr], "Em28xx Audio", THIS_MODULE, 0,
+ &card);
+ if (err < 0)
+ return err;
spin_lock_init(&adev->slock);
err = snd_pcm_new(card, "Em28xx Audio", 0, 0, 1, &pcm);
diff --git a/drivers/media/video/saa7134/saa7134-alsa.c b/drivers/media/video/saa7134/saa7134-alsa.c
index c750d3dd57d..8b0b64a8987 100644
--- a/drivers/media/video/saa7134/saa7134-alsa.c
+++ b/drivers/media/video/saa7134/saa7134-alsa.c
@@ -990,10 +990,10 @@ static int alsa_card_saa7134_create(struct saa7134_dev *dev, int devnum)
if (!enable[devnum])
return -ENODEV;
- card = snd_card_new(index[devnum], id[devnum], THIS_MODULE, sizeof(snd_card_saa7134_t));
-
- if (card == NULL)
- return -ENOMEM;
+ err = snd_card_create(index[devnum], id[devnum], THIS_MODULE,
+ sizeof(snd_card_saa7134_t), &card);
+ if (err < 0)
+ return err;
strcpy(card->driver, "SAA7134");
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 043d50fb6ef..729f899a5cd 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -551,5 +551,15 @@ config MTD_PLATRAM
This selection automatically selects the map_ram driver.
-endmenu
+config MTD_VMU
+ tristate "Map driver for Dreamcast VMU"
+ depends on MAPLE
+ help
+ This driver enables access to the Dreamcast Visual Memory Unit (VMU).
+
+ Most Dreamcast users will want to say Y here.
+	    To build this as a module select M here; the module will be called
+ vmu-flash.
+
+endmenu
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 6d9ba35caf1..26b28a7a90b 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -61,3 +61,4 @@ obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o
obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o
+obj-$(CONFIG_MTD_VMU) += vmu-flash.o
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
new file mode 100644
index 00000000000..1f73297e777
--- /dev/null
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -0,0 +1,832 @@
+/* vmu-flash.c
+ * Driver for SEGA Dreamcast Visual Memory Unit
+ *
+ * Copyright (c) Adrian McMenamin 2002 - 2009
+ * Copyright (c) Paul Mundt 2001
+ *
+ * Licensed under version 2 of the
+ * GNU General Public Licence
+ */
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/maple.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+
+struct vmu_cache {
+ unsigned char *buffer; /* Cache */
+ unsigned int block; /* Which block was cached */
+ unsigned long jiffies_atc; /* When was it cached? */
+ int valid;
+};
+
+struct mdev_part {
+ struct maple_device *mdev;
+ int partition;
+};
+
+struct vmupart {
+ u16 user_blocks;
+ u16 root_block;
+ u16 numblocks;
+ char *name;
+ struct vmu_cache *pcache;
+};
+
+struct memcard {
+ u16 tempA;
+ u16 tempB;
+ u32 partitions;
+ u32 blocklen;
+ u32 writecnt;
+ u32 readcnt;
+	u32 removable;
+ int partition;
+ int read;
+ unsigned char *blockread;
+ struct vmupart *parts;
+ struct mtd_info *mtd;
+};
+
+struct vmu_block {
+ unsigned int num; /* block number */
+ unsigned int ofs; /* block offset */
+};
+
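+/* Translate a linear mtd offset into a (block number, offset) pair.
+ * For example, with 512-byte blocks an offset of 0x450 falls in block 2
+ * at offset 0x50 - though blocklen is read from the card itself, so
+ * 512 bytes is only the common case, not a guarantee. */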
+static struct vmu_block *ofs_to_block(unsigned long src_ofs,
+ struct mtd_info *mtd, int partition)
+{
+ struct vmu_block *vblock;
+ struct maple_device *mdev;
+ struct memcard *card;
+ struct mdev_part *mpart;
+ int num;
+
+ mpart = mtd->priv;
+ mdev = mpart->mdev;
+ card = maple_get_drvdata(mdev);
+
+ if (src_ofs >= card->parts[partition].numblocks * card->blocklen)
+ goto failed;
+
+ num = src_ofs / card->blocklen;
+ if (num > card->parts[partition].numblocks)
+ goto failed;
+
+ vblock = kmalloc(sizeof(struct vmu_block), GFP_KERNEL);
+ if (!vblock)
+ goto failed;
+
+ vblock->num = num;
+ vblock->ofs = src_ofs % card->blocklen;
+ return vblock;
+
+failed:
+ return NULL;
+}
+
+/* Maple bus callback function for reads */
+static void vmu_blockread(struct mapleq *mq)
+{
+ struct maple_device *mdev;
+ struct memcard *card;
+
+ mdev = mq->dev;
+ card = maple_get_drvdata(mdev);
+ /* copy the read in data */
+
+ if (unlikely(!card->blockread))
+ return;
+
+ memcpy(card->blockread, mq->recvbuf->buf + 12,
+ card->blocklen/card->readcnt);
+
+}
+
+/* Interface with maple bus to read blocks
+ * caching the results so that other parts
+ * of the driver can access block reads */
+static int maple_vmu_read_block(unsigned int num, unsigned char *buf,
+ struct mtd_info *mtd)
+{
+ struct memcard *card;
+ struct mdev_part *mpart;
+ struct maple_device *mdev;
+ int partition, error = 0, x, wait;
+ unsigned char *blockread = NULL;
+ struct vmu_cache *pcache;
+ __be32 sendbuf;
+
+ mpart = mtd->priv;
+ mdev = mpart->mdev;
+ partition = mpart->partition;
+ card = maple_get_drvdata(mdev);
+ pcache = card->parts[partition].pcache;
+ pcache->valid = 0;
+
+ /* prepare the cache for this block */
+ if (!pcache->buffer) {
+ pcache->buffer = kmalloc(card->blocklen, GFP_KERNEL);
+ if (!pcache->buffer) {
+ dev_err(&mdev->dev, "VMU at (%d, %d) - read fails due"
+ " to lack of memory\n", mdev->port,
+ mdev->unit);
+ error = -ENOMEM;
+ goto outB;
+ }
+ }
+
+ /*
+ * Reads may be phased - again the hardware spec
+	 * supports this - though there may not be any devices
+	 * in the wild that implement it, we support it here
+ */
+ for (x = 0; x < card->readcnt; x++) {
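+		/* request header: partition in the top byte, read phase in
+		 * the next byte, block number in the low 16 bits */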
+ sendbuf = cpu_to_be32(partition << 24 | x << 16 | num);
+
+ if (atomic_read(&mdev->busy) == 1) {
+ wait_event_interruptible_timeout(mdev->maple_wait,
+ atomic_read(&mdev->busy) == 0, HZ);
+ if (atomic_read(&mdev->busy) == 1) {
+ dev_notice(&mdev->dev, "VMU at (%d, %d)"
+ " is busy\n", mdev->port, mdev->unit);
+ error = -EAGAIN;
+ goto outB;
+ }
+ }
+
+ atomic_set(&mdev->busy, 1);
+ blockread = kmalloc(card->blocklen/card->readcnt, GFP_KERNEL);
+ if (!blockread) {
+ error = -ENOMEM;
+ atomic_set(&mdev->busy, 0);
+ goto outB;
+ }
+ card->blockread = blockread;
+
+ maple_getcond_callback(mdev, vmu_blockread, 0,
+ MAPLE_FUNC_MEMCARD);
+ error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
+ MAPLE_COMMAND_BREAD, 2, &sendbuf);
+ /* Very long timeouts seem to be needed when box is stressed */
+ wait = wait_event_interruptible_timeout(mdev->maple_wait,
+ (atomic_read(&mdev->busy) == 0 ||
+ atomic_read(&mdev->busy) == 2), HZ * 3);
+ /*
+ * MTD layer does not handle hotplugging well
+ * so have to return errors when VMU is unplugged
+ * in the middle of a read (busy == 2)
+ */
+ if (error || atomic_read(&mdev->busy) == 2) {
+ if (atomic_read(&mdev->busy) == 2)
+ error = -ENXIO;
+ atomic_set(&mdev->busy, 0);
+ card->blockread = NULL;
+ goto outA;
+ }
+ if (wait == 0 || wait == -ERESTARTSYS) {
+ card->blockread = NULL;
+ atomic_set(&mdev->busy, 0);
+ error = -EIO;
+ list_del_init(&(mdev->mq->list));
+ kfree(mdev->mq->sendbuf);
+ mdev->mq->sendbuf = NULL;
+ if (wait == -ERESTARTSYS) {
+ dev_warn(&mdev->dev, "VMU read on (%d, %d)"
+ " interrupted on block 0x%X\n",
+ mdev->port, mdev->unit, num);
+ } else
+ dev_notice(&mdev->dev, "VMU read on (%d, %d)"
+ " timed out on block 0x%X\n",
+ mdev->port, mdev->unit, num);
+ goto outA;
+ }
+
+ memcpy(buf + (card->blocklen/card->readcnt) * x, blockread,
+ card->blocklen/card->readcnt);
+
+ memcpy(pcache->buffer + (card->blocklen/card->readcnt) * x,
+ card->blockread, card->blocklen/card->readcnt);
+ card->blockread = NULL;
+ pcache->block = num;
+ pcache->jiffies_atc = jiffies;
+ pcache->valid = 1;
+ kfree(blockread);
+ }
+
+ return error;
+
+outA:
+ kfree(blockread);
+outB:
+ return error;
+}
+
+/* communicate with maple bus for phased writing */
+static int maple_vmu_write_block(unsigned int num, const unsigned char *buf,
+ struct mtd_info *mtd)
+{
+ struct memcard *card;
+ struct mdev_part *mpart;
+ struct maple_device *mdev;
+ int partition, error, locking, x, phaselen, wait;
+ __be32 *sendbuf;
+
+ mpart = mtd->priv;
+ mdev = mpart->mdev;
+ partition = mpart->partition;
+ card = maple_get_drvdata(mdev);
+
+ phaselen = card->blocklen/card->writecnt;
+
+ sendbuf = kmalloc(phaselen + 4, GFP_KERNEL);
+ if (!sendbuf) {
+ error = -ENOMEM;
+ goto fail_nosendbuf;
+ }
+ for (x = 0; x < card->writecnt; x++) {
+ sendbuf[0] = cpu_to_be32(partition << 24 | x << 16 | num);
+ memcpy(&sendbuf[1], buf + phaselen * x, phaselen);
+ /* wait until the device is not busy doing something else
+		 * or 1 second - whichever comes first */
+ if (atomic_read(&mdev->busy) == 1) {
+ wait_event_interruptible_timeout(mdev->maple_wait,
+ atomic_read(&mdev->busy) == 0, HZ);
+ if (atomic_read(&mdev->busy) == 1) {
+ error = -EBUSY;
+ dev_notice(&mdev->dev, "VMU write at (%d, %d)"
+					" failed - device is busy\n",
+ mdev->port, mdev->unit);
+ goto fail_nolock;
+ }
+ }
+ atomic_set(&mdev->busy, 1);
+
+ locking = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
+ MAPLE_COMMAND_BWRITE, phaselen / 4 + 2, sendbuf);
+ wait = wait_event_interruptible_timeout(mdev->maple_wait,
+ atomic_read(&mdev->busy) == 0, HZ/10);
+ if (locking) {
+ error = -EIO;
+ atomic_set(&mdev->busy, 0);
+ goto fail_nolock;
+ }
+ if (atomic_read(&mdev->busy) == 2) {
+ atomic_set(&mdev->busy, 0);
+ } else if (wait == 0 || wait == -ERESTARTSYS) {
+ error = -EIO;
+ dev_warn(&mdev->dev, "Write at (%d, %d) of block"
+ " 0x%X at phase %d failed: could not"
+ " communicate with VMU", mdev->port,
+ mdev->unit, num, x);
+ atomic_set(&mdev->busy, 0);
+ kfree(mdev->mq->sendbuf);
+ mdev->mq->sendbuf = NULL;
+ list_del_init(&(mdev->mq->list));
+ goto fail_nolock;
+ }
+ }
+ kfree(sendbuf);
+
+ return card->blocklen;
+
+fail_nolock:
+ kfree(sendbuf);
+fail_nosendbuf:
+ dev_err(&mdev->dev, "VMU (%d, %d): write failed\n", mdev->port,
+ mdev->unit);
+ return error;
+}
+
+/* mtd function to simulate reading byte by byte */
+static unsigned char vmu_flash_read_char(unsigned long ofs, int *retval,
+ struct mtd_info *mtd)
+{
+ struct vmu_block *vblock;
+ struct memcard *card;
+ struct mdev_part *mpart;
+ struct maple_device *mdev;
+ unsigned char *buf, ret;
+ int partition, error;
+
+ mpart = mtd->priv;
+ mdev = mpart->mdev;
+ partition = mpart->partition;
+ card = maple_get_drvdata(mdev);
+ *retval = 0;
+
+ buf = kmalloc(card->blocklen, GFP_KERNEL);
+ if (!buf) {
+ *retval = 1;
+ ret = -ENOMEM;
+ goto finish;
+ }
+
+ vblock = ofs_to_block(ofs, mtd, partition);
+ if (!vblock) {
+ *retval = 3;
+ ret = -ENOMEM;
+ goto out_buf;
+ }
+
+ error = maple_vmu_read_block(vblock->num, buf, mtd);
+ if (error) {
+ ret = error;
+ *retval = 2;
+ goto out_vblock;
+ }
+
+ ret = buf[vblock->ofs];
+
+out_vblock:
+ kfree(vblock);
+out_buf:
+ kfree(buf);
+finish:
+ return ret;
+}
+
+/* mtd higher order function to read flash */
+static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct maple_device *mdev;
+ struct memcard *card;
+ struct mdev_part *mpart;
+ struct vmu_cache *pcache;
+ struct vmu_block *vblock;
+ int index = 0, retval, partition, leftover, numblocks;
+ unsigned char cx;
+
+ if (len < 1)
+ return -EIO;
+
+ mpart = mtd->priv;
+ mdev = mpart->mdev;
+ partition = mpart->partition;
+ card = maple_get_drvdata(mdev);
+
+ numblocks = card->parts[partition].numblocks;
+ if (from + len > numblocks * card->blocklen)
+ len = numblocks * card->blocklen - from;
+ if (len == 0)
+ return -EIO;
+ /* Have we cached this bit already? */
+ pcache = card->parts[partition].pcache;
+ do {
+ vblock = ofs_to_block(from + index, mtd, partition);
+ if (!vblock)
+ return -ENOMEM;
+ /* Have we cached this and is the cache valid and timely? */
+ if (pcache->valid &&
+ time_before(jiffies, pcache->jiffies_atc + HZ) &&
+ (pcache->block == vblock->num)) {
+ /* we have cached it, so do necessary copying */
+ leftover = card->blocklen - vblock->ofs;
+ if (vblock->ofs + len - index < card->blocklen) {
+ /* only a bit of this block to copy */
+ memcpy(buf + index,
+ pcache->buffer + vblock->ofs,
+ len - index);
+ index = len;
+ } else {
+ /* otherwise copy remainder of whole block */
+ memcpy(buf + index, pcache->buffer +
+ vblock->ofs, leftover);
+ index += leftover;
+ }
+ } else {
+ /*
+ * Not cached so read one byte -
+ * but cache the rest of the block
+ */
+ cx = vmu_flash_read_char(from + index, &retval, mtd);
+ if (retval) {
+ *retlen = index;
+ kfree(vblock);
+ return cx;
+ }
+ memset(buf + index, cx, 1);
+ index++;
+ }
+ kfree(vblock);
+ } while (len > index);
+ *retlen = index;
+
+ return 0;
+}
+
+static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct maple_device *mdev;
+ struct memcard *card;
+ struct mdev_part *mpart;
+ int index = 0, partition, error = 0, numblocks;
+ struct vmu_cache *pcache;
+ struct vmu_block *vblock;
+ unsigned char *buffer;
+
+ mpart = mtd->priv;
+ mdev = mpart->mdev;
+ partition = mpart->partition;
+ card = maple_get_drvdata(mdev);
+
+ /* simple sanity checks */
+ if (len < 1) {
+ error = -EIO;
+ goto failed;
+ }
+ numblocks = card->parts[partition].numblocks;
+ if (to + len > numblocks * card->blocklen)
+ len = numblocks * card->blocklen - to;
+ if (len == 0) {
+ error = -EIO;
+ goto failed;
+ }
+
+ vblock = ofs_to_block(to, mtd, partition);
+ if (!vblock) {
+ error = -ENOMEM;
+ goto failed;
+ }
+
+ buffer = kmalloc(card->blocklen, GFP_KERNEL);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto fail_buffer;
+ }
+
+ do {
+ /* Read in the block we are to write to */
+ error = maple_vmu_read_block(vblock->num, buffer, mtd);
+ if (error)
+ goto fail_io;
+
+ do {
+ buffer[vblock->ofs] = buf[index];
+ vblock->ofs++;
+ index++;
+ if (index >= len)
+ break;
+ } while (vblock->ofs < card->blocklen);
+
+ /* write out new buffer */
+ error = maple_vmu_write_block(vblock->num, buffer, mtd);
+ /* invalidate the cache */
+ pcache = card->parts[partition].pcache;
+ pcache->valid = 0;
+
+ if (error != card->blocklen)
+ goto fail_io;
+
+ vblock->num++;
+ vblock->ofs = 0;
+ } while (len > index);
+
+ kfree(buffer);
+ *retlen = index;
+ kfree(vblock);
+ return 0;
+
+fail_io:
+ kfree(buffer);
+fail_buffer:
+ kfree(vblock);
+failed:
+ dev_err(&mdev->dev, "VMU write failing with error %d\n", error);
+ return error;
+}
+
+static void vmu_flash_sync(struct mtd_info *mtd)
+{
+ /* Do nothing here */
+}
+
+/* Maple bus callback function to recursively query hardware details */
+static void vmu_queryblocks(struct mapleq *mq)
+{
+ struct maple_device *mdev;
+ unsigned short *res;
+ struct memcard *card;
+ __be32 partnum;
+ struct vmu_cache *pcache;
+ struct mdev_part *mpart;
+ struct mtd_info *mtd_cur;
+ struct vmupart *part_cur;
+ int error;
+
+ mdev = mq->dev;
+ card = maple_get_drvdata(mdev);
+ res = (unsigned short *) (mq->recvbuf->buf);
+ card->tempA = res[12];
+ card->tempB = res[6];
+
+ dev_info(&mdev->dev, "VMU device at partition %d has %d user "
+ "blocks with a root block at %d\n", card->partition,
+ card->tempA, card->tempB);
+
+ part_cur = &card->parts[card->partition];
+ part_cur->user_blocks = card->tempA;
+ part_cur->root_block = card->tempB;
+ part_cur->numblocks = card->tempB + 1;
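+	/* the root block is the highest-numbered block, so the
+	 * partition spans root_block + 1 blocks in total */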
+ part_cur->name = kmalloc(12, GFP_KERNEL);
+ if (!part_cur->name)
+ goto fail_name;
+
+ sprintf(part_cur->name, "vmu%d.%d.%d",
+ mdev->port, mdev->unit, card->partition);
+ mtd_cur = &card->mtd[card->partition];
+ mtd_cur->name = part_cur->name;
+ mtd_cur->type = 8;
+ mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
+ mtd_cur->size = part_cur->numblocks * card->blocklen;
+ mtd_cur->erasesize = card->blocklen;
+ mtd_cur->write = vmu_flash_write;
+ mtd_cur->read = vmu_flash_read;
+ mtd_cur->sync = vmu_flash_sync;
+ mtd_cur->writesize = card->blocklen;
+
+ mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
+ if (!mpart)
+ goto fail_mpart;
+
+ mpart->mdev = mdev;
+ mpart->partition = card->partition;
+ mtd_cur->priv = mpart;
+ mtd_cur->owner = THIS_MODULE;
+
+ pcache = kzalloc(sizeof(struct vmu_cache), GFP_KERNEL);
+ if (!pcache)
+ goto fail_cache_create;
+ part_cur->pcache = pcache;
+
+ error = add_mtd_device(mtd_cur);
+ if (error)
+ goto fail_mtd_register;
+
+ maple_getcond_callback(mdev, NULL, 0,
+ MAPLE_FUNC_MEMCARD);
+
+ /*
+ * Set up a recursive call to the (probably theoretical)
+ * second or more partition
+ */
+ if (++card->partition < card->partitions) {
+ partnum = cpu_to_be32(card->partition << 24);
+ maple_getcond_callback(mdev, vmu_queryblocks, 0,
+ MAPLE_FUNC_MEMCARD);
+ maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
+ MAPLE_COMMAND_GETMINFO, 2, &partnum);
+ }
+ return;
+
+fail_mtd_register:
+	dev_err(&mdev->dev, "Could not register maple device at (%d, %d),"
+		" error is 0x%X\n", mdev->port, mdev->unit, error);
+ for (error = 0; error <= card->partition; error++) {
+ kfree(((card->parts)[error]).pcache);
+ ((card->parts)[error]).pcache = NULL;
+ }
+fail_cache_create:
+fail_mpart:
+ for (error = 0; error <= card->partition; error++) {
+ kfree(((card->mtd)[error]).priv);
+ ((card->mtd)[error]).priv = NULL;
+ }
+ maple_getcond_callback(mdev, NULL, 0,
+ MAPLE_FUNC_MEMCARD);
+ kfree(part_cur->name);
+fail_name:
+ return;
+}
+
+/* Handles very basic info about the flash, queries for details */
+static int __devinit vmu_connect(struct maple_device *mdev)
+{
+ unsigned long test_flash_data, basic_flash_data;
+ int c, error;
+ struct memcard *card;
+ u32 partnum = 0;
+
+ test_flash_data = be32_to_cpu(mdev->devinfo.function);
+ /* Need to count how many bits are set - to find out which
+ * function_data element has details of the memory card:
+ * using Brian Kernighan's/Peter Wegner's method */
+ for (c = 0; test_flash_data; c++)
+ test_flash_data &= test_flash_data - 1;
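+	/* each pass clears the lowest set bit, e.g. 0x0E -> 0x0C -> 0x08 -> 0,
+	 * so c ends up as the number of set bits (3 in that example) */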
+
+ basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]);
+
+ card = kmalloc(sizeof(struct memcard), GFP_KERNEL);
+ if (!card) {
+		error = -ENOMEM;
+ goto fail_nomem;
+ }
+
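+	/* decode the function_data word: partition count (top byte, stored
+	 * minus one), block length in 32-byte units (next byte, also stored
+	 * minus one), write and read phase counts (two nibbles) and a
+	 * removable-media bit */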
+ card->partitions = (basic_flash_data >> 24 & 0xFF) + 1;
+ card->blocklen = ((basic_flash_data >> 16 & 0xFF) + 1) << 5;
+ card->writecnt = basic_flash_data >> 12 & 0xF;
+ card->readcnt = basic_flash_data >> 8 & 0xF;
+	card->removable = basic_flash_data >> 7 & 1;
+
+ card->partition = 0;
+
+ /*
+ * Not sure there are actually any multi-partition devices in the
+ * real world, but the hardware supports them, so, so will we
+ */
+ card->parts = kmalloc(sizeof(struct vmupart) * card->partitions,
+ GFP_KERNEL);
+ if (!card->parts) {
+ error = -ENOMEM;
+ goto fail_partitions;
+ }
+
+ card->mtd = kmalloc(sizeof(struct mtd_info) * card->partitions,
+ GFP_KERNEL);
+ if (!card->mtd) {
+ error = -ENOMEM;
+ goto fail_mtd_info;
+ }
+
+ maple_set_drvdata(mdev, card);
+
+ /*
+ * We want to trap meminfo not get cond
+ * so set interval to zero, but rely on maple bus
+ * driver to pass back the results of the meminfo
+ */
+ maple_getcond_callback(mdev, vmu_queryblocks, 0,
+ MAPLE_FUNC_MEMCARD);
+
+ /* Make sure we are clear to go */
+ if (atomic_read(&mdev->busy) == 1) {
+ wait_event_interruptible_timeout(mdev->maple_wait,
+ atomic_read(&mdev->busy) == 0, HZ);
+ if (atomic_read(&mdev->busy) == 1) {
+ dev_notice(&mdev->dev, "VMU at (%d, %d) is busy\n",
+ mdev->port, mdev->unit);
+ error = -EAGAIN;
+ goto fail_device_busy;
+ }
+ }
+
+ atomic_set(&mdev->busy, 1);
+
+ /*
+ * Set up the minfo call: vmu_queryblocks will handle
+ * the information passed back
+ */
+ error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
+ MAPLE_COMMAND_GETMINFO, 2, &partnum);
+ if (error) {
+ dev_err(&mdev->dev, "Could not lock VMU at (%d, %d)"
+ " error is 0x%X\n", mdev->port, mdev->unit, error);
+		goto fail_device_busy;
+ }
+ return 0;
+
+fail_device_busy:
+ kfree(card->mtd);
+fail_mtd_info:
+ kfree(card->parts);
+fail_partitions:
+ kfree(card);
+fail_nomem:
+ return error;
+}
+
+static void __devexit vmu_disconnect(struct maple_device *mdev)
+{
+ struct memcard *card;
+ struct mdev_part *mpart;
+ int x;
+
+ mdev->callback = NULL;
+ card = maple_get_drvdata(mdev);
+ for (x = 0; x < card->partitions; x++) {
+ mpart = ((card->mtd)[x]).priv;
+ mpart->mdev = NULL;
+ del_mtd_device(&((card->mtd)[x]));
+ kfree(((card->parts)[x]).name);
+ }
+ kfree(card->parts);
+ kfree(card->mtd);
+ kfree(card);
+}
+
+/* Callback to handle eccentricities of both the mtd subsystem
+ * and the general flakiness of Dreamcast VMUs
+ */
+static int vmu_can_unload(struct maple_device *mdev)
+{
+ struct memcard *card;
+ int x;
+ struct mtd_info *mtd;
+
+ card = maple_get_drvdata(mdev);
+ for (x = 0; x < card->partitions; x++) {
+ mtd = &((card->mtd)[x]);
+ if (mtd->usecount > 0)
+ return 0;
+ }
+ return 1;
+}
+
+#define ERRSTR "VMU at (%d, %d) file error -"
+
+static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
+{
+ enum maple_file_errors error = ((int *)recvbuf)[1];
+
+ switch (error) {
+
+ case MAPLE_FILEERR_INVALID_PARTITION:
+ dev_notice(&mdev->dev, ERRSTR " invalid partition number\n",
+ mdev->port, mdev->unit);
+ break;
+
+ case MAPLE_FILEERR_PHASE_ERROR:
+ dev_notice(&mdev->dev, ERRSTR " phase error\n",
+ mdev->port, mdev->unit);
+ break;
+
+ case MAPLE_FILEERR_INVALID_BLOCK:
+ dev_notice(&mdev->dev, ERRSTR " invalid block number\n",
+ mdev->port, mdev->unit);
+ break;
+
+ case MAPLE_FILEERR_WRITE_ERROR:
+ dev_notice(&mdev->dev, ERRSTR " write error\n",
+ mdev->port, mdev->unit);
+ break;
+
+ case MAPLE_FILEERR_INVALID_WRITE_LENGTH:
+ dev_notice(&mdev->dev, ERRSTR " invalid write length\n",
+ mdev->port, mdev->unit);
+ break;
+
+ case MAPLE_FILEERR_BAD_CRC:
+ dev_notice(&mdev->dev, ERRSTR " bad CRC\n",
+ mdev->port, mdev->unit);
+ break;
+
+ default:
+ dev_notice(&mdev->dev, ERRSTR " 0x%X\n",
+ mdev->port, mdev->unit, error);
+ }
+}
+
+
+static int __devinit probe_maple_vmu(struct device *dev)
+{
+ int error;
+ struct maple_device *mdev = to_maple_dev(dev);
+ struct maple_driver *mdrv = to_maple_driver(dev->driver);
+
+ mdev->can_unload = vmu_can_unload;
+ mdev->fileerr_handler = vmu_file_error;
+ mdev->driver = mdrv;
+
+ error = vmu_connect(mdev);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int __devexit remove_maple_vmu(struct device *dev)
+{
+ struct maple_device *mdev = to_maple_dev(dev);
+
+ vmu_disconnect(mdev);
+ return 0;
+}
+
+static struct maple_driver vmu_flash_driver = {
+ .function = MAPLE_FUNC_MEMCARD,
+ .drv = {
+ .name = "Dreamcast_visual_memory",
+ .probe = probe_maple_vmu,
+ .remove = __devexit_p(remove_maple_vmu),
+ },
+};
+
+static int __init vmu_flash_map_init(void)
+{
+ return maple_driver_register(&vmu_flash_driver);
+}
+
+static void __exit vmu_flash_map_exit(void)
+{
+ maple_driver_unregister(&vmu_flash_driver);
+}
+
+module_init(vmu_flash_map_init);
+module_exit(vmu_flash_map_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Adrian McMenamin");
+MODULE_DESCRIPTION("Flash mapping for Sega Dreamcast visual memory");
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 1c3fc6b428e..4898f7fe851 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -28,7 +28,7 @@
#include <asm/rtc.h>
#define DRV_NAME "sh-rtc"
-#define DRV_VERSION "0.2.0"
+#define DRV_VERSION "0.2.1"
#define RTC_REG(r) ((r) * rtc_reg_size)
@@ -99,56 +99,51 @@ struct sh_rtc {
unsigned short periodic_freq;
};
-static irqreturn_t sh_rtc_interrupt(int irq, void *dev_id)
+static int __sh_rtc_interrupt(struct sh_rtc *rtc)
{
- struct sh_rtc *rtc = dev_id;
- unsigned int tmp;
-
- spin_lock(&rtc->lock);
+ unsigned int tmp, pending;
tmp = readb(rtc->regbase + RCR1);
+ pending = tmp & RCR1_CF;
tmp &= ~RCR1_CF;
writeb(tmp, rtc->regbase + RCR1);
/* Users have requested One x Second IRQ */
- if (rtc->periodic_freq & PF_OXS)
+ if (pending && rtc->periodic_freq & PF_OXS)
rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF);
- spin_unlock(&rtc->lock);
-
- return IRQ_HANDLED;
+ return pending;
}
-static irqreturn_t sh_rtc_alarm(int irq, void *dev_id)
+static int __sh_rtc_alarm(struct sh_rtc *rtc)
{
- struct sh_rtc *rtc = dev_id;
- unsigned int tmp;
-
- spin_lock(&rtc->lock);
+ unsigned int tmp, pending;
tmp = readb(rtc->regbase + RCR1);
+ pending = tmp & RCR1_AF;
tmp &= ~(RCR1_AF | RCR1_AIE);
- writeb(tmp, rtc->regbase + RCR1);
-
- rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
+ writeb(tmp, rtc->regbase + RCR1);
- spin_unlock(&rtc->lock);
+ if (pending)
+ rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
- return IRQ_HANDLED;
+ return pending;
}
-static irqreturn_t sh_rtc_periodic(int irq, void *dev_id)
+static int __sh_rtc_periodic(struct sh_rtc *rtc)
{
- struct sh_rtc *rtc = dev_id;
struct rtc_device *rtc_dev = rtc->rtc_dev;
- unsigned int tmp;
-
- spin_lock(&rtc->lock);
+ struct rtc_task *irq_task;
+ unsigned int tmp, pending;
tmp = readb(rtc->regbase + RCR2);
+ pending = tmp & RCR2_PEF;
tmp &= ~RCR2_PEF;
writeb(tmp, rtc->regbase + RCR2);
+ if (!pending)
+ return 0;
+
	/* If half period is enabled, one tick is skipped and the next notified */
if ((rtc->periodic_freq & PF_HP) && (rtc->periodic_freq & PF_COUNT))
rtc->periodic_freq &= ~PF_COUNT;
@@ -157,16 +152,65 @@ static irqreturn_t sh_rtc_periodic(int irq, void *dev_id)
rtc->periodic_freq |= PF_COUNT;
if (rtc->periodic_freq & PF_KOU) {
spin_lock(&rtc_dev->irq_task_lock);
- if (rtc_dev->irq_task)
- rtc_dev->irq_task->func(rtc_dev->irq_task->private_data);
+ irq_task = rtc_dev->irq_task;
+ if (irq_task)
+ irq_task->func(irq_task->private_data);
spin_unlock(&rtc_dev->irq_task_lock);
} else
rtc_update_irq(rtc->rtc_dev, 1, RTC_PF | RTC_IRQF);
}
+ return pending;
+}
+
+static irqreturn_t sh_rtc_interrupt(int irq, void *dev_id)
+{
+ struct sh_rtc *rtc = dev_id;
+ int ret;
+
+ spin_lock(&rtc->lock);
+ ret = __sh_rtc_interrupt(rtc);
+ spin_unlock(&rtc->lock);
+
+ return IRQ_RETVAL(ret);
+}
+
+static irqreturn_t sh_rtc_alarm(int irq, void *dev_id)
+{
+ struct sh_rtc *rtc = dev_id;
+ int ret;
+
+ spin_lock(&rtc->lock);
+ ret = __sh_rtc_alarm(rtc);
+ spin_unlock(&rtc->lock);
+
+ return IRQ_RETVAL(ret);
+}
+
+static irqreturn_t sh_rtc_periodic(int irq, void *dev_id)
+{
+ struct sh_rtc *rtc = dev_id;
+ int ret;
+
+ spin_lock(&rtc->lock);
+ ret = __sh_rtc_periodic(rtc);
spin_unlock(&rtc->lock);
- return IRQ_HANDLED;
+ return IRQ_RETVAL(ret);
+}
+
+static irqreturn_t sh_rtc_shared(int irq, void *dev_id)
+{
+ struct sh_rtc *rtc = dev_id;
+ int ret;
+
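+	/* a single IRQ line is shared here, so poll the carry, alarm
+	 * and periodic sources in turn */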
+ spin_lock(&rtc->lock);
+ ret = __sh_rtc_interrupt(rtc);
+ ret |= __sh_rtc_alarm(rtc);
+ ret |= __sh_rtc_periodic(rtc);
+ spin_unlock(&rtc->lock);
+
+ return IRQ_RETVAL(ret);
}
static inline void sh_rtc_setpie(struct device *dev, unsigned int enable)
@@ -275,6 +319,25 @@ static int sh_rtc_proc(struct device *dev, struct seq_file *seq)
return 0;
}
+static inline void sh_rtc_setcie(struct device *dev, unsigned int enable)
+{
+ struct sh_rtc *rtc = dev_get_drvdata(dev);
+ unsigned int tmp;
+
+ spin_lock_irq(&rtc->lock);
+
+ tmp = readb(rtc->regbase + RCR1);
+
+ if (!enable)
+ tmp &= ~RCR1_CIE;
+ else
+ tmp |= RCR1_CIE;
+
+ writeb(tmp, rtc->regbase + RCR1);
+
+ spin_unlock_irq(&rtc->lock);
+}
+
static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
struct sh_rtc *rtc = dev_get_drvdata(dev);
@@ -291,9 +354,11 @@ static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
break;
case RTC_UIE_OFF:
rtc->periodic_freq &= ~PF_OXS;
+ sh_rtc_setcie(dev, 0);
break;
case RTC_UIE_ON:
rtc->periodic_freq |= PF_OXS;
+ sh_rtc_setcie(dev, 1);
break;
case RTC_IRQP_READ:
ret = put_user(rtc->rtc_dev->irq_freq,
@@ -356,18 +421,17 @@ static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
tm->tm_sec--;
#endif
+ /* only keep the carry interrupt enabled if UIE is on */
+ if (!(rtc->periodic_freq & PF_OXS))
+ sh_rtc_setcie(dev, 0);
+
dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
__func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon + 1, tm->tm_year, tm->tm_wday);
- if (rtc_valid_tm(tm) < 0) {
- dev_err(dev, "invalid date\n");
- rtc_time_to_tm(0, tm);
- }
-
- return 0;
+ return rtc_valid_tm(tm);
}
static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -572,7 +636,7 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev)
{
struct sh_rtc *rtc;
struct resource *res;
- unsigned int tmp;
+ struct rtc_time r;
int ret;
rtc = kzalloc(sizeof(struct sh_rtc), GFP_KERNEL);
@@ -585,26 +649,12 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev)
ret = platform_get_irq(pdev, 0);
if (unlikely(ret <= 0)) {
ret = -ENOENT;
- dev_err(&pdev->dev, "No IRQ for period\n");
+ dev_err(&pdev->dev, "No IRQ resource\n");
goto err_badres;
}
rtc->periodic_irq = ret;
-
- ret = platform_get_irq(pdev, 1);
- if (unlikely(ret <= 0)) {
- ret = -ENOENT;
- dev_err(&pdev->dev, "No IRQ for carry\n");
- goto err_badres;
- }
- rtc->carry_irq = ret;
-
- ret = platform_get_irq(pdev, 2);
- if (unlikely(ret <= 0)) {
- ret = -ENOENT;
- dev_err(&pdev->dev, "No IRQ for alarm\n");
- goto err_badres;
- }
- rtc->alarm_irq = ret;
+ rtc->carry_irq = platform_get_irq(pdev, 1);
+ rtc->alarm_irq = platform_get_irq(pdev, 2);
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (unlikely(res == NULL)) {
@@ -646,47 +696,66 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev)
}
rtc->rtc_dev->max_user_freq = 256;
- rtc->rtc_dev->irq_freq = 1;
- rtc->periodic_freq = 0x60;
platform_set_drvdata(pdev, rtc);
- /* register periodic/carry/alarm irqs */
- ret = request_irq(rtc->periodic_irq, sh_rtc_periodic, IRQF_DISABLED,
- "sh-rtc period", rtc);
- if (unlikely(ret)) {
- dev_err(&pdev->dev,
- "request period IRQ failed with %d, IRQ %d\n", ret,
- rtc->periodic_irq);
- goto err_unmap;
- }
+ if (rtc->carry_irq <= 0) {
+ /* register shared periodic/carry/alarm irq */
+ ret = request_irq(rtc->periodic_irq, sh_rtc_shared,
+ IRQF_DISABLED, "sh-rtc", rtc);
+ if (unlikely(ret)) {
+ dev_err(&pdev->dev,
+ "request IRQ failed with %d, IRQ %d\n", ret,
+ rtc->periodic_irq);
+ goto err_unmap;
+ }
+ } else {
+ /* register periodic/carry/alarm irqs */
+ ret = request_irq(rtc->periodic_irq, sh_rtc_periodic,
+ IRQF_DISABLED, "sh-rtc period", rtc);
+ if (unlikely(ret)) {
+ dev_err(&pdev->dev,
+ "request period IRQ failed with %d, IRQ %d\n",
+ ret, rtc->periodic_irq);
+ goto err_unmap;
+ }
- ret = request_irq(rtc->carry_irq, sh_rtc_interrupt, IRQF_DISABLED,
- "sh-rtc carry", rtc);
- if (unlikely(ret)) {
- dev_err(&pdev->dev,
- "request carry IRQ failed with %d, IRQ %d\n", ret,
- rtc->carry_irq);
- free_irq(rtc->periodic_irq, rtc);
- goto err_unmap;
- }
+ ret = request_irq(rtc->carry_irq, sh_rtc_interrupt,
+ IRQF_DISABLED, "sh-rtc carry", rtc);
+ if (unlikely(ret)) {
+ dev_err(&pdev->dev,
+ "request carry IRQ failed with %d, IRQ %d\n",
+ ret, rtc->carry_irq);
+ free_irq(rtc->periodic_irq, rtc);
+ goto err_unmap;
+ }
- ret = request_irq(rtc->alarm_irq, sh_rtc_alarm, IRQF_DISABLED,
- "sh-rtc alarm", rtc);
- if (unlikely(ret)) {
- dev_err(&pdev->dev,
- "request alarm IRQ failed with %d, IRQ %d\n", ret,
- rtc->alarm_irq);
- free_irq(rtc->carry_irq, rtc);
- free_irq(rtc->periodic_irq, rtc);
- goto err_unmap;
+ ret = request_irq(rtc->alarm_irq, sh_rtc_alarm,
+ IRQF_DISABLED, "sh-rtc alarm", rtc);
+ if (unlikely(ret)) {
+ dev_err(&pdev->dev,
+ "request alarm IRQ failed with %d, IRQ %d\n",
+ ret, rtc->alarm_irq);
+ free_irq(rtc->carry_irq, rtc);
+ free_irq(rtc->periodic_irq, rtc);
+ goto err_unmap;
+ }
}
- tmp = readb(rtc->regbase + RCR1);
- tmp &= ~RCR1_CF;
- tmp |= RCR1_CIE;
- writeb(tmp, rtc->regbase + RCR1);
+ /* everything disabled by default */
+ rtc->periodic_freq = 0;
+ rtc->rtc_dev->irq_freq = 0;
+ sh_rtc_setpie(&pdev->dev, 0);
+ sh_rtc_setaie(&pdev->dev, 0);
+ sh_rtc_setcie(&pdev->dev, 0);
+
+ /* reset rtc to epoch 0 if time is invalid */
+ if (rtc_read_time(rtc->rtc_dev, &r) < 0) {
+ rtc_time_to_tm(0, &r);
+ rtc_set_time(rtc->rtc_dev, &r);
+ }
+ device_init_wakeup(&pdev->dev, 1);
return 0;
err_unmap:
@@ -708,10 +777,13 @@ static int __devexit sh_rtc_remove(struct platform_device *pdev)
sh_rtc_setpie(&pdev->dev, 0);
sh_rtc_setaie(&pdev->dev, 0);
+ sh_rtc_setcie(&pdev->dev, 0);
- free_irq(rtc->carry_irq, rtc);
free_irq(rtc->periodic_irq, rtc);
- free_irq(rtc->alarm_irq, rtc);
+ if (rtc->carry_irq > 0) {
+ free_irq(rtc->carry_irq, rtc);
+ free_irq(rtc->alarm_irq, rtc);
+ }
release_resource(rtc->res);
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 557b54ab2f2..dbf5357a77b 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -139,7 +139,7 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c)
} while (!(status & SCxSR_TDxE(port)));
sci_in(port, SCxSR); /* Dummy read */
- sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
+ sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
sci_out(port, SCxTDR, c);
}
#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
@@ -263,6 +263,7 @@ static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \
defined(CONFIG_CPU_SUBTYPE_SH7780) || \
defined(CONFIG_CPU_SUBTYPE_SH7785) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7786) || \
defined(CONFIG_CPU_SUBTYPE_SHX3)
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
@@ -284,7 +285,8 @@ static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \
defined(CONFIG_CPU_SUBTYPE_SH7780) || \
- defined(CONFIG_CPU_SUBTYPE_SH7785)
+ defined(CONFIG_CPU_SUBTYPE_SH7785) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7786)
static inline int scif_txroom(struct uart_port *port)
{
return SCIF_TXROOM_MAX - (sci_in(port, SCTFDR) & 0xff);
@@ -1095,6 +1097,7 @@ static void serial_console_write(struct console *co, const char *s,
unsigned count)
{
struct uart_port *port = &serial_console_port->port;
+ unsigned short bits;
int i;
for (i = 0; i < count; i++) {
@@ -1103,6 +1106,11 @@ static void serial_console_write(struct console *co, const char *s,
sci_poll_put_char(port, *s++);
}
+
+ /* wait until fifo is empty and last bit has been transmitted */
+ bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
+ while ((sci_in(port, SCxSR) & bits) != bits)
+ cpu_relax();
}
static int __init serial_console_setup(struct console *co, char *options)
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index 022e89ffec1..d0aa82d7fce 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -1,6 +1,6 @@
#include <linux/serial_core.h>
#include <asm/io.h>
-#include <asm/gpio.h>
+#include <linux/gpio.h>
#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
#include <asm/regs306x.h>
@@ -126,7 +126,8 @@
# define SCSPTR1 0xffe10024 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* Overrun error bit */
# define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7785)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7786)
# define SCSPTR0 0xffea0024 /* 16 bit SCIF */
# define SCSPTR1 0xffeb0024 /* 16 bit SCIF */
# define SCSPTR2 0xffec0024 /* 16 bit SCIF */
@@ -182,6 +183,7 @@
defined(CONFIG_CPU_SUBTYPE_SH7763) || \
defined(CONFIG_CPU_SUBTYPE_SH7780) || \
defined(CONFIG_CPU_SUBTYPE_SH7785) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7786) || \
defined(CONFIG_CPU_SUBTYPE_SHX3)
#define SCI_CTRL_FLAGS_REIE 0x08 /* 7750 SCIF */
#else
@@ -413,7 +415,8 @@ SCIx_FNS(SCxRDR, 0x0a, 8, 0x14, 8, 0x0A, 8, 0x14, 8, 0x05, 8)
SCIF_FNS(SCFCR, 0x0c, 8, 0x18, 16)
#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \
defined(CONFIG_CPU_SUBTYPE_SH7780) || \
- defined(CONFIG_CPU_SUBTYPE_SH7785)
+ defined(CONFIG_CPU_SUBTYPE_SH7785) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7786)
SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16)
SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16)
SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16)
@@ -644,7 +647,8 @@ static inline int sci_rxd_in(struct uart_port *port)
return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
return 1;
}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7785)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7786)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffea0000)
@@ -746,7 +750,8 @@ static inline int sci_rxd_in(struct uart_port *port)
*/
#if defined(CONFIG_CPU_SUBTYPE_SH7780) || \
- defined(CONFIG_CPU_SUBTYPE_SH7785)
+ defined(CONFIG_CPU_SUBTYPE_SH7785) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7786)
#define SCBRR_VALUE(bps, clk) ((clk+16*bps)/(16*bps)-1)
#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
defined(CONFIG_CPU_SUBTYPE_SH7720) || \
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index 58d24c5a76c..2269fbcaa18 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -568,6 +568,10 @@ static void __init intc_register_irq(struct intc_desc *desc,
if (!data[0] && data[1])
primary = 1;
+ if (!data[0] && !data[1])
+ pr_warning("intc: missing unique irq mask for "
+ "irq %d (vect 0x%04x)\n", irq, irq2evt(irq));
+
data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1);
data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1);
@@ -641,6 +645,17 @@ static unsigned int __init save_reg(struct intc_desc_int *d,
return 0;
}
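+/* When several vectors share one enum id only the first is registered;
+ * this table maps the remaining vectors back onto that first irq. */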
+static unsigned char *intc_evt2irq_table;
+
+unsigned int intc_evt2irq(unsigned int vector)
+{
+ unsigned int irq = evt2irq(vector);
+
+ if (intc_evt2irq_table && intc_evt2irq_table[irq])
+ irq = intc_evt2irq_table[irq];
+
+ return irq;
+}
void __init register_intc_controller(struct intc_desc *desc)
{
@@ -705,9 +720,41 @@ void __init register_intc_controller(struct intc_desc *desc)
BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */
+ /* keep the first vector only if same enum is used multiple times */
+ for (i = 0; i < desc->nr_vectors; i++) {
+ struct intc_vect *vect = desc->vectors + i;
+ int first_irq = evt2irq(vect->vect);
+
+ if (!vect->enum_id)
+ continue;
+
+ for (k = i + 1; k < desc->nr_vectors; k++) {
+ struct intc_vect *vect2 = desc->vectors + k;
+
+ if (vect->enum_id != vect2->enum_id)
+ continue;
+
+ vect2->enum_id = 0;
+
+ if (!intc_evt2irq_table)
+ intc_evt2irq_table = alloc_bootmem(NR_IRQS);
+
+ if (!intc_evt2irq_table) {
+ pr_warning("intc: cannot allocate evt2irq!\n");
+ continue;
+ }
+
+ intc_evt2irq_table[evt2irq(vect2->vect)] = first_irq;
+ }
+ }
+
+ /* register the vectors one by one */
for (i = 0; i < desc->nr_vectors; i++) {
struct intc_vect *vect = desc->vectors + i;
+ if (!vect->enum_id)
+ continue;
+
intc_register_irq(desc, d, vect->enum_id, evt2irq(vect->vect));
}
}
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index 7e1257af3d4..cab1ab7cfb7 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -1,16 +1,10 @@
/*
* Core maple bus functionality
*
- * Copyright (C) 2007, 2008 Adrian McMenamin
+ * Copyright (C) 2007 - 2009 Adrian McMenamin
* Copyright (C) 2001 - 2008 Paul Mundt
- *
- * Based on 2.4 code by:
- *
- * Copyright (C) 2000-2001 YAEGASHI Takeshi
+ * Copyright (C) 2000 - 2001 YAEGASHI Takeshi
* Copyright (C) 2001 M. R. Brown
- * Copyright (C) 2001 Paul Mundt
- *
- * and others.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -32,7 +26,7 @@
#include <mach/dma.h>
#include <mach/sysasic.h>
-MODULE_AUTHOR("Yaegashi Takeshi, Paul Mundt, M. R. Brown, Adrian McMenamin");
+MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>");
MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
MODULE_LICENSE("GPL v2");
MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");
@@ -49,7 +43,7 @@ static LIST_HEAD(maple_sentq);
/* mutex to protect queue of waiting packets */
static DEFINE_MUTEX(maple_wlist_lock);
-static struct maple_driver maple_dummy_driver;
+static struct maple_driver maple_unsupported_device;
static struct device maple_bus;
static int subdevice_map[MAPLE_PORTS];
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
@@ -62,8 +56,9 @@ struct maple_device_specify {
int unit;
};
-static bool checked[4];
-static struct maple_device *baseunits[4];
+static bool checked[MAPLE_PORTS];
+static bool empty[MAPLE_PORTS];
+static struct maple_device *baseunits[MAPLE_PORTS];
/**
* maple_driver_register - register a maple driver
@@ -97,12 +92,20 @@ void maple_driver_unregister(struct maple_driver *drv)
EXPORT_SYMBOL_GPL(maple_driver_unregister);
/* set hardware registers to enable next round of dma */
-static void maplebus_dma_reset(void)
+static void maple_dma_reset(void)
{
ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
/* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
ctrl_outl(1, MAPLE_TRIGTYPE);
- ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED);
+ /*
+ * Maple system register
+ * bits 31 - 16 timeout in units of 20nsec
+ * bit 12 hard trigger - set 0 to keep responding to VBLANK
+ * bits 9 - 8 set 00 for 2 Mbps, 01 for 1 Mbps
+ * bits 3 - 0 delay (in 1.3ms) between VBLANK and start of DMA
+ * max delay is 11
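+	 * (with MAPLE_TIMEOUT(0xFFFF) that is 0xFFFF * 20ns, roughly 1.3ms)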
+ */
+ ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED);
ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR);
ctrl_outl(1, MAPLE_ENABLE);
}
@@ -134,21 +137,16 @@ static void maple_release_device(struct device *dev)
{
struct maple_device *mdev;
struct mapleq *mq;
- if (!dev)
- return;
+
mdev = to_maple_dev(dev);
mq = mdev->mq;
- if (mq) {
- if (mq->recvbufdcsp)
- kmem_cache_free(maple_queue_cache, mq->recvbufdcsp);
- kfree(mq);
- mq = NULL;
- }
+ kmem_cache_free(maple_queue_cache, mq->recvbuf);
+ kfree(mq);
kfree(mdev);
}
/**
- * maple_add_packet - add a single instruction to the queue
+ * maple_add_packet - add a single instruction to the maple bus queue
* @mdev: maple device
* @function: function on device being queried
* @command: maple command to add
@@ -158,68 +156,12 @@ static void maple_release_device(struct device *dev)
int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,
size_t length, void *data)
{
- int locking, ret = 0;
+ int ret = 0;
void *sendbuf = NULL;
- mutex_lock(&maple_wlist_lock);
- /* bounce if device already locked */
- locking = mutex_is_locked(&mdev->mq->mutex);
- if (locking) {
- ret = -EBUSY;
- goto out;
- }
-
- mutex_lock(&mdev->mq->mutex);
-
if (length) {
- sendbuf = kmalloc(length * 4, GFP_KERNEL);
+ sendbuf = kzalloc(length * 4, GFP_KERNEL);
if (!sendbuf) {
- mutex_unlock(&mdev->mq->mutex);
- ret = -ENOMEM;
- goto out;
- }
- ((__be32 *)sendbuf)[0] = cpu_to_be32(function);
- }
-
- mdev->mq->command = command;
- mdev->mq->length = length;
- if (length > 1)
- memcpy(sendbuf + 4, data, (length - 1) * 4);
- mdev->mq->sendbuf = sendbuf;
-
- list_add(&mdev->mq->list, &maple_waitq);
-out:
- mutex_unlock(&maple_wlist_lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(maple_add_packet);
-
-/**
- * maple_add_packet_sleeps - add a single instruction to the queue
- * @mdev: maple device
- * @function: function on device being queried
- * @command: maple command to add
- * @length: length of command string (in 32 bit words)
- * @data: remainder of command string
- *
- * Same as maple_add_packet(), but waits for the lock to become free.
- */
-int maple_add_packet_sleeps(struct maple_device *mdev, u32 function,
- u32 command, size_t length, void *data)
-{
- int locking, ret = 0;
- void *sendbuf = NULL;
-
- locking = mutex_lock_interruptible(&mdev->mq->mutex);
- if (locking) {
- ret = -EIO;
- goto out;
- }
-
- if (length) {
- sendbuf = kmalloc(length * 4, GFP_KERNEL);
- if (!sendbuf) {
- mutex_unlock(&mdev->mq->mutex);
ret = -ENOMEM;
goto out;
}
@@ -233,38 +175,35 @@ int maple_add_packet_sleeps(struct maple_device *mdev, u32 function,
mdev->mq->sendbuf = sendbuf;
mutex_lock(&maple_wlist_lock);
- list_add(&mdev->mq->list, &maple_waitq);
+ list_add_tail(&mdev->mq->list, &maple_waitq);
mutex_unlock(&maple_wlist_lock);
out:
return ret;
}
-EXPORT_SYMBOL_GPL(maple_add_packet_sleeps);
+EXPORT_SYMBOL_GPL(maple_add_packet);
static struct mapleq *maple_allocq(struct maple_device *mdev)
{
struct mapleq *mq;
- mq = kmalloc(sizeof(*mq), GFP_KERNEL);
+ mq = kzalloc(sizeof(*mq), GFP_KERNEL);
if (!mq)
goto failed_nomem;
+ INIT_LIST_HEAD(&mq->list);
mq->dev = mdev;
- mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
- mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp);
+ mq->recvbuf = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
if (!mq->recvbuf)
goto failed_p2;
- /*
- * most devices do not need the mutex - but
- * anything that injects block reads or writes
- * will rely on it
- */
- mutex_init(&mq->mutex);
+ mq->recvbuf->buf = &((mq->recvbuf->bufx)[0]);
return mq;
failed_p2:
kfree(mq);
failed_nomem:
+ dev_err(&mdev->dev, "could not allocate memory for device (%d, %d)\n",
+ mdev->port, mdev->unit);
return NULL;
}
@@ -272,12 +211,16 @@ static struct maple_device *maple_alloc_dev(int port, int unit)
{
struct maple_device *mdev;
+ /* zero this out to avoid kobj subsystem
+ * thinking it has already been registered */
+
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return NULL;
mdev->port = port;
mdev->unit = unit;
+
mdev->mq = maple_allocq(mdev);
if (!mdev->mq) {
@@ -286,19 +229,14 @@ static struct maple_device *maple_alloc_dev(int port, int unit)
}
mdev->dev.bus = &maple_bus_type;
mdev->dev.parent = &maple_bus;
+ init_waitqueue_head(&mdev->maple_wait);
return mdev;
}
static void maple_free_dev(struct maple_device *mdev)
{
- if (!mdev)
- return;
- if (mdev->mq) {
- if (mdev->mq->recvbufdcsp)
- kmem_cache_free(maple_queue_cache,
- mdev->mq->recvbufdcsp);
- kfree(mdev->mq);
- }
+ kmem_cache_free(maple_queue_cache, mdev->mq->recvbuf);
+ kfree(mdev->mq);
kfree(mdev);
}
@@ -320,7 +258,7 @@ static void maple_build_block(struct mapleq *mq)
maple_lastptr = maple_sendptr;
*maple_sendptr++ = (port << 16) | len | 0x80000000;
- *maple_sendptr++ = PHYSADDR(mq->recvbuf);
+ *maple_sendptr++ = PHYSADDR(mq->recvbuf->buf);
*maple_sendptr++ =
mq->command | (to << 8) | (from << 16) | (len << 24);
while (len-- > 0)
@@ -333,20 +271,28 @@ static void maple_send(void)
int i, maple_packets = 0;
struct mapleq *mq, *nmq;
- if (!list_empty(&maple_sentq))
+ if (!maple_dma_done())
return;
+
+ /* disable DMA */
+ ctrl_outl(0, MAPLE_ENABLE);
+
+ if (!list_empty(&maple_sentq))
+ goto finish;
+
mutex_lock(&maple_wlist_lock);
- if (list_empty(&maple_waitq) || !maple_dma_done()) {
+ if (list_empty(&maple_waitq)) {
mutex_unlock(&maple_wlist_lock);
- return;
+ goto finish;
}
- mutex_unlock(&maple_wlist_lock);
+
maple_lastptr = maple_sendbuf;
maple_sendptr = maple_sendbuf;
- mutex_lock(&maple_wlist_lock);
+
list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
maple_build_block(mq);
- list_move(&mq->list, &maple_sentq);
+ list_del_init(&mq->list);
+ list_add_tail(&mq->list, &maple_sentq);
if (maple_packets++ > MAPLE_MAXPACKETS)
break;
}
@@ -356,10 +302,13 @@ static void maple_send(void)
dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
PAGE_SIZE, DMA_BIDIRECTIONAL);
}
+
+finish:
+ maple_dma_reset();
}
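
For reference, the descriptor that maple_build_block() (context above) writes
per packet is three header words followed by the payload. A sketch that packs
the same words; the parameter names are descriptive only, not from the patch:

/* Illustrative only: pack the maple transfer header exactly as
 * maple_build_block() does. */
static u32 *example_pack_header(u32 *dst, int port, int len,
				u32 command, int to, int from,
				unsigned long recv_phys)
{
	*dst++ = (port << 16) | len | 0x80000000; /* last-packet flag */
	*dst++ = recv_phys;			  /* receive buffer */
	*dst++ = command | (to << 8) | (from << 16) | (len << 24);
	return dst;				  /* payload words go here */
}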
/* check if there is a driver registered likely to match this device */
-static int check_matching_maple_driver(struct device_driver *driver,
+static int maple_check_matching_driver(struct device_driver *driver,
void *devptr)
{
struct maple_driver *maple_drv;
@@ -374,10 +323,7 @@ static int check_matching_maple_driver(struct device_driver *driver,
static void maple_detach_driver(struct maple_device *mdev)
{
- if (!mdev)
- return;
device_unregister(&mdev->dev);
- mdev = NULL;
}
/* process initial MAPLE_COMMAND_DEVINFO for each device or port */
@@ -385,9 +331,9 @@ static void maple_attach_driver(struct maple_device *mdev)
{
char *p, *recvbuf;
unsigned long function;
- int matched, retval;
+ int matched, error;
- recvbuf = mdev->mq->recvbuf;
+ recvbuf = mdev->mq->recvbuf->buf;
/* copy the data as individual elements in
* case of memory optimisation */
memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
@@ -395,7 +341,6 @@ static void maple_attach_driver(struct maple_device *mdev)
memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
- memcpy(&mdev->devinfo.product_licence[0], recvbuf + 52, 60);
memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
@@ -414,43 +359,39 @@ static void maple_attach_driver(struct maple_device *mdev)
else
break;
- printk(KERN_INFO "Maple device detected: %s\n",
- mdev->product_name);
- printk(KERN_INFO "Maple device: %s\n", mdev->product_licence);
-
function = be32_to_cpu(mdev->devinfo.function);
+ dev_info(&mdev->dev, "detected %s: function 0x%lX: at (%d, %d)\n",
+ mdev->product_name, function, mdev->port, mdev->unit);
+
if (function > 0x200) {
/* Do this silently - as not a real device */
function = 0;
- mdev->driver = &maple_dummy_driver;
+ mdev->driver = &maple_unsupported_device;
dev_set_name(&mdev->dev, "%d:0.port", mdev->port);
} else {
- printk(KERN_INFO
- "Maple bus at (%d, %d): Function 0x%lX\n",
- mdev->port, mdev->unit, function);
-
matched =
bus_for_each_drv(&maple_bus_type, NULL, mdev,
- check_matching_maple_driver);
+ maple_check_matching_driver);
if (matched == 0) {
/* Driver does not exist yet */
- printk(KERN_INFO
- "No maple driver found.\n");
- mdev->driver = &maple_dummy_driver;
+ dev_info(&mdev->dev, "no driver found\n");
+ mdev->driver = &maple_unsupported_device;
}
dev_set_name(&mdev->dev, "%d:0%d.%lX", mdev->port,
mdev->unit, function);
}
+
mdev->function = function;
mdev->dev.release = &maple_release_device;
- retval = device_register(&mdev->dev);
- if (retval) {
- printk(KERN_INFO
- "Maple bus: Attempt to register device"
- " (%x, %x) failed.\n",
- mdev->port, mdev->unit);
+
+ atomic_set(&mdev->busy, 0);
+ error = device_register(&mdev->dev);
+ if (error) {
+ dev_warn(&mdev->dev, "could not register device at"
+ " (%d, %d), with error 0x%X\n", mdev->unit,
+ mdev->port, error);
maple_free_dev(mdev);
mdev = NULL;
return;
@@ -462,7 +403,7 @@ static void maple_attach_driver(struct maple_device *mdev)
* port and unit then return 1 - allows identification
* of which devices need to be attached or detached
*/
-static int detach_maple_device(struct device *device, void *portptr)
+static int check_maple_device(struct device *device, void *portptr)
{
struct maple_device_specify *ds;
struct maple_device *mdev;
@@ -477,21 +418,25 @@ static int detach_maple_device(struct device *device, void *portptr)
static int setup_maple_commands(struct device *device, void *ignored)
{
int add;
- struct maple_device *maple_dev = to_maple_dev(device);
-
- if ((maple_dev->interval > 0)
- && time_after(jiffies, maple_dev->when)) {
- /* bounce if we cannot lock */
- add = maple_add_packet(maple_dev,
- be32_to_cpu(maple_dev->devinfo.function),
+ struct maple_device *mdev = to_maple_dev(device);
+ if (mdev->interval > 0 && atomic_read(&mdev->busy) == 0 &&
+ time_after(jiffies, mdev->when)) {
+ /* bounce if we cannot add */
+ add = maple_add_packet(mdev,
+ be32_to_cpu(mdev->devinfo.function),
MAPLE_COMMAND_GETCOND, 1, NULL);
if (!add)
- maple_dev->when = jiffies + maple_dev->interval;
+ mdev->when = jiffies + mdev->interval;
} else {
if (time_after(jiffies, maple_pnp_time))
- /* This will also bounce */
- maple_add_packet(maple_dev, 0,
- MAPLE_COMMAND_DEVINFO, 0, NULL);
+ /* Ensure block reads and devinfo calls cannot
+ * interfere with one another by flagging the
+ * device as busy */
+ if (atomic_read(&mdev->busy) == 0) {
+ atomic_set(&mdev->busy, 1);
+ maple_add_packet(mdev, 0,
+ MAPLE_COMMAND_DEVINFO, 0, NULL);
+ }
}
return 0;
}
@@ -499,29 +444,50 @@ static int setup_maple_commands(struct device *device, void *ignored)
/* VBLANK bottom half - implemented via workqueue */
static void maple_vblank_handler(struct work_struct *work)
{
- if (!list_empty(&maple_sentq) || !maple_dma_done())
+ int x, ret;
+ struct maple_device *mdev;
+
+ if (!maple_dma_done())
return;
ctrl_outl(0, MAPLE_ENABLE);
+ if (!list_empty(&maple_sentq))
+ goto finish;
+
+ /*
+ * Set up essential commands to fetch data and
+ * check that devices are still present
+ */
bus_for_each_dev(&maple_bus_type, NULL, NULL,
- setup_maple_commands);
+ setup_maple_commands);
+
+ if (time_after(jiffies, maple_pnp_time)) {
+ /*
+ * Scan the empty ports - the bus is flaky and may
+ * have mis-reported emptiness
+ */
+ for (x = 0; x < MAPLE_PORTS; x++) {
+ if (checked[x] && empty[x]) {
+ mdev = baseunits[x];
+ if (!mdev)
+ break;
+ atomic_set(&mdev->busy, 1);
+ ret = maple_add_packet(mdev, 0,
+ MAPLE_COMMAND_DEVINFO, 0, NULL);
+ if (!ret)
+ break;
+ }
+ }
- if (time_after(jiffies, maple_pnp_time))
maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
-
- mutex_lock(&maple_wlist_lock);
- if (!list_empty(&maple_waitq) && list_empty(&maple_sentq)) {
- mutex_unlock(&maple_wlist_lock);
- maple_send();
- } else {
- mutex_unlock(&maple_wlist_lock);
}
- maplebus_dma_reset();
+finish:
+ maple_send();
}
-/* handle devices added via hotplugs - placing them on queue for DEVINFO*/
+/* handle devices added via hotplugs - placing them on queue for DEVINFO */
static void maple_map_subunits(struct maple_device *mdev, int submask)
{
int retval, k, devcheck;
@@ -533,7 +499,7 @@ static void maple_map_subunits(struct maple_device *mdev, int submask)
ds.unit = k + 1;
retval =
bus_for_each_dev(&maple_bus_type, NULL, &ds,
- detach_maple_device);
+ check_maple_device);
if (retval) {
submask = submask >> 1;
continue;
@@ -543,6 +509,7 @@ static void maple_map_subunits(struct maple_device *mdev, int submask)
mdev_add = maple_alloc_dev(mdev->port, k + 1);
if (!mdev_add)
return;
+ atomic_set(&mdev_add->busy, 1);
maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO,
0, NULL);
/* mark that we are checking sub devices */
@@ -564,27 +531,45 @@ static void maple_clean_submap(struct maple_device *mdev)
}
/* handle empty port or hotplug removal */
-static void maple_response_none(struct maple_device *mdev,
- struct mapleq *mq)
-{
- if (mdev->unit != 0) {
- list_del(&mq->list);
- maple_clean_submap(mdev);
- printk(KERN_INFO
- "Maple bus device detaching at (%d, %d)\n",
- mdev->port, mdev->unit);
+static void maple_response_none(struct maple_device *mdev)
+{
+ maple_clean_submap(mdev);
+
+ if (likely(mdev->unit != 0)) {
+ /*
+ * Block devices can misbehave: they may report
+ * themselves removed while still in place, or trip
+ * the mtd layer once they really have gone. Trap
+ * that here so we are not flooded with useless
+ * error messages.
+ */
+ if (mdev->can_unload) {
+ if (!mdev->can_unload(mdev)) {
+ atomic_set(&mdev->busy, 2);
+ wake_up(&mdev->maple_wait);
+ return;
+ }
+ }
+
+ dev_info(&mdev->dev, "detaching device at (%d, %d)\n",
+ mdev->port, mdev->unit);
maple_detach_driver(mdev);
return;
- }
- if (!started || !fullscan) {
- if (checked[mdev->port] == false) {
- checked[mdev->port] = true;
- printk(KERN_INFO "No maple devices attached"
- " to port %d\n", mdev->port);
+ } else {
+ if (!started || !fullscan) {
+ if (checked[mdev->port] == false) {
+ checked[mdev->port] = true;
+ empty[mdev->port] = true;
+ dev_info(&mdev->dev, "no devices"
+ " to port %d\n", mdev->port);
+ }
+ return;
}
- return;
}
- maple_clean_submap(mdev);
+ /* Some hardware devices generate false detach messages on unit 0 */
+ atomic_set(&mdev->busy, 0);
}
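
The can_unload() hook is what lets maple_response_none() tell a misbehaving
block device from a genuine unplug: returning zero vetoes the detach. A sketch
of a driver-side implementation; the driver state and names are hypothetical.
A probe routine would set mdev->can_unload to this before issuing transfers.

struct example_card {			/* hypothetical driver state */
	int transfers_in_flight;
};

/* Veto detach while the driver still has I/O outstanding. */
static int example_can_unload(struct maple_device *mdev)
{
	struct example_card *card = dev_get_drvdata(&mdev->dev);

	return !card || card->transfers_in_flight == 0;
}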
/* preprocess hotplugs or scans */
@@ -599,8 +584,11 @@ static void maple_response_devinfo(struct maple_device *mdev,
} else {
if (mdev->unit != 0)
maple_attach_driver(mdev);
+ if (mdev->unit == 0) {
+ empty[mdev->port] = false;
+ maple_attach_driver(mdev);
+ }
}
- return;
}
if (mdev->unit == 0) {
submask = recvbuf[2] & 0x1F;
@@ -611,6 +599,17 @@ static void maple_response_devinfo(struct maple_device *mdev,
}
}
+static void maple_response_fileerr(struct maple_device *mdev, void *recvbuf)
+{
+ if (mdev->fileerr_handler) {
+ mdev->fileerr_handler(mdev, recvbuf);
+ return;
+ }
+
+ dev_warn(&mdev->dev, "device at (%d, %d) reports"
+ " file error 0x%X\n", mdev->port, mdev->unit,
+ ((int *)recvbuf)[1]);
+}
+
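
Similarly, ->fileerr_handler lets a driver that understands the medium decode
the error word instead of taking the generic warning above. A hypothetical
handler (name and retry policy invented for illustration):

/* The second 32-bit word of the reply carries the error code,
 * matching the generic warning above. */
static void example_file_error(struct maple_device *mdev, void *recvbuf)
{
	u32 error = ((u32 *)recvbuf)[1];

	dev_dbg(&mdev->dev, "write failed with 0x%X, will retry\n",
		error);
	/* ...requeue or fail the outstanding request here... */
}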
static void maple_port_rescan(void)
{
int i;
@@ -621,12 +620,6 @@ static void maple_port_rescan(void)
if (checked[i] == false) {
fullscan = 0;
mdev = baseunits[i];
- /*
- * test lock in case scan has failed
- * but device is still locked
- */
- if (mutex_is_locked(&mdev->mq->mutex))
- mutex_unlock(&mdev->mq->mutex);
maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO,
0, NULL);
}
@@ -637,7 +630,7 @@ static void maple_port_rescan(void)
static void maple_dma_handler(struct work_struct *work)
{
struct mapleq *mq, *nmq;
- struct maple_device *dev;
+ struct maple_device *mdev;
char *recvbuf;
enum maple_code code;
@@ -646,43 +639,56 @@ static void maple_dma_handler(struct work_struct *work)
ctrl_outl(0, MAPLE_ENABLE);
if (!list_empty(&maple_sentq)) {
list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
- recvbuf = mq->recvbuf;
+ mdev = mq->dev;
+ recvbuf = mq->recvbuf->buf;
+ dma_cache_sync(&mdev->dev, recvbuf, 0x400,
+ DMA_FROM_DEVICE);
code = recvbuf[0];
- dev = mq->dev;
kfree(mq->sendbuf);
- mutex_unlock(&mq->mutex);
list_del_init(&mq->list);
-
switch (code) {
case MAPLE_RESPONSE_NONE:
- maple_response_none(dev, mq);
+ maple_response_none(mdev);
break;
case MAPLE_RESPONSE_DEVINFO:
- maple_response_devinfo(dev, recvbuf);
+ maple_response_devinfo(mdev, recvbuf);
+ atomic_set(&mdev->busy, 0);
break;
case MAPLE_RESPONSE_DATATRF:
- if (dev->callback)
- dev->callback(mq);
+ if (mdev->callback)
+ mdev->callback(mq);
+ atomic_set(&mdev->busy, 0);
+ wake_up(&mdev->maple_wait);
break;
case MAPLE_RESPONSE_FILEERR:
+ maple_response_fileerr(mdev, recvbuf);
+ atomic_set(&mdev->busy, 0);
+ wake_up(&mdev->maple_wait);
+ break;
+
case MAPLE_RESPONSE_AGAIN:
case MAPLE_RESPONSE_BADCMD:
case MAPLE_RESPONSE_BADFUNC:
- printk(KERN_DEBUG
- "Maple non-fatal error 0x%X\n",
- code);
+ dev_warn(&mdev->dev, "non-fatal error"
+ " 0x%X at (%d, %d)\n", code,
+ mdev->port, mdev->unit);
+ atomic_set(&mdev->busy, 0);
break;
case MAPLE_RESPONSE_ALLINFO:
- printk(KERN_DEBUG
- "Maple - extended device information"
- " not supported\n");
+ dev_notice(&mdev->dev, "extended"
+ " device information request for (%d, %d)"
+ " but call is not supported\n", mdev->port,
+ mdev->unit);
+ atomic_set(&mdev->busy, 0);
break;
case MAPLE_RESPONSE_OK:
+ atomic_set(&mdev->busy, 0);
+ wake_up(&mdev->maple_wait);
break;
default:
@@ -699,20 +705,19 @@ static void maple_dma_handler(struct work_struct *work)
if (!fullscan)
maple_port_rescan();
/* mark that we have been through the first scan */
- if (started == 0)
- started = 1;
+ started = 1;
}
- maplebus_dma_reset();
+ maple_send();
}
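
Every response path now clears ->busy and, where a submitter may be sleeping,
wakes ->maple_wait. The driver-side counterpart would be a wait of roughly this
shape (a sketch; the one-second timeout is an arbitrary illustrative choice):

/* Hypothetical synchronous wrapper over the busy/wait pair:
 * busy becomes 0 on completion, 2 if the device vanished. */
static int example_transfer_sync(struct maple_device *mdev,
				 u32 function, u32 command,
				 size_t words, void *data)
{
	int ret;

	atomic_set(&mdev->busy, 1);
	ret = maple_add_packet(mdev, function, command, words, data);
	if (ret) {
		atomic_set(&mdev->busy, 0);
		return ret;
	}
	wait_event_timeout(mdev->maple_wait,
			   atomic_read(&mdev->busy) != 1, HZ);
	return atomic_read(&mdev->busy) == 2 ? -ENXIO : 0;
}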
-static irqreturn_t maplebus_dma_interrupt(int irq, void *dev_id)
+static irqreturn_t maple_dma_interrupt(int irq, void *dev_id)
{
/* Load everything into the bottom half */
schedule_work(&maple_dma_process);
return IRQ_HANDLED;
}
-static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id)
+static irqreturn_t maple_vblank_interrupt(int irq, void *dev_id)
{
schedule_work(&maple_vblank_process);
return IRQ_HANDLED;
@@ -720,14 +725,14 @@ static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id)
static int maple_set_dma_interrupt_handler(void)
{
- return request_irq(HW_EVENT_MAPLE_DMA, maplebus_dma_interrupt,
- IRQF_SHARED, "maple bus DMA", &maple_dummy_driver);
+ return request_irq(HW_EVENT_MAPLE_DMA, maple_dma_interrupt,
+ IRQF_SHARED, "maple bus DMA", &maple_unsupported_device);
}
static int maple_set_vblank_interrupt_handler(void)
{
- return request_irq(HW_EVENT_VSYNC, maplebus_vblank_interrupt,
- IRQF_SHARED, "maple bus VBLANK", &maple_dummy_driver);
+ return request_irq(HW_EVENT_VSYNC, maple_vblank_interrupt,
+ IRQF_SHARED, "maple bus VBLANK", &maple_unsupported_device);
}
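
Both IRQ handlers stay thin top halves that only schedule work. The work items
are declared at file scope outside this hunk, presumably along these lines:

/* Assumed declarations binding the bottom halves (not in this diff): */
static DECLARE_WORK(maple_dma_process, maple_dma_handler);
static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);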
static int maple_get_dma_buffer(void)
@@ -740,7 +745,7 @@ static int maple_get_dma_buffer(void)
return 0;
}
-static int match_maple_bus_driver(struct device *devptr,
+static int maple_match_bus_driver(struct device *devptr,
struct device_driver *drvptr)
{
struct maple_driver *maple_drv = to_maple_driver(drvptr);
@@ -765,16 +770,18 @@ static void maple_bus_release(struct device *dev)
{
}
-static struct maple_driver maple_dummy_driver = {
+static struct maple_driver maple_unsupported_device = {
.drv = {
- .name = "maple_dummy_driver",
+ .name = "maple_unsupported_device",
.bus = &maple_bus_type,
},
};
-
+/**
+ * maple_bus_type - core maple bus structure
+ */
struct bus_type maple_bus_type = {
.name = "maple",
- .match = match_maple_bus_driver,
+ .match = maple_match_bus_driver,
.uevent = maple_bus_uevent,
};
EXPORT_SYMBOL_GPL(maple_bus_type);
@@ -788,7 +795,8 @@ static int __init maple_bus_init(void)
{
int retval, i;
struct maple_device *mdev[MAPLE_PORTS];
- ctrl_outl(0, MAPLE_STATE);
+
+ ctrl_outl(0, MAPLE_ENABLE);
retval = device_register(&maple_bus);
if (retval)
@@ -798,36 +806,33 @@ static int __init maple_bus_init(void)
if (retval)
goto cleanup_device;
- retval = driver_register(&maple_dummy_driver.drv);
+ retval = driver_register(&maple_unsupported_device.drv);
if (retval)
goto cleanup_bus;
/* allocate memory for maple bus dma */
retval = maple_get_dma_buffer();
if (retval) {
- printk(KERN_INFO
- "Maple bus: Failed to allocate Maple DMA buffers\n");
+ dev_err(&maple_bus, "failed to allocate DMA buffers\n");
goto cleanup_basic;
}
/* set up DMA interrupt handler */
retval = maple_set_dma_interrupt_handler();
if (retval) {
- printk(KERN_INFO
- "Maple bus: Failed to grab maple DMA IRQ\n");
+ dev_err(&maple_bus, "bus failed to grab maple "
+ "DMA IRQ\n");
goto cleanup_dma;
}
/* set up VBLANK interrupt handler */
retval = maple_set_vblank_interrupt_handler();
if (retval) {
- printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n");
+ dev_err(&maple_bus, "bus failed to grab VBLANK IRQ\n");
goto cleanup_irq;
}
- maple_queue_cache =
- kmem_cache_create("maple_queue_cache", 0x400, 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN);
if (!maple_queue_cache)
goto cleanup_bothirqs;
@@ -838,23 +843,23 @@ static int __init maple_bus_init(void)
/* setup maple ports */
for (i = 0; i < MAPLE_PORTS; i++) {
checked[i] = false;
+ empty[i] = false;
mdev[i] = maple_alloc_dev(i, 0);
- baseunits[i] = mdev[i];
if (!mdev[i]) {
while (i-- > 0)
maple_free_dev(mdev[i]);
goto cleanup_cache;
}
+ baseunits[i] = mdev[i];
+ atomic_set(&mdev[i]->busy, 1);
maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
subdevice_map[i] = 0;
}
- /* setup maplebus hardware */
- maplebus_dma_reset();
- /* initial detection */
+ maple_pnp_time = jiffies + HZ;
+ /* prepare initial queue */
maple_send();
- maple_pnp_time = jiffies;
- printk(KERN_INFO "Maple bus core now registered.\n");
+ dev_info(&maple_bus, "bus core now registered\n");
return 0;
@@ -871,7 +876,7 @@ cleanup_dma:
free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);
cleanup_basic:
- driver_unregister(&maple_dummy_driver.drv);
+ driver_unregister(&maple_unsupported_device.drv);
cleanup_bus:
bus_unregister(&maple_bus_type);
@@ -880,7 +885,7 @@ cleanup_device:
device_unregister(&maple_bus);
cleanup:
- printk(KERN_INFO "Maple bus registration failed\n");
+ printk(KERN_ERR "Maple bus registration failed\n");
return retval;
}
/* Push init to later to ensure hardware gets detected */
diff --git a/drivers/staging/go7007/snd-go7007.c b/drivers/staging/go7007/snd-go7007.c
index a7de401f61a..cd19be6c00e 100644
--- a/drivers/staging/go7007/snd-go7007.c
+++ b/drivers/staging/go7007/snd-go7007.c
@@ -248,10 +248,11 @@ int go7007_snd_init(struct go7007 *go)
spin_lock_init(&gosnd->lock);
gosnd->hw_ptr = gosnd->w_idx = gosnd->avail = 0;
gosnd->capturing = 0;
- gosnd->card = snd_card_new(index[dev], id[dev], THIS_MODULE, 0);
- if (gosnd->card == NULL) {
+ ret = snd_card_create(index[dev], id[dev], THIS_MODULE, 0,
+ &gosnd->card);
+ if (ret < 0) {
kfree(gosnd);
- return -ENOMEM;
+ return ret;
}
ret = snd_device_new(gosnd->card, SNDRV_DEV_LOWLEVEL, go,
&go7007_snd_device_ops);
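
This hunk, and the gmidi one below, are the same mechanical ALSA conversion:
snd_card_new() returned the card (NULL on any failure), whereas
snd_card_create() returns the real error and passes the card back through its
last argument. The general replacement pattern:

struct snd_card *card;
int err;

err = snd_card_create(index, id, THIS_MODULE, 0, &card);
if (err < 0)
	return err;	/* propagate the real error, not a blanket -ENOMEM */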
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 83babb0a1df..c6c816b7ecb 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -47,6 +47,7 @@ config USB_ARCH_HAS_OHCI
default y if CPU_SUBTYPE_SH7720
default y if CPU_SUBTYPE_SH7721
default y if CPU_SUBTYPE_SH7763
+ default y if CPU_SUBTYPE_SH7786
# more:
default PCI
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index 60d3f9e9b51..14e09abbddf 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -1099,10 +1099,9 @@ static int gmidi_register_card(struct gmidi_device *dev)
.dev_free = gmidi_snd_free,
};
- card = snd_card_new(index, id, THIS_MODULE, 0);
- if (!card) {
- ERROR(dev, "snd_card_new failed\n");
- err = -ENOMEM;
+ err = snd_card_create(index, id, THIS_MODULE, 0, &card);
+ if (err < 0) {
+ ERROR(dev, "snd_card_create failed\n");
goto fail;
}
dev->card = card;
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 5cf5f1eca4f..7658589edb1 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1049,7 +1049,8 @@ MODULE_LICENSE ("GPL");
#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
defined(CONFIG_CPU_SUBTYPE_SH7721) || \
- defined(CONFIG_CPU_SUBTYPE_SH7763)
+ defined(CONFIG_CPU_SUBTYPE_SH7763) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7786)
#include "ohci-sh.c"
#define PLATFORM_DRIVER ohci_hcd_sh_driver
#endif
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c
index 0a0fd48a856..53f8f1100e8 100644
--- a/drivers/video/pvr2fb.c
+++ b/drivers/video/pvr2fb.c
@@ -61,7 +61,7 @@
#include <mach-dreamcast/mach/sysasic.h>
#endif
-#ifdef CONFIG_SH_DMA
+#ifdef CONFIG_PVR2_DMA
#include <linux/pagemap.h>
#include <mach/dma.h>
#include <asm/dma.h>
@@ -188,7 +188,7 @@ static unsigned int is_blanked = 0; /* Is the screen blanked? */
static unsigned long pvr2fb_map;
#endif
-#ifdef CONFIG_SH_DMA
+#ifdef CONFIG_PVR2_DMA
static unsigned int shdma = PVR2_CASCADE_CHAN;
static unsigned int pvr2dma = ONCHIP_NR_DMA_CHANNELS;
#endif
@@ -207,7 +207,7 @@ static irqreturn_t pvr2fb_interrupt(int irq, void *dev_id);
static int pvr2_init_cable(void);
static int pvr2_get_param(const struct pvr2_params *p, const char *s,
int val, int size);
-#ifdef CONFIG_SH_DMA
+#ifdef CONFIG_PVR2_DMA
static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
size_t count, loff_t *ppos);
#endif
@@ -218,7 +218,7 @@ static struct fb_ops pvr2fb_ops = {
.fb_blank = pvr2fb_blank,
.fb_check_var = pvr2fb_check_var,
.fb_set_par = pvr2fb_set_par,
-#ifdef CONFIG_SH_DMA
+#ifdef CONFIG_PVR2_DMA
.fb_write = pvr2fb_write,
#endif
.fb_fillrect = cfb_fillrect,
@@ -671,7 +671,7 @@ static int pvr2_init_cable(void)
return cable_type;
}
-#ifdef CONFIG_SH_DMA
+#ifdef CONFIG_PVR2_DMA
static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
size_t count, loff_t *ppos)
{
@@ -743,7 +743,7 @@ out_unmap:
return ret;
}
-#endif /* CONFIG_SH_DMA */
+#endif /* CONFIG_PVR2_DMA */
/**
* pvr2fb_common_init
@@ -893,7 +893,7 @@ static int __init pvr2fb_dc_init(void)
return -EBUSY;
}
-#ifdef CONFIG_SH_DMA
+#ifdef CONFIG_PVR2_DMA
if (request_dma(pvr2dma, "pvr2") != 0) {
free_irq(HW_EVENT_VSYNC, 0);
return -EBUSY;
@@ -915,7 +915,7 @@ static void __exit pvr2fb_dc_exit(void)
}
free_irq(HW_EVENT_VSYNC, 0);
-#ifdef CONFIG_SH_DMA
+#ifdef CONFIG_PVR2_DMA
free_dma(pvr2dma);
#endif
}
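
Switching the guards from CONFIG_SH_DMA to CONFIG_PVR2_DMA gives the pvr2fb
DMA path its own switch rather than piggybacking on the generic SuperH DMA
option. The symbol's Kconfig entry is outside this diff; a plausible sketch:

config PVR2_DMA
	bool "Use DMA for PVR2 framebuffer writes"
	depends on FB_PVR2 && SH_DMA
	help
	  Copy framebuffer writes through the cascaded PVR2 DMA
	  channel instead of through the CPU.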
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 1d2636a898c..92ea0ab44ce 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -33,6 +33,8 @@ struct sh_mobile_lcdc_chan {
struct fb_info info;
dma_addr_t dma_handle;
struct fb_deferred_io defio;
+ unsigned long frame_end;
+ wait_queue_head_t frame_end_wait;
};
struct sh_mobile_lcdc_priv {
@@ -226,7 +228,10 @@ static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info)
static irqreturn_t sh_mobile_lcdc_irq(int irq, void *data)
{
struct sh_mobile_lcdc_priv *priv = data;
+ struct sh_mobile_lcdc_chan *ch;
unsigned long tmp;
+ int is_sub;
+ int k;
/* acknowledge interrupt */
tmp = lcdc_read(priv, _LDINTR);
@@ -234,8 +239,24 @@ static irqreturn_t sh_mobile_lcdc_irq(int irq, void *data)
tmp |= 0x000000ff ^ LDINTR_FS; /* status in low 8 */
lcdc_write(priv, _LDINTR, tmp);
- /* disable clocks */
- sh_mobile_lcdc_clk_off(priv);
+ /* figure out if this interrupt is for main or sub lcd */
+ is_sub = (lcdc_read(priv, _LDSR) & (1 << 10)) ? 1 : 0;
+
+ /* wake up channel and disable clocks */
+ for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
+ ch = &priv->ch[k];
+
+ if (!ch->enabled)
+ continue;
+
+ if (is_sub == lcdc_chan_is_sublcd(ch)) {
+ ch->frame_end = 1;
+ wake_up(&ch->frame_end_wait);
+
+ sh_mobile_lcdc_clk_off(priv);
+ }
+ }
+
return IRQ_HANDLED;
}
@@ -448,18 +469,27 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
struct sh_mobile_lcdc_board_cfg *board_cfg;
int k;
- /* tell the board code to disable the panel */
+ /* clean up deferred io and ask board code to disable panel */
for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
ch = &priv->ch[k];
- board_cfg = &ch->cfg.board_cfg;
- if (board_cfg->display_off)
- board_cfg->display_off(board_cfg->board_data);
- /* cleanup deferred io if enabled */
+ /* deferred io mode:
+ * flush the frame, wait for the frame end interrupt,
+ * then clean up deferred io and re-enable the clock
+ */
if (ch->info.fbdefio) {
+ ch->frame_end = 0;
+ schedule_delayed_work(&ch->info.deferred_work, 0);
+ wait_event(ch->frame_end_wait, ch->frame_end);
fb_deferred_io_cleanup(&ch->info);
ch->info.fbdefio = NULL;
+ sh_mobile_lcdc_clk_on(priv);
}
+
+ board_cfg = &ch->cfg.board_cfg;
+ if (board_cfg->display_off)
+ board_cfg->display_off(board_cfg->board_data);
+
}
/* stop the lcdc */
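
Taken together with the IRQ hunk above, stop and interrupt now form a small
flush-and-wait handshake on ->frame_end. Reduced to its essentials (a
standalone model of the idiom, not the driver's code verbatim):

#include <linux/wait.h>

struct frame_sync {
	unsigned long frame_end;
	wait_queue_head_t frame_end_wait;
};

/* IRQ side: mark the frame done and wake any waiter. */
static void frame_done(struct frame_sync *s)
{
	s->frame_end = 1;
	wake_up(&s->frame_end_wait);
}

/* Stop path: arm the flag, kick the deferred io flush (elided here),
 * then sleep until the frame-end interrupt has fired. */
static void frame_flush_and_wait(struct frame_sync *s)
{
	s->frame_end = 0;
	wait_event(s->frame_end_wait, s->frame_end);
}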
@@ -652,6 +682,26 @@ static int sh_mobile_lcdc_set_bpp(struct fb_var_screeninfo *var, int bpp)
return 0;
}
+static int sh_mobile_lcdc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ sh_mobile_lcdc_stop(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static int sh_mobile_lcdc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ return sh_mobile_lcdc_start(platform_get_drvdata(pdev));
+}
+
+static struct dev_pm_ops sh_mobile_lcdc_dev_pm_ops = {
+ .suspend = sh_mobile_lcdc_suspend,
+ .resume = sh_mobile_lcdc_resume,
+};
+
static int sh_mobile_lcdc_remove(struct platform_device *pdev);
static int __init sh_mobile_lcdc_probe(struct platform_device *pdev)
@@ -707,6 +757,7 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "unsupported interface type\n");
goto err1;
}
+ init_waitqueue_head(&priv->ch[i].frame_end_wait);
switch (pdata->ch[i].chan) {
case LCDC_CHAN_MAINLCD:
@@ -860,6 +911,7 @@ static struct platform_driver sh_mobile_lcdc_driver = {
.driver = {
.name = "sh_mobile_lcdc_fb",
.owner = THIS_MODULE,
+ .pm = &sh_mobile_lcdc_dev_pm_ops,
},
.probe = sh_mobile_lcdc_probe,
.remove = sh_mobile_lcdc_remove,