Diffstat (limited to 'drivers/misc')
-rw-r--r--  drivers/misc/Kconfig                  18
-rw-r--r--  drivers/misc/Makefile                  1
-rw-r--r--  drivers/misc/arm-charlcd.c             7
-rw-r--r--  drivers/misc/ds1682.c                  5
-rw-r--r--  drivers/misc/genwqe/card_base.h       58
-rw-r--r--  drivers/misc/genwqe/card_ddcb.c        6
-rw-r--r--  drivers/misc/genwqe/card_debugfs.c     4
-rw-r--r--  drivers/misc/genwqe/card_dev.c        44
-rw-r--r--  drivers/misc/genwqe/card_utils.c     172
-rw-r--r--  drivers/misc/genwqe/genwqe_driver.h    2
-rw-r--r--  drivers/misc/mei/amthif.c              2
-rw-r--r--  drivers/misc/mei/bus.c                 4
-rw-r--r--  drivers/misc/mei/client.c             90
-rw-r--r--  drivers/misc/mei/hbm.c                97
-rw-r--r--  drivers/misc/mei/hbm.h                 2
-rw-r--r--  drivers/misc/mei/hw-me-regs.h         14
-rw-r--r--  drivers/misc/mei/hw-me.c             322
-rw-r--r--  drivers/misc/mei/hw-me.h              15
-rw-r--r--  drivers/misc/mei/hw-txe-regs.h         2
-rw-r--r--  drivers/misc/mei/hw-txe.c            111
-rw-r--r--  drivers/misc/mei/hw-txe.h             21
-rw-r--r--  drivers/misc/mei/hw.h                 25
-rw-r--r--  drivers/misc/mei/init.c               60
-rw-r--r--  drivers/misc/mei/interrupt.c           3
-rw-r--r--  drivers/misc/mei/main.c                4
-rw-r--r--  drivers/misc/mei/mei_dev.h           101
-rw-r--r--  drivers/misc/mei/pci-me.c            242
-rw-r--r--  drivers/misc/mei/pci-txe.c           155
-rw-r--r--  drivers/misc/mei/wd.c                  2
-rw-r--r--  drivers/misc/sgi-gru/grufile.c        11
-rw-r--r--  drivers/misc/vexpress-syscfg.c       324
31 files changed, 1675 insertions, 249 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 1cb74085e41..a43d0c46727 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -54,6 +54,7 @@ config AD525X_DPOT_SPI
config ATMEL_PWM
tristate "Atmel AT32/AT91 PWM support"
depends on HAVE_CLK
+ depends on AVR32 || AT91SAM9263 || AT91SAM9RL || AT91SAM9G45
help
This option enables device driver support for the PWM channels
on certain Atmel processors. Pulse Width Modulation is used for
@@ -200,7 +201,7 @@ config ICS932S401
config ATMEL_SSC
tristate "Device driver for Atmel SSC peripheral"
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && (AVR32 || ARCH_AT91 || COMPILE_TEST)
---help---
This option enables device driver support for Atmel Synchronized
Serial Communication peripheral (SSC).
@@ -300,8 +301,8 @@ config SGI_GRU_DEBUG
depends on SGI_GRU
default n
---help---
- This option enables addition debugging code for the SGI GRU driver. If
- you are unsure, say N.
+ This option enables additional debugging code for the SGI GRU driver.
+ If you are unsure, say N.
config APDS9802ALS
tristate "Medfield Avago APDS9802 ALS Sensor module"
@@ -468,7 +469,7 @@ config BMP085_SPI
config PCH_PHUB
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
select GENERIC_NET_UTILS
- depends on PCI
+ depends on PCI && (X86_32 || COMPILE_TEST)
help
This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of
Intel Topcliff which is an IOH(Input/Output Hub) for x86 embedded
@@ -515,6 +516,15 @@ config SRAM
the genalloc API. It is supposed to be used for small on-chip SRAM
areas found on many SoCs.
+config VEXPRESS_SYSCFG
+ bool "Versatile Express System Configuration driver"
+ depends on VEXPRESS_CONFIG
+ default y
+ help
+ ARM Ltd. Versatile Express uses a specialised platform configuration
+ bus. The System Configuration interface is one of the possible means
+ of generating transactions on this bus.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 7eb4b69580c..d59ce1261b3 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -55,3 +55,4 @@ obj-$(CONFIG_SRAM) += sram.o
obj-y += mic/
obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/
+obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c
index b7ebf8021d9..c72e96b523e 100644
--- a/drivers/misc/arm-charlcd.c
+++ b/drivers/misc/arm-charlcd.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -366,11 +367,17 @@ static const struct dev_pm_ops charlcd_pm_ops = {
.resume = charlcd_resume,
};
+static const struct of_device_id charlcd_match[] = {
+ { .compatible = "arm,versatile-lcd", },
+ {}
+};
+
static struct platform_driver charlcd_driver = {
.driver = {
.name = DRIVERNAME,
.owner = THIS_MODULE,
.pm = &charlcd_pm_ops,
+ .of_match_table = of_match_ptr(charlcd_match),
},
.remove = __exit_p(charlcd_remove),
};
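
The hunk above lets the character LCD bind to a device-tree node through the "arm,versatile-lcd" compatible string; of_match_ptr() drops the table when CONFIG_OF is not set. As a rough sketch of how such a match table is typically consumed (driver name, probe body and the "label" property below are illustrative, not taken from this driver):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id example_lcd_match[] = {
	{ .compatible = "arm,versatile-lcd" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_lcd_match);

static int example_lcd_probe(struct platform_device *pdev)
{
	const char *label = "lcd";	/* hypothetical property */

	of_property_read_string(pdev->dev.of_node, "label", &label);
	dev_info(&pdev->dev, "probed from DT, label=%s\n", label);
	return 0;
}

static struct platform_driver example_lcd_driver = {
	.probe = example_lcd_probe,
	.driver = {
		.name = "example-lcd",
		.of_match_table = of_match_ptr(example_lcd_match),
	},
};
module_platform_driver(example_lcd_driver);
MODULE_LICENSE("GPL");
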
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index 6a672f9ef52..b909fb30232 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -85,7 +85,6 @@ static ssize_t ds1682_store(struct device *dev, struct device_attribute *attr,
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
struct i2c_client *client = to_i2c_client(dev);
- char *endp;
u64 val;
__le32 val_le;
int rc;
@@ -93,8 +92,8 @@ static ssize_t ds1682_store(struct device *dev, struct device_attribute *attr,
dev_dbg(dev, "ds1682_store() called on %s\n", attr->attr.name);
/* Decode input */
- val = simple_strtoull(buf, &endp, 0);
- if (buf == endp) {
+ rc = kstrtoull(buf, 0, &val);
+ if (rc < 0) {
dev_dbg(dev, "input string not a number\n");
return -EINVAL;
}
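
The ds1682 change swaps simple_strtoull() for kstrtoull(), which rejects trailing garbage and reports overflow instead of silently returning a partial value. A minimal sysfs store callback following the same pattern (the attribute itself is hypothetical):

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	u64 val;
	int rc;

	/* base 0: accept decimal, octal (0...) and hex (0x...) input */
	rc = kstrtoull(buf, 0, &val);
	if (rc < 0)
		return rc;		/* -EINVAL or -ERANGE */

	/* ... program 'val' into the device here ... */
	return count;
}
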
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
index 5e4dbd21f89..0e608a28860 100644
--- a/drivers/misc/genwqe/card_base.h
+++ b/drivers/misc/genwqe/card_base.h
@@ -337,6 +337,44 @@ enum genwqe_requ_state {
};
/**
+ * struct genwqe_sgl - Scatter gather list describing user-space memory
+ * @sgl: scatter gather list needs to be 128 byte aligned
+ * @sgl_dma_addr: dma address of sgl
+ * @sgl_size: size of area used for sgl
+ * @user_addr: user-space address of memory area
+ * @user_size: size of user-space memory area
+ * @fpage: buffer for a partial first page if needed
+ * @fpage_dma_addr: dma address of the partial first page
+ * @lpage: buffer for a partial last page if needed
+ * @lpage_dma_addr: dma address of the partial last page
+ */
+struct genwqe_sgl {
+ dma_addr_t sgl_dma_addr;
+ struct sg_entry *sgl;
+ size_t sgl_size; /* size of sgl */
+
+ void __user *user_addr; /* user-space base-address */
+ size_t user_size; /* size of memory area */
+
+ unsigned long nr_pages;
+ unsigned long fpage_offs;
+ size_t fpage_size;
+ size_t lpage_size;
+
+ void *fpage;
+ dma_addr_t fpage_dma_addr;
+
+ void *lpage;
+ dma_addr_t lpage_dma_addr;
+};
+
+int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
+ void __user *user_addr, size_t user_size);
+
+int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
+ dma_addr_t *dma_list);
+
+int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl);
+
+/**
* struct ddcb_requ - Kernel internal representation of the DDCB request
* @cmd: User space representation of the DDCB execution request
*/
@@ -347,9 +385,7 @@ struct ddcb_requ {
struct ddcb_queue *queue; /* associated queue */
struct dma_mapping dma_mappings[DDCB_FIXUPS];
- struct sg_entry *sgl[DDCB_FIXUPS];
- dma_addr_t sgl_dma_addr[DDCB_FIXUPS];
- size_t sgl_size[DDCB_FIXUPS];
+ struct genwqe_sgl sgls[DDCB_FIXUPS];
/* kernel/user shared content */
struct genwqe_ddcb_cmd cmd; /* ddcb_no for this request */
@@ -453,22 +489,6 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m,
int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
struct ddcb_requ *req);
-struct sg_entry *genwqe_alloc_sgl(struct genwqe_dev *cd, int num_pages,
- dma_addr_t *dma_addr, size_t *sgl_size);
-
-void genwqe_free_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list,
- dma_addr_t dma_addr, size_t size);
-
-int genwqe_setup_sgl(struct genwqe_dev *cd,
- unsigned long offs,
- unsigned long size,
- struct sg_entry *sgl, /* genwqe sgl */
- dma_addr_t dma_addr, size_t sgl_size,
- dma_addr_t *dma_list, int page_offs, int num_pages);
-
-int genwqe_check_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list,
- int size);
-
static inline bool dma_mapping_used(struct dma_mapping *m)
{
if (!m)
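
The new struct genwqe_sgl gathers everything the driver needs to describe one user buffer: the chained sg_entry block itself plus optional bounce buffers for a partial first page (fpage) and last page (lpage). The page geometry that genwqe_alloc_sync_sgl() derives from user_addr/user_size reduces to the arithmetic below; this is a standalone user-space sketch, not kernel code:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* example buffer: starts 100 bytes into a page, 10000 bytes long */
	uintptr_t user_addr = 0x1000 + 100;
	size_t user_size = 10000;

	size_t fpage_offs = user_addr & (PAGE_SIZE - 1);
	size_t fpage_size = PAGE_SIZE - fpage_offs;
	if (fpage_size > user_size)
		fpage_size = user_size;
	size_t nr_pages = (fpage_offs + user_size + PAGE_SIZE - 1) / PAGE_SIZE;
	size_t lpage_size = (user_size - fpage_size) % PAGE_SIZE;

	/* prints: fpage_offs=100 fpage_size=3996 nr_pages=3 lpage_size=1908 */
	printf("fpage_offs=%zu fpage_size=%zu nr_pages=%zu lpage_size=%zu\n",
	       fpage_offs, fpage_size, nr_pages, lpage_size);
	return 0;
}
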
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index 6f1acc0ccf8..c8046db2d5a 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -305,6 +305,8 @@ static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
break;
new = (old | DDCB_NEXT_BE32);
+
+ wmb();
icrc_hsi_shi = cmpxchg(&prev_ddcb->icrc_hsi_shi_32, old, new);
if (icrc_hsi_shi == old)
@@ -314,6 +316,8 @@ static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
/* Queue must be re-started by updating QUEUE_OFFSET */
ddcb_mark_tapped(pddcb);
num = (u64)ddcb_no << 8;
+
+ wmb();
__genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */
return RET_DDCB_TAPPED;
@@ -1306,7 +1310,7 @@ static int queue_wake_up_all(struct genwqe_dev *cd)
*/
int genwqe_finish_queue(struct genwqe_dev *cd)
{
- int i, rc, in_flight;
+ int i, rc = 0, in_flight;
int waitmax = genwqe_ddcb_software_timeout;
struct pci_dev *pci_dev = cd->pci_dev;
struct ddcb_queue *queue = &cd->queue;
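
The wmb() calls added above ensure the DDCB words written by the CPU are visible in memory before the hardware is triggered, either via the chaining cmpxchg() on the previous descriptor or via the MMIO write to QUEUE_OFFSET. The same "fill descriptor, barrier, ring doorbell" ordering in its most generic form (structure and register names here are made up for illustration):

#include <linux/types.h>
#include <linux/io.h>
#include <asm/barrier.h>

struct fake_desc {
	u64 buf_dma;
	u32 len;
	u32 flags;
};

static void fake_submit(struct fake_desc *desc, void __iomem *doorbell,
			dma_addr_t buf, u32 len, u32 tail)
{
	desc->buf_dma = buf;		/* 1. fill the descriptor in memory */
	desc->len = len;
	desc->flags = 1;		/*    and mark it valid */

	wmb();				/* 2. order those stores before ... */

	writel(tail, doorbell);		/* 3. ... telling the device to fetch it */
}
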
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
index 50d2096ea1c..0a33ade6410 100644
--- a/drivers/misc/genwqe/card_debugfs.c
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -348,7 +348,7 @@ int genwqe_init_debugfs(struct genwqe_dev *cd)
char name[64];
unsigned int i;
- sprintf(card_name, "%s%u_card", GENWQE_DEVNAME, cd->card_idx);
+ sprintf(card_name, "%s%d_card", GENWQE_DEVNAME, cd->card_idx);
root = debugfs_create_dir(card_name, cd->debugfs_genwqe);
if (!root) {
@@ -454,7 +454,7 @@ int genwqe_init_debugfs(struct genwqe_dev *cd)
}
for (i = 0; i < GENWQE_MAX_VFS; i++) {
- sprintf(name, "vf%d_jobtimeout_msec", i);
+ sprintf(name, "vf%u_jobtimeout_msec", i);
file = debugfs_create_u32(name, 0666, root,
&cd->vf_jobtimeout_msec[i]);
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 2c2c9cc7523..1d2f163a190 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -531,7 +531,9 @@ static int do_flash_update(struct genwqe_file *cfile,
case '1':
cmdopts = 0x1C;
break; /* download/erase_first/part_1 */
- case 'v': /* cmdopts = 0x0c (VPD) */
+ case 'v':
+ cmdopts = 0x0C;
+ break; /* download/erase_first/vpd */
default:
return -EINVAL;
}
@@ -665,6 +667,8 @@ static int do_flash_read(struct genwqe_file *cfile,
cmdopts = 0x1A;
break; /* upload/part_1 */
case 'v':
+ cmdopts = 0x0A;
+ break; /* upload/vpd */
default:
return -EINVAL;
}
@@ -836,15 +840,8 @@ static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
__genwqe_del_mapping(cfile, dma_map);
genwqe_user_vunmap(cd, dma_map, req);
}
- if (req->sgl[i] != NULL) {
- genwqe_free_sgl(cd, req->sgl[i],
- req->sgl_dma_addr[i],
- req->sgl_size[i]);
- req->sgl[i] = NULL;
- req->sgl_dma_addr[i] = 0x0;
- req->sgl_size[i] = 0;
- }
-
+ if (req->sgls[i].sgl != NULL)
+ genwqe_free_sync_sgl(cd, &req->sgls[i]);
}
return 0;
}
@@ -913,7 +910,7 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
case ATS_TYPE_SGL_RDWR:
case ATS_TYPE_SGL_RD: {
- int page_offs, nr_pages, offs;
+ int page_offs;
u_addr = be64_to_cpu(*((__be64 *)
&cmd->asiv[asiv_offs]));
@@ -951,27 +948,18 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
page_offs = 0;
}
- offs = offset_in_page(u_addr);
- nr_pages = DIV_ROUND_UP(offs + u_size, PAGE_SIZE);
-
/* create genwqe style scatter gather list */
- req->sgl[i] = genwqe_alloc_sgl(cd, m->nr_pages,
- &req->sgl_dma_addr[i],
- &req->sgl_size[i]);
- if (req->sgl[i] == NULL) {
- rc = -ENOMEM;
+ rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i],
+ (void __user *)u_addr,
+ u_size);
+ if (rc != 0)
goto err_out;
- }
- genwqe_setup_sgl(cd, offs, u_size,
- req->sgl[i],
- req->sgl_dma_addr[i],
- req->sgl_size[i],
- m->dma_list,
- page_offs,
- nr_pages);
+
+ genwqe_setup_sgl(cd, &req->sgls[i],
+ &m->dma_list[page_offs]);
*((__be64 *)&cmd->asiv[asiv_offs]) =
- cpu_to_be64(req->sgl_dma_addr[i]);
+ cpu_to_be64(req->sgls[i].sgl_dma_addr);
break;
}
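
After this rework each ATS scatter-gather fixup boils down to three calls: allocate (which also caches partial pages), chain, and, in ddcb_cmd_cleanup(), copy back and free. Reduced to a skeleton with the surrounding declarations and error paths elided:

	struct genwqe_sgl *sgl = &req->sgls[i];
	int rc;

	rc = genwqe_alloc_sync_sgl(cd, sgl, (void __user *)u_addr, u_size);
	if (rc != 0)
		goto err_out;		/* sgl block and bounce pages allocated */

	genwqe_setup_sgl(cd, sgl, &m->dma_list[page_offs]);

	/* hand sgl->sgl_dma_addr to the DDCB, run the job, then later: */
	genwqe_free_sync_sgl(cd, sgl);	/* copy bounce pages back and free */
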
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 6b1a6ef9f1a..62cc6bb3f62 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -275,67 +275,107 @@ static int genwqe_sgl_size(int num_pages)
return roundup(len, PAGE_SIZE);
}
-struct sg_entry *genwqe_alloc_sgl(struct genwqe_dev *cd, int num_pages,
- dma_addr_t *dma_addr, size_t *sgl_size)
+/**
+ * genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages
+ *
+ * Allocates memory for sgl and overlapping pages. Pages which might
+ * overlap other user-space memory blocks are cached for DMA,
+ * such that we do not run into synchronization issues. Data is copied
+ * from user-space into the cached pages.
+ */
+int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
+ void __user *user_addr, size_t user_size)
{
+ int rc;
struct pci_dev *pci_dev = cd->pci_dev;
- struct sg_entry *sgl;
- *sgl_size = genwqe_sgl_size(num_pages);
- if (get_order(*sgl_size) > MAX_ORDER) {
+ sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
+ sgl->fpage_size = min_t(size_t, PAGE_SIZE-sgl->fpage_offs, user_size);
+ sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE);
+ sgl->lpage_size = (user_size - sgl->fpage_size) % PAGE_SIZE;
+
+ dev_dbg(&pci_dev->dev, "[%s] uaddr=%p usize=%8ld nr_pages=%ld "
+ "fpage_offs=%lx fpage_size=%ld lpage_size=%ld\n",
+ __func__, user_addr, user_size, sgl->nr_pages,
+ sgl->fpage_offs, sgl->fpage_size, sgl->lpage_size);
+
+ sgl->user_addr = user_addr;
+ sgl->user_size = user_size;
+ sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);
+
+ if (get_order(sgl->sgl_size) > MAX_ORDER) {
dev_err(&pci_dev->dev,
"[%s] err: too much memory requested!\n", __func__);
- return NULL;
+ return -ENOMEM;
}
- sgl = __genwqe_alloc_consistent(cd, *sgl_size, dma_addr);
- if (sgl == NULL) {
+ sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
+ &sgl->sgl_dma_addr);
+ if (sgl->sgl == NULL) {
dev_err(&pci_dev->dev,
"[%s] err: no memory available!\n", __func__);
- return NULL;
+ return -ENOMEM;
}
- return sgl;
+ /* Only use buffering on incomplete pages */
+ if ((sgl->fpage_size != 0) && (sgl->fpage_size != PAGE_SIZE)) {
+ sgl->fpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
+ &sgl->fpage_dma_addr);
+ if (sgl->fpage == NULL)
+ goto err_out;
+
+ /* Sync with user memory */
+ if (copy_from_user(sgl->fpage + sgl->fpage_offs,
+ user_addr, sgl->fpage_size)) {
+ rc = -EFAULT;
+ goto err_out;
+ }
+ }
+ if (sgl->lpage_size != 0) {
+ sgl->lpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
+ &sgl->lpage_dma_addr);
+ if (sgl->lpage == NULL)
+ goto err_out1;
+
+ /* Sync with user memory */
+ if (copy_from_user(sgl->lpage, user_addr + user_size -
+ sgl->lpage_size, sgl->lpage_size)) {
+ rc = -EFAULT;
+ goto err_out1;
+ }
+ }
+ return 0;
+
+ err_out1:
+ __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
+ sgl->fpage_dma_addr);
+ err_out:
+ __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
+ sgl->sgl_dma_addr);
+ return -ENOMEM;
}
-int genwqe_setup_sgl(struct genwqe_dev *cd,
- unsigned long offs,
- unsigned long size,
- struct sg_entry *sgl,
- dma_addr_t dma_addr, size_t sgl_size,
- dma_addr_t *dma_list, int page_offs, int num_pages)
+int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
+ dma_addr_t *dma_list)
{
int i = 0, j = 0, p;
unsigned long dma_offs, map_offs;
- struct pci_dev *pci_dev = cd->pci_dev;
dma_addr_t prev_daddr = 0;
struct sg_entry *s, *last_s = NULL;
-
- /* sanity checks */
- if (offs > PAGE_SIZE) {
- dev_err(&pci_dev->dev,
- "[%s] too large start offs %08lx\n", __func__, offs);
- return -EFAULT;
- }
- if (sgl_size < genwqe_sgl_size(num_pages)) {
- dev_err(&pci_dev->dev,
- "[%s] sgl_size too small %08lx for %d pages\n",
- __func__, sgl_size, num_pages);
- return -EFAULT;
- }
+ size_t size = sgl->user_size;
dma_offs = 128; /* next block if needed/dma_offset */
- map_offs = offs; /* offset in first page */
+ map_offs = sgl->fpage_offs; /* offset in first page */
- s = &sgl[0]; /* first set of 8 entries */
+ s = &sgl->sgl[0]; /* first set of 8 entries */
p = 0; /* page */
- while (p < num_pages) {
+ while (p < sgl->nr_pages) {
dma_addr_t daddr;
unsigned int size_to_map;
/* always write the chaining entry, cleanup is done later */
j = 0;
- s[j].target_addr = cpu_to_be64(dma_addr + dma_offs);
+ s[j].target_addr = cpu_to_be64(sgl->sgl_dma_addr + dma_offs);
s[j].len = cpu_to_be32(128);
s[j].flags = cpu_to_be32(SG_CHAINED);
j++;
@@ -343,7 +383,17 @@ int genwqe_setup_sgl(struct genwqe_dev *cd,
while (j < 8) {
/* DMA mapping for requested page, offs, size */
size_to_map = min(size, PAGE_SIZE - map_offs);
- daddr = dma_list[page_offs + p] + map_offs;
+
+ if ((p == 0) && (sgl->fpage != NULL)) {
+ daddr = sgl->fpage_dma_addr + map_offs;
+
+ } else if ((p == sgl->nr_pages - 1) &&
+ (sgl->lpage != NULL)) {
+ daddr = sgl->lpage_dma_addr;
+ } else {
+ daddr = dma_list[p] + map_offs;
+ }
+
size -= size_to_map;
map_offs = 0;
@@ -358,7 +408,7 @@ int genwqe_setup_sgl(struct genwqe_dev *cd,
size_to_map);
p++; /* process next page */
- if (p == num_pages)
+ if (p == sgl->nr_pages)
goto fixup; /* nothing to do */
prev_daddr = daddr + size_to_map;
@@ -374,7 +424,7 @@ int genwqe_setup_sgl(struct genwqe_dev *cd,
j++;
p++; /* process next page */
- if (p == num_pages)
+ if (p == sgl->nr_pages)
goto fixup; /* nothing to do */
}
dma_offs += 128;
@@ -395,10 +445,50 @@ int genwqe_setup_sgl(struct genwqe_dev *cd,
return 0;
}
-void genwqe_free_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list,
- dma_addr_t dma_addr, size_t size)
+/**
+ * genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages
+ *
+ * After the DMA transfer has been completed we free the memory for
+ * the sgl and the cached pages. Data is being transfered from cached
+ * pages into user-space buffers.
+ */
+int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
{
- __genwqe_free_consistent(cd, size, sg_list, dma_addr);
+ int rc = 0;
+ struct pci_dev *pci_dev = cd->pci_dev;
+
+ if (sgl->fpage) {
+ if (copy_to_user(sgl->user_addr, sgl->fpage + sgl->fpage_offs,
+ sgl->fpage_size)) {
+ dev_err(&pci_dev->dev, "[%s] err: copying fpage!\n",
+ __func__);
+ rc = -EFAULT;
+ }
+ __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
+ sgl->fpage_dma_addr);
+ sgl->fpage = NULL;
+ sgl->fpage_dma_addr = 0;
+ }
+ if (sgl->lpage) {
+ if (copy_to_user(sgl->user_addr + sgl->user_size -
+ sgl->lpage_size, sgl->lpage,
+ sgl->lpage_size)) {
+ dev_err(&pci_dev->dev, "[%s] err: copying lpage!\n",
+ __func__);
+ rc = -EFAULT;
+ }
+ __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
+ sgl->lpage_dma_addr);
+ sgl->lpage = NULL;
+ sgl->lpage_dma_addr = 0;
+ }
+ __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
+ sgl->sgl_dma_addr);
+
+ sgl->sgl = NULL;
+ sgl->sgl_dma_addr = 0x0;
+ sgl->sgl_size = 0;
+ return rc;
}
/**
@@ -628,7 +718,7 @@ int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
int rc;
struct pci_dev *pci_dev = cd->pci_dev;
- rc = pci_enable_msi_block(pci_dev, count);
+ rc = pci_enable_msi_exact(pci_dev, count);
if (rc == 0)
cd->flags |= GENWQE_FLAG_MSI_ENABLED;
return rc;
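
The last hunk replaces pci_enable_msi_block(), which could succeed with fewer vectors than asked for, with pci_enable_msi_exact(), which is all-or-nothing: it returns 0 only when exactly 'count' vectors were allocated. A hedged sketch of a typical call site (the function name is illustrative):

#include <linux/pci.h>

static int example_setup_msi(struct pci_dev *pdev, int count)
{
	int rc;

	rc = pci_enable_msi_exact(pdev, count);
	if (rc < 0)
		return rc;	/* e.g. -ENOSPC when fewer than 'count' are available */

	/* vectors are consecutive: request_irq() on pdev->irq .. pdev->irq + count - 1 */
	return 0;
}
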
diff --git a/drivers/misc/genwqe/genwqe_driver.h b/drivers/misc/genwqe/genwqe_driver.h
index 46e916b36c7..cd5263163a6 100644
--- a/drivers/misc/genwqe/genwqe_driver.h
+++ b/drivers/misc/genwqe/genwqe_driver.h
@@ -36,7 +36,7 @@
#include <asm/byteorder.h>
#include <linux/genwqe/genwqe_card.h>
-#define DRV_VERS_STRING "2.0.0"
+#define DRV_VERS_STRING "2.0.15"
/*
* Static minor number assignement, until we decide/implement
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index b8deb345548..0d6234db00f 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -111,8 +111,6 @@ int mei_amthif_host_init(struct mei_device *dev)
return ret;
}
- cl->state = MEI_FILE_CONNECTING;
-
ret = mei_cl_connect(cl, NULL);
dev->iamthif_state = MEI_IAMTHIF_IDLE;
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index ddc5ac92a20..0e993ef28b9 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -247,7 +247,7 @@ static int ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
return id;
if (length > dev->me_clients[id].props.max_msg_length)
- return -EINVAL;
+ return -EFBIG;
cb = mei_io_cb_init(cl, NULL);
if (!cb)
@@ -427,8 +427,6 @@ int mei_cl_enable_device(struct mei_cl_device *device)
mutex_lock(&dev->device_lock);
- cl->state = MEI_FILE_CONNECTING;
-
err = mei_cl_connect(cl, NULL);
if (err < 0) {
mutex_unlock(&dev->device_lock);
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 8c078b808cd..59d20c599b1 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -18,6 +18,7 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include <linux/mei.h>
@@ -415,6 +416,10 @@ void mei_host_client_init(struct work_struct *work)
dev->reset_count = 0;
mutex_unlock(&dev->device_lock);
+
+ pm_runtime_mark_last_busy(&dev->pdev->dev);
+ dev_dbg(&dev->pdev->dev, "rpm: autosuspend\n");
+ pm_runtime_autosuspend(&dev->pdev->dev);
}
/**
@@ -425,6 +430,12 @@ void mei_host_client_init(struct work_struct *work)
*/
bool mei_hbuf_acquire(struct mei_device *dev)
{
+ if (mei_pg_state(dev) == MEI_PG_ON ||
+ dev->pg_event == MEI_PG_EVENT_WAIT) {
+ dev_dbg(&dev->pdev->dev, "device is in pg\n");
+ return false;
+ }
+
if (!dev->hbuf_is_ready) {
dev_dbg(&dev->pdev->dev, "hbuf is not ready\n");
return false;
@@ -460,9 +471,18 @@ int mei_cl_disconnect(struct mei_cl *cl)
if (cl->state != MEI_FILE_DISCONNECTING)
return 0;
+ rets = pm_runtime_get(&dev->pdev->dev);
+ if (rets < 0 && rets != -EINPROGRESS) {
+ pm_runtime_put_noidle(&dev->pdev->dev);
+ cl_err(dev, cl, "rpm: get failed %d\n", rets);
+ return rets;
+ }
+
cb = mei_io_cb_init(cl, NULL);
- if (!cb)
- return -ENOMEM;
+ if (!cb) {
+ rets = -ENOMEM;
+ goto free;
+ }
cb->fop_type = MEI_FOP_CLOSE;
if (mei_hbuf_acquire(dev)) {
@@ -494,8 +514,7 @@ int mei_cl_disconnect(struct mei_cl *cl)
cl_err(dev, cl, "wrong status client disconnect.\n");
if (err)
- cl_dbg(dev, cl, "wait failed disconnect err=%08x\n",
- err);
+ cl_dbg(dev, cl, "wait failed disconnect err=%d\n", err);
cl_err(dev, cl, "failed to disconnect from FW client.\n");
}
@@ -503,6 +522,10 @@ int mei_cl_disconnect(struct mei_cl *cl)
mei_io_list_flush(&dev->ctrl_rd_list, cl);
mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
+ cl_dbg(dev, cl, "rpm: autosuspend\n");
+ pm_runtime_mark_last_busy(&dev->pdev->dev);
+ pm_runtime_put_autosuspend(&dev->pdev->dev);
+
mei_io_cb_free(cb);
return rets;
}
@@ -557,6 +580,13 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
dev = cl->dev;
+ rets = pm_runtime_get(&dev->pdev->dev);
+ if (rets < 0 && rets != -EINPROGRESS) {
+ pm_runtime_put_noidle(&dev->pdev->dev);
+ cl_err(dev, cl, "rpm: get failed %d\n", rets);
+ return rets;
+ }
+
cb = mei_io_cb_init(cl, file);
if (!cb) {
rets = -ENOMEM;
@@ -567,6 +597,7 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
/* run hbuf acquire last so we don't have to undo */
if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
+ cl->state = MEI_FILE_CONNECTING;
if (mei_hbm_cl_connect_req(dev, cl)) {
rets = -ENODEV;
goto out;
@@ -596,6 +627,10 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
rets = cl->status;
out:
+ cl_dbg(dev, cl, "rpm: autosuspend\n");
+ pm_runtime_mark_last_busy(&dev->pdev->dev);
+ pm_runtime_put_autosuspend(&dev->pdev->dev);
+
mei_io_cb_free(cb);
return rets;
}
@@ -713,23 +748,31 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
return -ENOTTY;
}
+ rets = pm_runtime_get(&dev->pdev->dev);
+ if (rets < 0 && rets != -EINPROGRESS) {
+ pm_runtime_put_noidle(&dev->pdev->dev);
+ cl_err(dev, cl, "rpm: get failed %d\n", rets);
+ return rets;
+ }
+
cb = mei_io_cb_init(cl, NULL);
- if (!cb)
- return -ENOMEM;
+ if (!cb) {
+ rets = -ENOMEM;
+ goto out;
+ }
/* always allocate at least client max message */
length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length);
rets = mei_io_cb_alloc_resp_buf(cb, length);
if (rets)
- goto err;
+ goto out;
cb->fop_type = MEI_FOP_READ;
if (mei_hbuf_acquire(dev)) {
- if (mei_hbm_cl_flow_control_req(dev, cl)) {
- cl_err(dev, cl, "flow control send failed\n");
- rets = -ENODEV;
- goto err;
- }
+ rets = mei_hbm_cl_flow_control_req(dev, cl);
+ if (rets < 0)
+ goto out;
+
list_add_tail(&cb->list, &dev->read_list.list);
} else {
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
@@ -737,9 +780,14 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
cl->read_cb = cb;
- return rets;
-err:
- mei_io_cb_free(cb);
+out:
+ cl_dbg(dev, cl, "rpm: autosuspend\n");
+ pm_runtime_mark_last_busy(&dev->pdev->dev);
+ pm_runtime_put_autosuspend(&dev->pdev->dev);
+
+ if (rets)
+ mei_io_cb_free(cb);
+
return rets;
}
@@ -776,7 +824,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
return rets;
if (rets == 0) {
- cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
+ cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
return 0;
}
@@ -856,6 +904,12 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size);
+ rets = pm_runtime_get(&dev->pdev->dev);
+ if (rets < 0 && rets != -EINPROGRESS) {
+ pm_runtime_put_noidle(&dev->pdev->dev);
+ cl_err(dev, cl, "rpm: get failed %d\n", rets);
+ return rets;
+ }
cb->fop_type = MEI_FOP_WRITE;
cb->buf_idx = 0;
@@ -926,6 +980,10 @@ out:
rets = buf->size;
err:
+ cl_dbg(dev, cl, "rpm: autosuspend\n");
+ pm_runtime_mark_last_busy(&dev->pdev->dev);
+ pm_runtime_put_autosuspend(&dev->pdev->dev);
+
return rets;
}
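
Every mei client entry point that may touch the hardware is now bracketed by runtime PM: pm_runtime_get() starts (or joins) a resume, and pm_runtime_put_autosuspend() re-arms the autosuspend timer once the I/O path is done. The shape of that bracket, stripped of the mei specifics (the function below is only a sketch):

#include <linux/pm_runtime.h>

static int example_io_path(struct device *dev)
{
	int rets;

	rets = pm_runtime_get(dev);		/* asynchronous resume is fine here */
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev);	/* drop the usage count we just took */
		return rets;
	}

	/* ... do the actual I/O ... */
	rets = 0;

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend window */
	pm_runtime_put_autosuspend(dev);
	return rets;
}
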
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 4960288e543..804106209d7 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -14,10 +14,12 @@
*
*/
+#include <linux/export.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mei.h>
+#include <linux/pm_runtime.h>
#include "mei_dev.h"
#include "hbm.h"
@@ -58,6 +60,34 @@ static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status)
}
/**
+ * mei_hbm_idle - set hbm to idle state
+ *
+ * @dev: the device structure
+ */
+void mei_hbm_idle(struct mei_device *dev)
+{
+ dev->init_clients_timer = 0;
+ dev->hbm_state = MEI_HBM_IDLE;
+}
+
+/**
+ * mei_hbm_reset - reset hbm counters and bookkeeping data structures
+ *
+ * @dev: the device structure
+ */
+void mei_hbm_reset(struct mei_device *dev)
+{
+ dev->me_clients_num = 0;
+ dev->me_client_presentation_num = 0;
+ dev->me_client_index = 0;
+
+ kfree(dev->me_clients);
+ dev->me_clients = NULL;
+
+ mei_hbm_idle(dev);
+}
+
+/**
* mei_hbm_me_cl_allocate - allocates storage for me clients
*
* @dev: the device structure
@@ -69,9 +99,7 @@ static int mei_hbm_me_cl_allocate(struct mei_device *dev)
struct mei_me_client *clients;
int b;
- dev->me_clients_num = 0;
- dev->me_client_presentation_num = 0;
- dev->me_client_index = 0;
+ mei_hbm_reset(dev);
/* count how many ME clients we have */
for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
@@ -80,9 +108,6 @@ static int mei_hbm_me_cl_allocate(struct mei_device *dev)
if (dev->me_clients_num == 0)
return 0;
- kfree(dev->me_clients);
- dev->me_clients = NULL;
-
dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%ld.\n",
dev->me_clients_num * sizeof(struct mei_me_client));
/* allocate storage for ME clients representation */
@@ -133,17 +158,6 @@ bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf)
}
-/**
- * mei_hbm_idle - set hbm to idle state
- *
- * @dev: the device structure
- */
-void mei_hbm_idle(struct mei_device *dev)
-{
- dev->init_clients_timer = 0;
- dev->hbm_state = MEI_HBM_IDLE;
-}
-
int mei_hbm_start_wait(struct mei_device *dev)
{
int ret;
@@ -289,6 +303,34 @@ static int mei_hbm_prop_req(struct mei_device *dev)
return 0;
}
+/*
+ * mei_hbm_pg - sends pg command
+ *
+ * @dev: the device structure
+ * @pg_cmd: the pg command code
+ *
+ * This function returns -EIO on write failure
+ */
+int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd)
+{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ struct hbm_power_gate *req;
+ const size_t len = sizeof(struct hbm_power_gate);
+ int ret;
+
+ mei_hbm_hdr(mei_hdr, len);
+
+ req = (struct hbm_power_gate *)dev->wr_msg.data;
+ memset(req, 0, len);
+ req->hbm_cmd = pg_cmd;
+
+ ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ if (ret)
+ dev_err(&dev->pdev->dev, "power gate command write failed.\n");
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mei_hbm_pg);
+
/**
* mei_hbm_stop_req - send stop request message
*
@@ -701,6 +743,27 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
mei_hbm_cl_flow_control_res(dev, flow_control);
break;
+ case MEI_PG_ISOLATION_ENTRY_RES_CMD:
+ dev_dbg(&dev->pdev->dev, "power gate isolation entry response received\n");
+ dev->pg_event = MEI_PG_EVENT_RECEIVED;
+ if (waitqueue_active(&dev->wait_pg))
+ wake_up(&dev->wait_pg);
+ break;
+
+ case MEI_PG_ISOLATION_EXIT_REQ_CMD:
+ dev_dbg(&dev->pdev->dev, "power gate isolation exit request received\n");
+ dev->pg_event = MEI_PG_EVENT_RECEIVED;
+ if (waitqueue_active(&dev->wait_pg))
+ wake_up(&dev->wait_pg);
+ else
+ /*
+ * If the driver is not waiting on this then
+ * this is HW initiated exit from PG.
+ * Start runtime pm resume sequence to exit from PG.
+ */
+ pm_request_resume(&dev->pdev->dev);
+ break;
+
case HOST_CLIENT_PROPERTIES_RES_CMD:
dev_dbg(&dev->pdev->dev, "hbm: properties response: message received.\n");
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
index 20e8782711c..683eb2835ce 100644
--- a/drivers/misc/mei/hbm.h
+++ b/drivers/misc/mei/hbm.h
@@ -50,6 +50,7 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
}
void mei_hbm_idle(struct mei_device *dev);
+void mei_hbm_reset(struct mei_device *dev);
int mei_hbm_start_req(struct mei_device *dev);
int mei_hbm_start_wait(struct mei_device *dev);
int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl);
@@ -57,6 +58,7 @@ int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl);
int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl);
int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl);
bool mei_hbm_version_is_supported(struct mei_device *dev);
+int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd);
#endif /* _MEI_HBM_H_ */
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 66f411a6e8e..a7856c0ac57 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -115,6 +115,11 @@
#define MEI_DEV_ID_LPT_HR 0x8CBA /* Lynx Point H Refresh */
#define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */
+
+/* Host Firmware Status Registers in PCI Config Space */
+#define PCI_CFG_HFS_1 0x40
+#define PCI_CFG_HFS_2 0x48
+
/*
* MEI HW Section
*/
@@ -128,6 +133,8 @@
#define ME_CB_RW 8
/* ME_CSR_HA - ME Control Status Host Access register (read only) */
#define ME_CSR_HA 0xC
+/* H_HGC_CSR - PGI register */
+#define H_HPG_CSR 0x10
/* register bits of H_CSR (Host Control Status register) */
@@ -157,6 +164,8 @@ access to ME_CBD */
#define ME_CBWP_HRA 0x00FF0000
/* ME CB Read Pointer HRA - host read only access to ME_CBRP */
#define ME_CBRP_HRA 0x0000FF00
+/* ME Power Gate Isolation Capability HRA - host ready only access */
+#define ME_PGIC_HRA 0x00000040
/* ME Reset HRA - host read only access to ME_RST */
#define ME_RST_HRA 0x00000010
/* ME Ready HRA - host read only access to ME_RDY */
@@ -168,4 +177,9 @@ access to ME_CBD */
/* ME Interrupt Enable HRA - host read only access to ME_IE */
#define ME_IE_HRA 0x00000001
+
+/* register bits - H_HPG_CSR */
+#define H_HPG_CSR_PGIHEXR 0x00000001
+#define H_HPG_CSR_PGI 0x00000002
+
#endif /* _MEI_HW_MEI_REGS_H_ */
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 8dbdaaef1af..6a2d272cea4 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -109,10 +109,27 @@ static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr)
*/
static void mei_me_hw_config(struct mei_device *dev)
{
+ struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr = mei_hcsr_read(to_me_hw(dev));
/* Doesn't change in runtime */
dev->hbuf_depth = (hcsr & H_CBD) >> 24;
+
+ hw->pg_state = MEI_PG_OFF;
+}
+
+/**
+ * mei_me_pg_state - translate internal pg state
+ * to the mei power gating state
+ *
+ * @dev: the device structure
+ * returns: the power gating state kept in hw->pg_state
+ */
+static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ return hw->pg_state;
}
+
/**
* mei_clear_interrupts - clear and stop interrupts
*
@@ -164,6 +181,9 @@ static void mei_me_hw_reset_release(struct mei_device *dev)
hcsr |= H_IG;
hcsr &= ~H_RST;
mei_hcsr_set(hw, hcsr);
+
+ /* complete this write before we set host ready on another CPU */
+ mmiowb();
}
/**
* mei_me_hw_reset - resets fw via mei csr register.
@@ -183,8 +203,21 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
else
hcsr &= ~H_IE;
+ dev->recvd_hw_ready = false;
mei_me_reg_write(hw, H_CSR, hcsr);
+ /*
+ * Host reads the H_CSR once to ensure that the
+ * posted write to H_CSR completes.
+ */
+ hcsr = mei_hcsr_read(hw);
+
+ if ((hcsr & H_RST) == 0)
+ dev_warn(&dev->pdev->dev, "H_RST is not set = 0x%08X", hcsr);
+
+ if ((hcsr & H_RDY) == H_RDY)
+ dev_warn(&dev->pdev->dev, "H_RDY is not cleared 0x%08X", hcsr);
+
if (intr_enable == false)
mei_me_hw_reset_release(dev);
@@ -201,6 +234,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
static void mei_me_host_set_ready(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
+ hw->host_hw_state = mei_hcsr_read(hw);
hw->host_hw_state |= H_IE | H_IG | H_RDY;
mei_hcsr_set(hw, hw->host_hw_state);
}
@@ -233,10 +267,7 @@ static bool mei_me_hw_is_ready(struct mei_device *dev)
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
int err;
- if (mei_me_hw_is_ready(dev))
- return 0;
- dev->recvd_hw_ready = false;
mutex_unlock(&dev->device_lock);
err = wait_event_interruptible_timeout(dev->wait_hw_ready,
dev->recvd_hw_ready,
@@ -431,6 +462,144 @@ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
}
/**
+ * mei_me_pg_enter - write pg enter register to mei device.
+ *
+ * @dev: the device structure
+ */
+static void mei_me_pg_enter(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 reg = mei_me_reg_read(hw, H_HPG_CSR);
+ reg |= H_HPG_CSR_PGI;
+ mei_me_reg_write(hw, H_HPG_CSR, reg);
+}
+
+/**
+ * mei_me_pg_exit - write pg exit register to mei device.
+ *
+ * @dev: the device structure
+ */
+static void mei_me_pg_exit(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 reg = mei_me_reg_read(hw, H_HPG_CSR);
+
+ WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");
+
+ reg |= H_HPG_CSR_PGIHEXR;
+ mei_me_reg_write(hw, H_HPG_CSR, reg);
+}
+
+/**
+ * mei_me_pg_set_sync - perform pg entry procedure
+ *
+ * @dev: the device structure
+ *
+ * returns 0 on success, an error code otherwise
+ */
+int mei_me_pg_set_sync(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
+ int ret;
+
+ dev->pg_event = MEI_PG_EVENT_WAIT;
+
+ ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
+ if (ret)
+ return ret;
+
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(dev->wait_pg,
+ dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
+ mutex_lock(&dev->device_lock);
+
+ if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
+ mei_me_pg_enter(dev);
+ ret = 0;
+ } else {
+ ret = -ETIME;
+ }
+
+ dev->pg_event = MEI_PG_EVENT_IDLE;
+ hw->pg_state = MEI_PG_ON;
+
+ return ret;
+}
+
+/**
+ * mei_me_pg_unset_sync - perform pg exit procedure
+ *
+ * @dev: the device structure
+ *
+ * returns 0 on success, an error code otherwise
+ */
+int mei_me_pg_unset_sync(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
+ int ret;
+
+ if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
+ goto reply;
+
+ dev->pg_event = MEI_PG_EVENT_WAIT;
+
+ mei_me_pg_exit(dev);
+
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(dev->wait_pg,
+ dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
+ mutex_lock(&dev->device_lock);
+
+reply:
+ if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
+ ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
+ else
+ ret = -ETIME;
+
+ dev->pg_event = MEI_PG_EVENT_IDLE;
+ hw->pg_state = MEI_PG_OFF;
+
+ return ret;
+}
+
+/**
+ * mei_me_pg_is_enabled - detect if PG is supported by HW
+ *
+ * @dev: the device structure
+ *
+ * returns: true if pg is supported, false otherwise
+ */
+static bool mei_me_pg_is_enabled(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 reg = mei_me_reg_read(hw, ME_CSR_HA);
+
+ if ((reg & ME_PGIC_HRA) == 0)
+ goto notsupported;
+
+ if (dev->version.major_version < HBM_MAJOR_VERSION_PGI)
+ goto notsupported;
+
+ if (dev->version.major_version == HBM_MAJOR_VERSION_PGI &&
+ dev->version.minor_version < HBM_MINOR_VERSION_PGI)
+ goto notsupported;
+
+ return true;
+
+notsupported:
+ dev_dbg(&dev->pdev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n",
+ !!(reg & ME_PGIC_HRA),
+ dev->version.major_version,
+ dev->version.minor_version,
+ HBM_MAJOR_VERSION_PGI,
+ HBM_MINOR_VERSION_PGI);
+
+ return false;
+}
+
+/**
* mei_me_irq_quick_handler - The ISR of the MEI device
*
* @irq: The irq number
@@ -491,14 +660,13 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/* check if we need to start the dev */
if (!mei_host_is_ready(dev)) {
if (mei_hw_is_ready(dev)) {
+ mei_me_hw_reset_release(dev);
dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
dev->recvd_hw_ready = true;
wake_up_interruptible(&dev->wait_hw_ready);
} else {
-
- dev_dbg(&dev->pdev->dev, "Reset Completed.\n");
- mei_me_hw_reset_release(dev);
+ dev_dbg(&dev->pdev->dev, "Spurious Interrupt\n");
}
goto end;
}
@@ -524,9 +692,15 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
- rets = mei_irq_write_handler(dev, &complete_list);
-
- dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+ /*
+ * During the PG handshake the only allowed write is the reply to
+ * the PG exit message, so block calling the write function
+ * if the pg event is not idle
+ */
+ if (dev->pg_event == MEI_PG_EVENT_IDLE) {
+ rets = mei_irq_write_handler(dev, &complete_list);
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+ }
mei_irq_compl_handler(dev, &complete_list);
@@ -535,8 +709,65 @@ end:
mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
}
+
+/**
+ * mei_me_fw_status - retrieve fw status from the pci config space
+ *
+ * @dev: the device structure
+ * @fw_status: fw status registers storage
+ *
+ * returns 0 on success, an error code otherwise
+ */
+static int mei_me_fw_status(struct mei_device *dev,
+ struct mei_fw_status *fw_status)
+{
+ const u32 pci_cfg_reg[] = {PCI_CFG_HFS_1, PCI_CFG_HFS_2};
+ int i;
+
+ if (!fw_status)
+ return -EINVAL;
+
+ switch (dev->pdev->device) {
+ case MEI_DEV_ID_IBXPK_1:
+ case MEI_DEV_ID_IBXPK_2:
+ case MEI_DEV_ID_CPT_1:
+ case MEI_DEV_ID_PBG_1:
+ case MEI_DEV_ID_PPT_1:
+ case MEI_DEV_ID_PPT_2:
+ case MEI_DEV_ID_PPT_3:
+ case MEI_DEV_ID_LPT_H:
+ case MEI_DEV_ID_LPT_W:
+ case MEI_DEV_ID_LPT_LP:
+ case MEI_DEV_ID_LPT_HR:
+ case MEI_DEV_ID_WPT_LP:
+ fw_status->count = 2;
+ break;
+ case MEI_DEV_ID_ICH10_1:
+ case MEI_DEV_ID_ICH10_2:
+ case MEI_DEV_ID_ICH10_3:
+ case MEI_DEV_ID_ICH10_4:
+ fw_status->count = 1;
+ break;
+ default:
+ fw_status->count = 0;
+ break;
+ }
+
+ for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) {
+ int ret;
+ ret = pci_read_config_dword(dev->pdev,
+ pci_cfg_reg[i], &fw_status->status[i]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
static const struct mei_hw_ops mei_me_hw_ops = {
+ .pg_state = mei_me_pg_state,
+
+ .fw_status = mei_me_fw_status,
.host_is_ready = mei_me_host_is_ready,
.hw_is_ready = mei_me_hw_is_ready,
@@ -544,6 +775,8 @@ static const struct mei_hw_ops mei_me_hw_ops = {
.hw_config = mei_me_hw_config,
.hw_start = mei_me_hw_start,
+ .pg_is_enabled = mei_me_pg_is_enabled,
+
.intr_clear = mei_me_intr_clear,
.intr_enable = mei_me_intr_enable,
.intr_disable = mei_me_intr_disable,
@@ -559,14 +792,81 @@ static const struct mei_hw_ops mei_me_hw_ops = {
.read = mei_me_read_slots
};
+static bool mei_me_fw_type_nm(struct pci_dev *pdev)
+{
+ u32 reg;
+ pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
+ /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
+ return (reg & 0x600) == 0x200;
+}
+
+#define MEI_CFG_FW_NM \
+ .quirk_probe = mei_me_fw_type_nm
+
+static bool mei_me_fw_type_sps(struct pci_dev *pdev)
+{
+ u32 reg;
+ /* Read ME FW Status check for SPS Firmware */
+ pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+ /* if bits [19:16] = 15, running SPS Firmware */
+ return (reg & 0xf0000) == 0xf0000;
+}
+
+#define MEI_CFG_FW_SPS \
+ .quirk_probe = mei_me_fw_type_sps
+
+
+#define MEI_CFG_LEGACY_HFS \
+ .fw_status.count = 0
+
+#define MEI_CFG_ICH_HFS \
+ .fw_status.count = 1, \
+ .fw_status.status[0] = PCI_CFG_HFS_1
+
+#define MEI_CFG_PCH_HFS \
+ .fw_status.count = 2, \
+ .fw_status.status[0] = PCI_CFG_HFS_1, \
+ .fw_status.status[1] = PCI_CFG_HFS_2
+
+
+/* ICH Legacy devices */
+const struct mei_cfg mei_me_legacy_cfg = {
+ MEI_CFG_LEGACY_HFS,
+};
+
+/* ICH devices */
+const struct mei_cfg mei_me_ich_cfg = {
+ MEI_CFG_ICH_HFS,
+};
+
+/* PCH devices */
+const struct mei_cfg mei_me_pch_cfg = {
+ MEI_CFG_PCH_HFS,
+};
+
+
+/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
+const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
+ MEI_CFG_PCH_HFS,
+ MEI_CFG_FW_NM,
+};
+
+/* PCH Lynx Point with quirk for SPS Firmware exclusion */
+const struct mei_cfg mei_me_lpt_cfg = {
+ MEI_CFG_PCH_HFS,
+ MEI_CFG_FW_SPS,
+};
+
/**
* mei_me_dev_init - allocates and initializes the mei device structure
*
* @pdev: The pci device structure
+ * @cfg: per device generation config
*
* returns The mei_device_device pointer on success, NULL on failure.
*/
-struct mei_device *mei_me_dev_init(struct pci_dev *pdev)
+struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
+ const struct mei_cfg *cfg)
{
struct mei_device *dev;
@@ -575,7 +875,7 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev)
if (!dev)
return NULL;
- mei_device_init(dev);
+ mei_device_init(dev, cfg);
dev->ops = &mei_me_hw_ops;
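
The new struct mei_cfg lets one driver serve several hardware generations: each PCI ID carries, through driver_data (see the MEI_PCI_DEVICE macro added to mei_dev.h), a config describing its firmware-status registers plus an optional quirk_probe() that can veto binding, as done above for Node Manager and SPS firmware. A rough sketch of how a probe routine might consume it; the real wiring lives in pci-me.c, which is not part of this hunk, so the flow below is only an assumption:

#include <linux/pci.h>
/* struct mei_cfg is declared in drivers/misc/mei/mei_dev.h */

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg = (const struct mei_cfg *)ent->driver_data;

	if (!cfg)
		return -ENODEV;

	/* a generation may opt out, e.g. when SPS or NM firmware owns the device */
	if (cfg->quirk_probe && cfg->quirk_probe(pdev))
		return -ENODEV;

	/* ... pci_enable_device(pdev), mei_me_dev_init(pdev, cfg), ... */
	return 0;
}
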
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 893d5119fa9..12b0f4bbe1f 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -24,6 +24,8 @@
#include "mei_dev.h"
#include "client.h"
+#define MEI_ME_RPM_TIMEOUT 500 /* ms */
+
struct mei_me_hw {
void __iomem *mem_addr;
/*
@@ -31,11 +33,22 @@ struct mei_me_hw {
*/
u32 host_hw_state;
u32 me_hw_state;
+ enum mei_pg_state pg_state;
};
#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)
-struct mei_device *mei_me_dev_init(struct pci_dev *pdev);
+extern const struct mei_cfg mei_me_legacy_cfg;
+extern const struct mei_cfg mei_me_ich_cfg;
+extern const struct mei_cfg mei_me_pch_cfg;
+extern const struct mei_cfg mei_me_pch_cpt_pbg_cfg;
+extern const struct mei_cfg mei_me_lpt_cfg;
+
+struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
+ const struct mei_cfg *cfg);
+
+int mei_me_pg_set_sync(struct mei_device *dev);
+int mei_me_pg_unset_sync(struct mei_device *dev);
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id);
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id);
diff --git a/drivers/misc/mei/hw-txe-regs.h b/drivers/misc/mei/hw-txe-regs.h
index 7283c24c1af..f19229c4e65 100644
--- a/drivers/misc/mei/hw-txe-regs.h
+++ b/drivers/misc/mei/hw-txe-regs.h
@@ -89,7 +89,7 @@ enum {
# define PCI_CFG_TXE_FW_STS0_ERR_CODE_MSK 0x0000F000
# define PCI_CFG_TXE_FW_STS0_OP_MODE_MSK 0x000F0000
# define PCI_CFG_TXE_FW_STS0_RST_CNT_MSK 0x00F00000
-
+#define PCI_CFG_TXE_FW_STS1 0x48
#define IPC_BASE_ADDR 0x80400 /* SeC IPC Base Address */
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index f60182a52f9..93273783dec 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -158,7 +158,7 @@ static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req)
dev_dbg(&dev->pdev->dev, "Aliveness current=%d request=%d\n",
hw->aliveness, req);
if (do_req) {
- hw->recvd_aliveness = false;
+ dev->pg_event = MEI_PG_EVENT_WAIT;
mei_txe_br_reg_write(hw, SICR_HOST_ALIVENESS_REQ_REG, req);
}
return do_req;
@@ -213,6 +213,7 @@ static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
do {
hw->aliveness = mei_txe_aliveness_get(dev);
if (hw->aliveness == expected) {
+ dev->pg_event = MEI_PG_EVENT_IDLE;
dev_dbg(&dev->pdev->dev,
"aliveness settled after %d msecs\n", t);
return t;
@@ -223,6 +224,7 @@ static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
t += MSEC_PER_SEC / 5;
} while (t < SEC_ALIVENESS_WAIT_TIMEOUT);
+ dev->pg_event = MEI_PG_EVENT_IDLE;
dev_err(&dev->pdev->dev, "aliveness timed out\n");
return -ETIME;
}
@@ -249,19 +251,22 @@ static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected)
return 0;
mutex_unlock(&dev->device_lock);
- err = wait_event_timeout(hw->wait_aliveness,
- hw->recvd_aliveness, timeout);
+ err = wait_event_timeout(hw->wait_aliveness_resp,
+ dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
mutex_lock(&dev->device_lock);
hw->aliveness = mei_txe_aliveness_get(dev);
ret = hw->aliveness == expected ? 0 : -ETIME;
if (ret)
- dev_err(&dev->pdev->dev, "aliveness timed out");
+ dev_warn(&dev->pdev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n",
+ err, hw->aliveness, dev->pg_event);
else
- dev_dbg(&dev->pdev->dev, "aliveness settled after %d msecs\n",
- jiffies_to_msecs(timeout - err));
- hw->recvd_aliveness = false;
+ dev_dbg(&dev->pdev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n",
+ jiffies_to_msecs(timeout - err),
+ hw->aliveness, dev->pg_event);
+
+ dev->pg_event = MEI_PG_EVENT_IDLE;
return ret;
}
@@ -280,6 +285,32 @@ int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req)
}
/**
+ * mei_txe_pg_is_enabled - detect if PG is supported by HW
+ *
+ * @dev: the device structure
+ *
+ * returns: true if pg is supported, false otherwise
+ */
+static bool mei_txe_pg_is_enabled(struct mei_device *dev)
+{
+ return true;
+}
+
+/**
+ * mei_txe_pg_state - translate aliveness register value
+ * to the mei power gating state
+ *
+ * @dev: the device structure
+ *
+ * returns: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
+ */
+static inline enum mei_pg_state mei_txe_pg_state(struct mei_device *dev)
+{
+ struct mei_txe_hw *hw = to_txe_hw(dev);
+ return hw->aliveness ? MEI_PG_OFF : MEI_PG_ON;
+}
+
+/**
* mei_txe_input_ready_interrupt_enable - sets the Input Ready Interrupt
*
* @dev: the device structure
@@ -589,7 +620,10 @@ static int mei_txe_write(struct mei_device *dev,
mei_txe_input_ready_interrupt_enable(dev);
if (!mei_txe_is_input_ready(dev)) {
- dev_err(&dev->pdev->dev, "Input is not ready");
+ struct mei_fw_status fw_status;
+ mei_fw_status(dev, &fw_status);
+ dev_err(&dev->pdev->dev, "Input is not ready " FW_STS_FMT "\n",
+ FW_STS_PRM(fw_status));
return -EAGAIN;
}
@@ -960,9 +994,9 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
/* Clear the interrupt cause */
dev_dbg(&dev->pdev->dev,
"Aliveness Interrupt: Status: %d\n", hw->aliveness);
- hw->recvd_aliveness = true;
- if (waitqueue_active(&hw->wait_aliveness))
- wake_up(&hw->wait_aliveness);
+ dev->pg_event = MEI_PG_EVENT_RECEIVED;
+ if (waitqueue_active(&hw->wait_aliveness_resp))
+ wake_up(&hw->wait_aliveness_resp);
}
@@ -1008,15 +1042,51 @@ end:
return IRQ_HANDLED;
}
+
+/**
+ * mei_txe_fw_status - retrieve fw status from the pci config space
+ *
+ * @dev: the device structure
+ * @fw_status: fw status registers storage
+ *
+ * returns: 0 on success, an error code otherwise
+ */
+static int mei_txe_fw_status(struct mei_device *dev,
+ struct mei_fw_status *fw_status)
+{
+ const u32 pci_cfg_reg[] = {PCI_CFG_TXE_FW_STS0, PCI_CFG_TXE_FW_STS1};
+ int i;
+
+ if (!fw_status)
+ return -EINVAL;
+
+ fw_status->count = 2;
+
+ for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) {
+ int ret;
+ ret = pci_read_config_dword(dev->pdev,
+ pci_cfg_reg[i], &fw_status->status[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct mei_hw_ops mei_txe_hw_ops = {
+ .fw_status = mei_txe_fw_status,
.host_is_ready = mei_txe_host_is_ready,
+ .pg_state = mei_txe_pg_state,
+
.hw_is_ready = mei_txe_hw_is_ready,
.hw_reset = mei_txe_hw_reset,
.hw_config = mei_txe_hw_config,
.hw_start = mei_txe_hw_start,
+ .pg_is_enabled = mei_txe_pg_is_enabled,
+
.intr_clear = mei_txe_intr_clear,
.intr_enable = mei_txe_intr_enable,
.intr_disable = mei_txe_intr_disable,
@@ -1034,14 +1104,27 @@ static const struct mei_hw_ops mei_txe_hw_ops = {
};
+#define MEI_CFG_TXE_FW_STS \
+ .fw_status.count = 2, \
+ .fw_status.status[0] = PCI_CFG_TXE_FW_STS0, \
+ .fw_status.status[1] = PCI_CFG_TXE_FW_STS1
+
+const struct mei_cfg mei_txe_cfg = {
+ MEI_CFG_TXE_FW_STS,
+};
+
+
/**
* mei_txe_dev_init - allocates and initializes txe hardware specific structure
*
* @pdev - pci device
+ * @cfg - per device generation config
+ *
* returns struct mei_device * on success or NULL;
*
*/
-struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
+struct mei_device *mei_txe_dev_init(struct pci_dev *pdev,
+ const struct mei_cfg *cfg)
{
struct mei_device *dev;
struct mei_txe_hw *hw;
@@ -1051,11 +1134,11 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
if (!dev)
return NULL;
- mei_device_init(dev);
+ mei_device_init(dev, cfg);
hw = to_txe_hw(dev);
- init_waitqueue_head(&hw->wait_aliveness);
+ init_waitqueue_head(&hw->wait_aliveness_resp);
dev->ops = &mei_txe_hw_ops;
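
The TXE aliveness handshake now reuses the generic dev->pg_event state machine: the requester marks the event as WAIT, pokes the request register, drops the device lock and sleeps on wait_aliveness_resp until the interrupt thread flips the event to RECEIVED and wakes it. The same request/response pattern, boiled down with generic names (all identifiers below are illustrative):

#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/errno.h>

enum pg_event { EV_IDLE, EV_WAIT, EV_RECEIVED };

struct example_dev {
	struct mutex lock;
	wait_queue_head_t wait_resp;
	enum pg_event event;
};

/* called with d->lock held */
static int example_request_and_wait(struct example_dev *d, unsigned long timeout)
{
	long left;

	d->event = EV_WAIT;
	/* ... write the request to the hardware here ... */

	mutex_unlock(&d->lock);
	left = wait_event_timeout(d->wait_resp, d->event == EV_RECEIVED, timeout);
	mutex_lock(&d->lock);

	d->event = EV_IDLE;
	return left ? 0 : -ETIME;
}

/* called from the interrupt thread when the hardware answers */
static void example_isr_response(struct example_dev *d)
{
	d->event = EV_RECEIVED;
	wake_up(&d->wait_resp);
}
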
diff --git a/drivers/misc/mei/hw-txe.h b/drivers/misc/mei/hw-txe.h
index 0812d98633a..e244af79167 100644
--- a/drivers/misc/mei/hw-txe.h
+++ b/drivers/misc/mei/hw-txe.h
@@ -22,6 +22,8 @@
#include "hw.h"
#include "hw-txe-regs.h"
+#define MEI_TXI_RPM_TIMEOUT 500 /* ms */
+
/* Flatten Hierarchy interrupt cause */
#define TXE_INTR_READINESS_BIT 0 /* HISR_INT_0_STS */
#define TXE_INTR_READINESS HISR_INT_0_STS
@@ -35,12 +37,11 @@
/**
* struct mei_txe_hw - txe hardware specifics
*
- * @mem_addr: SeC and BRIDGE bars
- * @aliveness: aliveness (power gating) state of the hardware
- * @readiness: readiness state of the hardware
- * @wait_aliveness: aliveness wait queue
- * @recvd_aliveness: aliveness interrupt was recived
- * @intr_cause: translated interrupt cause
+ * @mem_addr: SeC and BRIDGE bars
+ * @aliveness: aliveness (power gating) state of the hardware
+ * @readiness: readiness state of the hardware
+ * @wait_aliveness_resp: aliveness wait queue
+ * @intr_cause: translated interrupt cause
*/
struct mei_txe_hw {
void __iomem *mem_addr[NUM_OF_MEM_BARS];
@@ -48,8 +49,7 @@ struct mei_txe_hw {
u32 readiness;
u32 slots;
- wait_queue_head_t wait_aliveness;
- bool recvd_aliveness;
+ wait_queue_head_t wait_aliveness_resp;
unsigned long intr_cause;
};
@@ -61,7 +61,10 @@ static inline struct mei_device *hw_txe_to_mei(struct mei_txe_hw *hw)
return container_of((void *)hw, struct mei_device, hw);
}
-struct mei_device *mei_txe_dev_init(struct pci_dev *pdev);
+extern const struct mei_cfg mei_txe_cfg;
+
+struct mei_device *mei_txe_dev_init(struct pci_dev *pdev,
+ const struct mei_cfg *cfg);
irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id);
irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id);
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 6b476ab49b2..dd448e58cc8 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -31,14 +31,21 @@
#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */
#define MEI_IAMTHIF_READ_TIMER 10 /* HPS */
+#define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */
#define MEI_HBM_TIMEOUT 1 /* 1 second */
/*
* MEI Version
*/
-#define HBM_MINOR_VERSION 0
+#define HBM_MINOR_VERSION 1
#define HBM_MAJOR_VERSION 1
+/*
+ * MEI version with PGI support
+ */
+#define HBM_MINOR_VERSION_PGI 1
+#define HBM_MAJOR_VERSION_PGI 1
+
/* Host bus message command opcode */
#define MEI_HBM_CMD_OP_MSK 0x7f
/* Host bus message command RESPONSE */
@@ -69,6 +76,11 @@
#define MEI_FLOW_CONTROL_CMD 0x08
+#define MEI_PG_ISOLATION_ENTRY_REQ_CMD 0x0a
+#define MEI_PG_ISOLATION_ENTRY_RES_CMD 0x8a
+#define MEI_PG_ISOLATION_EXIT_REQ_CMD 0x0b
+#define MEI_PG_ISOLATION_EXIT_RES_CMD 0x8b
+
/*
* MEI Stop Reason
* used by hbm_host_stop_request.reason
@@ -208,6 +220,17 @@ struct hbm_props_response {
} __packed;
/**
+ * struct hbm_power_gate - power gate request/response
+ *
+ * @hbm_cmd - bus message command header
+ * @reserved[3]
+ */
+struct hbm_power_gate {
+ u8 hbm_cmd;
+ u8 reserved[3];
+} __packed;
+
+/**
* struct hbm_client_connect_request - connect/disconnect request
*
* @hbm_cmd - bus message command header
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 4460975c0ee..00692922248 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -74,9 +74,13 @@ int mei_reset(struct mei_device *dev)
if (state != MEI_DEV_INITIALIZING &&
state != MEI_DEV_DISABLED &&
state != MEI_DEV_POWER_DOWN &&
- state != MEI_DEV_POWER_UP)
- dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n",
- mei_dev_state_str(state));
+ state != MEI_DEV_POWER_UP) {
+ struct mei_fw_status fw_status;
+ mei_fw_status(dev, &fw_status);
+ dev_warn(&dev->pdev->dev,
+ "unexpected reset: dev_state = %s " FW_STS_FMT "\n",
+ mei_dev_state_str(state), FW_STS_PRM(fw_status));
+ }
/* we're already in reset, cancel the init timer
* if the reset was called due the hbm protocol error
@@ -118,8 +122,8 @@ int mei_reset(struct mei_device *dev)
mei_amthif_reset_params(dev);
}
+ mei_hbm_reset(dev);
- dev->me_clients_num = 0;
dev->rd_msg_hdr = 0;
dev->wd_pending = false;
@@ -303,15 +307,58 @@ void mei_stop(struct mei_device *dev)
}
EXPORT_SYMBOL_GPL(mei_stop);
+/**
+ * mei_write_is_idle - check if the write queues are idle
+ *
+ * @dev: the device structure
+ *
+ * returns true if there is no pending write
+ */
+bool mei_write_is_idle(struct mei_device *dev)
+{
+ bool idle = (dev->dev_state == MEI_DEV_ENABLED &&
+ list_empty(&dev->ctrl_wr_list.list) &&
+ list_empty(&dev->write_list.list));
+ dev_dbg(&dev->pdev->dev, "write pg: is idle[%d] state=%s ctrl=%d write=%d\n",
+ idle,
+ mei_dev_state_str(dev->dev_state),
+ list_empty(&dev->ctrl_wr_list.list),
+ list_empty(&dev->write_list.list));
-void mei_device_init(struct mei_device *dev)
+ return idle;
+}
+EXPORT_SYMBOL_GPL(mei_write_is_idle);
+
+int mei_fw_status(struct mei_device *dev, struct mei_fw_status *fw_status)
+{
+ int i;
+ const struct mei_fw_status *fw_src = &dev->cfg->fw_status;
+
+ if (!fw_status)
+ return -EINVAL;
+
+ fw_status->count = fw_src->count;
+ for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
+ int ret;
+ ret = pci_read_config_dword(dev->pdev,
+ fw_src->status[i], &fw_status->status[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mei_fw_status);
+
+void mei_device_init(struct mei_device *dev, const struct mei_cfg *cfg)
{
/* setup our list array */
INIT_LIST_HEAD(&dev->file_list);
INIT_LIST_HEAD(&dev->device_list);
mutex_init(&dev->device_lock);
init_waitqueue_head(&dev->wait_hw_ready);
+ init_waitqueue_head(&dev->wait_pg);
init_waitqueue_head(&dev->wait_recvd_msg);
init_waitqueue_head(&dev->wait_stop_wd);
dev->dev_state = MEI_DEV_INITIALIZING;
@@ -340,6 +387,9 @@ void mei_device_init(struct mei_device *dev)
* 0: Reserved for MEI Bus Message communications
*/
bitmap_set(dev->host_clients_map, 0, 1);
+
+ dev->pg_event = MEI_PG_EVENT_IDLE;
+ dev->cfg = cfg;
}
EXPORT_SYMBOL_GPL(mei_device_init);
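
mei_fw_status() is now a small generic loop: the per-generation config (dev->cfg->fw_status) names which PCI config-space offsets hold the firmware status words, and the core just reads them with pci_read_config_dword(). Dumping the same ME registers directly would look roughly like this (the offsets are the PCI_CFG_HFS_* values from hw-me-regs.h; the function itself is illustrative):

#include <linux/kernel.h>
#include <linux/pci.h>

static void example_dump_fw_status(struct pci_dev *pdev)
{
	static const u32 offs[] = { 0x40 /* PCI_CFG_HFS_1 */,
				    0x48 /* PCI_CFG_HFS_2 */ };
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(offs); i++) {
		if (pci_read_config_dword(pdev, offs[i], &val))
			break;
		dev_dbg(&pdev->dev, "HFS_%d = 0x%08x\n", i + 1, val);
	}
}
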
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 29b5af8efb7..4e3cba6da3f 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -455,8 +455,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
cl->status = 0;
list_del(&cb->list);
- if (MEI_WRITING == cl->writing_state &&
- cb->fop_type == MEI_FOP_WRITE &&
+ if (cb->fop_type == MEI_FOP_WRITE &&
cl != &dev->iamthif_cl) {
cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
cl->writing_state = MEI_WRITE_COMPLETE;
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index b35594dbf52..66f0a1a0645 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -467,7 +467,6 @@ static int mei_ioctl_connect_client(struct file *file,
}
cl->me_client_id = dev->me_clients[i].client_id;
- cl->state = MEI_FILE_CONNECTING;
dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
cl->me_client_id);
@@ -644,8 +643,7 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
goto out;
}
- if (MEI_WRITE_COMPLETE == cl->writing_state)
- mask |= (POLLIN | POLLRDNORM);
+ mask |= (POLLIN | POLLRDNORM);
out:
mutex_unlock(&dev->device_lock);
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 94a516716d2..5c7e990e2f2 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -153,6 +153,20 @@ struct mei_msg_data {
unsigned char *data;
};
+/* Maximum number of processed FW status registers */
+#define MEI_FW_STATUS_MAX 2
+
+/*
+ * struct mei_fw_status - storage of FW status data
+ *
+ * @count - number of actually available elements in array
+ * @status - FW status registers
+ */
+struct mei_fw_status {
+ int count;
+ u32 status[MEI_FW_STATUS_MAX];
+};
+
/**
* struct mei_me_client - representation of me (fw) client
*
@@ -213,6 +227,7 @@ struct mei_cl {
/** struct mei_hw_ops
*
+ * @fw_status - read FW status from PCI config space
* @host_is_ready - query for host readiness
* @hw_is_ready - query if hw is ready
@@ -220,6 +235,9 @@ struct mei_cl {
* @hw_start - start hw after reset
* @hw_config - configure hw
+ * @pg_state - power gating state of the device
+ * @pg_is_enabled - is power gating enabled
+ *
* @intr_clear - clear pending interrupts
* @intr_enable - enable interrupts
* @intr_disable - disable interrupts
@@ -237,6 +255,8 @@ struct mei_cl {
*/
struct mei_hw_ops {
+ int (*fw_status)(struct mei_device *dev,
+ struct mei_fw_status *fw_status);
bool (*host_is_ready)(struct mei_device *dev);
bool (*hw_is_ready)(struct mei_device *dev);
@@ -244,6 +264,9 @@ struct mei_hw_ops {
int (*hw_start)(struct mei_device *dev);
void (*hw_config)(struct mei_device *dev);
+ enum mei_pg_state (*pg_state)(struct mei_device *dev);
+ bool (*pg_is_enabled)(struct mei_device *dev);
+
void (*intr_clear)(struct mei_device *dev);
void (*intr_enable)(struct mei_device *dev);
void (*intr_disable)(struct mei_device *dev);
@@ -331,16 +354,61 @@ struct mei_cl_device {
void *priv_data;
};
+
+/**
+ * enum mei_pg_event - power gating transition events
+ *
+ * @MEI_PG_EVENT_IDLE: the driver is not in power gating transition
+ * @MEI_PG_EVENT_WAIT: the driver is waiting for a pg event to complete
+ * @MEI_PG_EVENT_RECEIVED: the driver received a pg event
+ */
+enum mei_pg_event {
+ MEI_PG_EVENT_IDLE,
+ MEI_PG_EVENT_WAIT,
+ MEI_PG_EVENT_RECEIVED,
+};
+
+/**
+ * enum mei_pg_state - device internal power gating state
+ *
+ * @MEI_PG_OFF: device is not power gated - it is active
+ * @MEI_PG_ON: device is power gated - it is in lower power state
+ */
+enum mei_pg_state {
+ MEI_PG_OFF = 0,
+ MEI_PG_ON = 1,
+};
+
+/*
+ * mei_cfg
+ *
+ * @fw_status - FW status register offsets in PCI config space
+ * @quirk_probe - device exclusion quirk
+ */
+struct mei_cfg {
+ const struct mei_fw_status fw_status;
+ bool (*quirk_probe)(struct pci_dev *pdev);
+};
+
+
+#define MEI_PCI_DEVICE(dev, cfg) \
+ .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \
+ .driver_data = (kernel_ulong_t)&(cfg)
+
+
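The per-generation configuration above is bound to a PCI ID through driver_data (via MEI_PCI_DEVICE) and read back in probe. A minimal sketch of how a configuration instance might be declared and referenced; the register offsets and the device ID are placeholders, not the driver's real values:

/* hypothetical per-generation config; offsets are placeholders */
static const struct mei_cfg mei_me_example_cfg = {
	.fw_status = {
		.count = 2,
		.status = {0x40, 0x48},	/* assumed PCI config space offsets */
	},
};

static const struct pci_device_id example_pci_tbl[] = {
	{MEI_PCI_DEVICE(0x1234, mei_me_example_cfg)},	/* hypothetical device ID */
	{0, }
};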
/**
* struct mei_device - MEI private device struct
* @reset_count - limits the number of consecutive resets
* @hbm_state - state of host bus message protocol
+ * @pg_event - power gating event
* @mem_addr - mem mapped base register address
 * @hbuf_depth - depth of hardware host/write buffer in slots
 * @hbuf_is_ready - query if the host/write buffer is ready
* @wr_msg - the buffer for hbm control messages
+ * @cfg - per device generation config and ops
*/
struct mei_device {
struct pci_dev *pdev; /* pointer to pci device struct */
@@ -371,6 +439,7 @@ struct mei_device {
* waiting queue for receive message from FW
*/
wait_queue_head_t wait_hw_ready;
+ wait_queue_head_t wait_pg;
wait_queue_head_t wait_recvd_msg;
wait_queue_head_t wait_stop_wd;
@@ -382,6 +451,14 @@ struct mei_device {
enum mei_hbm_state hbm_state;
u16 init_clients_timer;
+ /*
+ * Power Gating support
+ */
+ enum mei_pg_event pg_event;
+#ifdef CONFIG_PM_RUNTIME
+ struct dev_pm_domain pg_domain;
+#endif /* CONFIG_PM_RUNTIME */
+
unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; /* control messages */
u32 rd_msg_hdr;
@@ -442,6 +519,7 @@ struct mei_device {
const struct mei_hw_ops *ops;
+ const struct mei_cfg *cfg;
char hw[0] __aligned(sizeof(void *));
};
@@ -474,7 +552,7 @@ static inline u32 mei_slots2data(int slots)
/*
* mei init function prototypes
*/
-void mei_device_init(struct mei_device *dev);
+void mei_device_init(struct mei_device *dev, const struct mei_cfg *cfg);
int mei_reset(struct mei_device *dev);
int mei_start(struct mei_device *dev);
int mei_restart(struct mei_device *dev);
@@ -553,10 +631,22 @@ void mei_watchdog_unregister(struct mei_device *dev);
* Register Access Function
*/
+
static inline void mei_hw_config(struct mei_device *dev)
{
dev->ops->hw_config(dev);
}
+
+static inline enum mei_pg_state mei_pg_state(struct mei_device *dev)
+{
+ return dev->ops->pg_state(dev);
+}
+
+static inline bool mei_pg_is_enabled(struct mei_device *dev)
+{
+ return dev->ops->pg_is_enabled(dev);
+}
+
static inline int mei_hw_reset(struct mei_device *dev, bool enable)
{
return dev->ops->hw_reset(dev, enable);
@@ -629,8 +719,17 @@ static inline int mei_count_full_read_slots(struct mei_device *dev)
return dev->ops->rdbuf_full_slots(dev);
}
+int mei_fw_status(struct mei_device *dev, struct mei_fw_status *fw_status);
+
+#define FW_STS_FMT "%08X %08X"
+#define FW_STS_PRM(fw_status) \
+ (fw_status).count > 0 ? (fw_status).status[0] : 0xDEADBEEF, \
+ (fw_status).count > 1 ? (fw_status).status[1] : 0xDEADBEEF
+
bool mei_hbuf_acquire(struct mei_device *dev);
+bool mei_write_is_idle(struct mei_device *dev);
+
#if IS_ENABLED(CONFIG_DEBUG_FS)
int mei_dbgfs_register(struct mei_device *dev, const char *name);
void mei_dbgfs_deregister(struct mei_device *dev);
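For reference, FW_STS_FMT and FW_STS_PRM are meant to be used as a pair within one format string, as in the warning added to mei_reset() earlier in this patch:

	struct mei_fw_status fw_status;

	mei_fw_status(dev, &fw_status);
	dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s " FW_STS_FMT "\n",
		 mei_dev_state_str(state), FW_STS_PRM(fw_status));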
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 1c8fd3a3e13..1b46c64a649 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -33,6 +33,8 @@
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
+#include <linux/pm_runtime.h>
+
#include <linux/mei.h>
#include "mei_dev.h"
@@ -42,42 +44,44 @@
/* mei_pci_tbl - PCI Device ID Table */
static const struct pci_device_id mei_me_pci_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_H)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_W)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_HR)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_WPT_LP)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_82G35, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_82G965, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, mei_me_legacy_cfg)},
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, mei_me_legacy_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, mei_me_ich_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, mei_me_ich_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, mei_me_ich_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, mei_me_ich_cfg)},
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, mei_me_pch_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, mei_me_pch_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, mei_me_pch_cpt_pbg_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, mei_me_pch_cpt_pbg_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, mei_me_pch_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, mei_me_pch_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, mei_me_pch_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, mei_me_lpt_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, mei_me_lpt_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_lpt_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch_cfg)},
/* required last entry */
{0, }
@@ -85,28 +89,33 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);
+#ifdef CONFIG_PM_RUNTIME
+static inline void mei_me_set_pm_domain(struct mei_device *dev);
+static inline void mei_me_unset_pm_domain(struct mei_device *dev);
+#else
+static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
+static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
+#endif /* CONFIG_PM_RUNTIME */
+
/**
 * mei_me_quirk_probe - probe for devices that don't have a valid ME interface
*
* @pdev: PCI device structure
- * @ent: entry into pci_device_table
+ * @cfg: per generation config
*
* returns true if ME Interface is valid, false otherwise
*/
static bool mei_me_quirk_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct mei_cfg *cfg)
{
- u32 reg;
- if (ent->device == MEI_DEV_ID_PBG_1) {
- pci_read_config_dword(pdev, 0x48, &reg);
- /* make sure that bit 9 is up and bit 10 is down */
- if ((reg & 0x600) == 0x200) {
- dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
- return false;
- }
+ if (cfg->quirk_probe && cfg->quirk_probe(pdev)) {
+		dev_info(&pdev->dev, "Device doesn't have a valid ME Interface\n");
+ return false;
}
+
return true;
}
+
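With this refactoring the PBG-specific register check removed above is expected to live behind the new cfg->quirk_probe hook. A hedged sketch of such a callback, reconstructed from the deleted inline code (the function name is an assumption):

/* returns true when the device does not expose a working ME interface */
static bool mei_me_pbg_quirk_probe(struct pci_dev *pdev)
{
	u32 reg;

	pci_read_config_dword(pdev, 0x48, &reg);
	/* make sure that bit 9 is up and bit 10 is down */
	return (reg & 0x600) == 0x200;
}

A configuration such as mei_me_pch_cpt_pbg_cfg would then presumably set .quirk_probe to this callback.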
/**
 * mei_me_probe - Device Initialization Routine
*
@@ -117,15 +126,14 @@ static bool mei_me_quirk_probe(struct pci_dev *pdev,
*/
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
struct mei_device *dev;
struct mei_me_hw *hw;
int err;
- if (!mei_me_quirk_probe(pdev, ent)) {
- err = -ENODEV;
- goto end;
- }
+ if (!mei_me_quirk_probe(pdev, cfg))
+ return -ENODEV;
/* enable pci dev */
err = pci_enable_device(pdev);
@@ -157,7 +165,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* allocates and initializes the mei dev structure */
- dev = mei_me_dev_init(pdev);
+ dev = mei_me_dev_init(pdev, cfg);
if (!dev) {
err = -ENOMEM;
goto release_regions;
@@ -196,6 +204,9 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto release_irq;
}
+ pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+
err = mei_register(dev);
if (err)
goto release_irq;
@@ -204,6 +215,17 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
schedule_delayed_work(&dev->timer_work, HZ);
+ /*
+	 * For HW that is not wake capable the runtime PM framework
+	 * can't be used at the PCI device level.
+	 * Use domain runtime PM callbacks instead.
+ */
+ if (!pci_dev_run_wake(pdev))
+ mei_me_set_pm_domain(dev);
+
+ if (mei_pg_is_enabled(dev))
+ pm_runtime_put_noidle(&pdev->dev);
+
dev_dbg(&pdev->dev, "initialization successful.\n");
return 0;
@@ -243,12 +265,18 @@ static void mei_me_remove(struct pci_dev *pdev)
if (!dev)
return;
+ if (mei_pg_is_enabled(dev))
+ pm_runtime_get_noresume(&pdev->dev);
+
hw = to_me_hw(dev);
dev_dbg(&pdev->dev, "stop\n");
mei_stop(dev);
+ if (!pci_dev_run_wake(pdev))
+ mei_me_unset_pm_domain(dev);
+
/* disable interrupts */
mei_disable_interrupts(dev);
@@ -327,12 +355,120 @@ static int mei_me_pci_resume(struct device *device)
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+static int mei_me_pm_runtime_idle(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev;
+
+ dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n");
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return -ENODEV;
+ if (mei_write_is_idle(dev))
+ pm_schedule_suspend(device, MEI_ME_RPM_TIMEOUT * 2);
+
+ return -EBUSY;
+}
+
+static int mei_me_pm_runtime_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev;
+ int ret;
+
+ dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n");
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return -ENODEV;
+
+ mutex_lock(&dev->device_lock);
+
+ if (mei_write_is_idle(dev))
+ ret = mei_me_pg_set_sync(dev);
+ else
+ ret = -EAGAIN;
+
+ mutex_unlock(&dev->device_lock);
+
+ dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret);
+
+ return ret;
+}
+
+static int mei_me_pm_runtime_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev;
+ int ret;
+
+ dev_dbg(&pdev->dev, "rpm: me: runtime resume\n");
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return -ENODEV;
+
+ mutex_lock(&dev->device_lock);
+
+ ret = mei_me_pg_unset_sync(dev);
+
+ mutex_unlock(&dev->device_lock);
+
+ dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * mei_me_set_pm_domain - fill and set the pm domain structure for the device
+ *
+ * @dev: mei_device
+ */
+static inline void mei_me_set_pm_domain(struct mei_device *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+
+ if (pdev->dev.bus && pdev->dev.bus->pm) {
+ dev->pg_domain.ops = *pdev->dev.bus->pm;
+
+ dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend;
+ dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume;
+ dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle;
+
+ pdev->dev.pm_domain = &dev->pg_domain;
+ }
+}
+
+/**
+ * mei_me_unset_pm_domain - clean the pm domain structure for the device
+ *
+ * @dev: mei_device
+ */
+static inline void mei_me_unset_pm_domain(struct mei_device *dev)
+{
+ /* stop using pm callbacks if any */
+ dev->pdev->dev.pm_domain = NULL;
+}
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops mei_me_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
+ mei_me_pci_resume)
+ SET_RUNTIME_PM_OPS(
+ mei_me_pm_runtime_suspend,
+ mei_me_pm_runtime_resume,
+ mei_me_pm_runtime_idle)
+};
-static SIMPLE_DEV_PM_OPS(mei_me_pm_ops, mei_me_pci_suspend, mei_me_pci_resume);
#define MEI_ME_PM_OPS (&mei_me_pm_ops)
#else
#define MEI_ME_PM_OPS NULL
-#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
/*
* PCI driver structure
*/
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index ad3adb009a1..2343c6236df 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -27,6 +27,7 @@
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
#include <linux/mei.h>
@@ -35,11 +36,18 @@
#include "hw-txe.h"
static const struct pci_device_id mei_txe_pci_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F18)}, /* Baytrail */
+ {MEI_PCI_DEVICE(0x0F18, mei_txe_cfg)}, /* Baytrail */
{0, }
};
MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl);
+#ifdef CONFIG_PM_RUNTIME
+static inline void mei_txe_set_pm_domain(struct mei_device *dev);
+static inline void mei_txe_unset_pm_domain(struct mei_device *dev);
+#else
+static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
+static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
+#endif /* CONFIG_PM_RUNTIME */
static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
{
@@ -61,6 +69,7 @@ static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
*/
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
struct mei_device *dev;
struct mei_txe_hw *hw;
int err;
@@ -91,7 +100,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* allocates and initializes the mei dev structure */
- dev = mei_txe_dev_init(pdev);
+ dev = mei_txe_dev_init(pdev, cfg);
if (!dev) {
err = -ENOMEM;
goto release_regions;
@@ -137,12 +146,25 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto release_irq;
}
+ pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+
err = mei_register(dev);
if (err)
goto release_irq;
pci_set_drvdata(pdev, dev);
+ /*
+	 * For HW that is not wake capable the runtime PM framework
+	 * can't be used at the PCI device level.
+	 * Use domain runtime PM callbacks instead.
+ */
+ if (!pci_dev_run_wake(pdev))
+ mei_txe_set_pm_domain(dev);
+
+ pm_runtime_put_noidle(&pdev->dev);
+
return 0;
release_irq:
@@ -187,10 +209,15 @@ static void mei_txe_remove(struct pci_dev *pdev)
return;
}
+ pm_runtime_get_noresume(&pdev->dev);
+
hw = to_txe_hw(dev);
mei_stop(dev);
+ if (!pci_dev_run_wake(pdev))
+ mei_txe_unset_pm_domain(dev);
+
/* disable interrupts */
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
@@ -265,15 +292,131 @@ static int mei_txe_pci_resume(struct device *device)
return err;
}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+static int mei_txe_pm_runtime_idle(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev;
+
+ dev_dbg(&pdev->dev, "rpm: txe: runtime_idle\n");
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return -ENODEV;
+ if (mei_write_is_idle(dev))
+ pm_schedule_suspend(device, MEI_TXI_RPM_TIMEOUT * 2);
+
+ return -EBUSY;
+}
+static int mei_txe_pm_runtime_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev;
+ int ret;
+
+ dev_dbg(&pdev->dev, "rpm: txe: runtime suspend\n");
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return -ENODEV;
+
+ mutex_lock(&dev->device_lock);
+
+ if (mei_write_is_idle(dev))
+ ret = mei_txe_aliveness_set_sync(dev, 0);
+ else
+ ret = -EAGAIN;
+
+ /*
+	 * If everything is okay we're about to enter the PCI low
+	 * power state (D3), therefore we need to disable
+	 * interrupts towards the host.
+	 * However, if the device is not wakeable, we do not enter
+	 * the D-low state and need to keep the interrupts running.
+ */
+ if (!ret && pci_dev_run_wake(pdev))
+ mei_disable_interrupts(dev);
+
+ dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);
+
+ mutex_unlock(&dev->device_lock);
+ return ret;
+}
+
+static int mei_txe_pm_runtime_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev;
+ int ret;
+
+ dev_dbg(&pdev->dev, "rpm: txe: runtime resume\n");
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return -ENODEV;
+
+ mutex_lock(&dev->device_lock);
+
+ mei_enable_interrupts(dev);
+
+ ret = mei_txe_aliveness_set_sync(dev, 1);
+
+ mutex_unlock(&dev->device_lock);
+
+ dev_dbg(&pdev->dev, "rpm: txe: runtime resume ret = %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * mei_txe_set_pm_domain - fill and set the pm domain structure for the device
+ *
+ * @dev: mei_device
+ */
+static inline void mei_txe_set_pm_domain(struct mei_device *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+
+ if (pdev->dev.bus && pdev->dev.bus->pm) {
+ dev->pg_domain.ops = *pdev->dev.bus->pm;
+
+ dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend;
+ dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume;
+ dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle;
+
+ pdev->dev.pm_domain = &dev->pg_domain;
+ }
+}
-static SIMPLE_DEV_PM_OPS(mei_txe_pm_ops,
- mei_txe_pci_suspend,
- mei_txe_pci_resume);
+/**
+ * mei_txe_unset_pm_domain - clean the pm domain structure for the device
+ *
+ * @dev: mei_device
+ */
+static inline void mei_txe_unset_pm_domain(struct mei_device *dev)
+{
+ /* stop using pm callbacks if any */
+ dev->pdev->dev.pm_domain = NULL;
+}
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops mei_txe_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend,
+ mei_txe_pci_resume)
+ SET_RUNTIME_PM_OPS(
+ mei_txe_pm_runtime_suspend,
+ mei_txe_pm_runtime_resume,
+ mei_txe_pm_runtime_idle)
+};
#define MEI_TXE_PM_OPS (&mei_txe_pm_ops)
#else
#define MEI_TXE_PM_OPS NULL
-#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
+
/*
* PCI driver structure
*/
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index ebf1cbc198f..a84a664dfcc 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -84,8 +84,6 @@ int mei_wd_host_init(struct mei_device *dev)
return ret;
}
- cl->state = MEI_FILE_CONNECTING;
-
ret = mei_cl_connect(cl, NULL);
if (ret) {
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 0535d1e0bc7..104a05f6b73 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -6,7 +6,7 @@
* This file supports the user system call for file open, close, mmap, etc.
 * This also includes the driver initialization code.
*
- * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2008-2014 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -58,6 +58,11 @@ static int max_user_cbrs, max_user_dsr_bytes;
static struct miscdevice gru_miscdev;
+static int gru_supported(void)
+{
+ return is_uv_system() &&
+ (uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE);
+}
/*
* gru_vma_close
@@ -518,7 +523,7 @@ static int __init gru_init(void)
{
int ret;
- if (!is_uv_system() || (is_uvx_hub() && !is_uv2_hub()))
+ if (!gru_supported())
return 0;
#if defined CONFIG_IA64
@@ -573,7 +578,7 @@ exit0:
static void __exit gru_exit(void)
{
- if (!is_uv_system())
+ if (!gru_supported())
return;
gru_teardown_tlb_irqs();
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
new file mode 100644
index 00000000000..73068e50e56
--- /dev/null
+++ b/drivers/misc/vexpress-syscfg.c
@@ -0,0 +1,324 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014 ARM Limited
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+#include <linux/vexpress.h>
+
+
+#define SYS_CFGDATA 0x0
+
+#define SYS_CFGCTRL 0x4
+#define SYS_CFGCTRL_START (1 << 31)
+#define SYS_CFGCTRL_WRITE (1 << 30)
+#define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26)
+#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20)
+#define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16)
+#define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12)
+#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0)
+
+#define SYS_CFGSTAT 0x8
+#define SYS_CFGSTAT_ERR (1 << 1)
+#define SYS_CFGSTAT_COMPLETE (1 << 0)
+
+
+struct vexpress_syscfg {
+ struct device *dev;
+ void __iomem *base;
+ struct list_head funcs;
+};
+
+struct vexpress_syscfg_func {
+ struct list_head list;
+ struct vexpress_syscfg *syscfg;
+ struct regmap *regmap;
+ int num_templates;
+ u32 template[0]; /* Keep it last! */
+};
+
+
+static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
+ int index, bool write, u32 *data)
+{
+ struct vexpress_syscfg *syscfg = func->syscfg;
+ u32 command, status;
+ int tries;
+ long timeout;
+
+	if (WARN_ON(index >= func->num_templates))
+ return -EINVAL;
+
+ command = readl(syscfg->base + SYS_CFGCTRL);
+ if (WARN_ON(command & SYS_CFGCTRL_START))
+ return -EBUSY;
+
+ command = func->template[index];
+ command |= SYS_CFGCTRL_START;
+ command |= write ? SYS_CFGCTRL_WRITE : 0;
+
+ /* Use a canary for reads */
+ if (!write)
+ *data = 0xdeadbeef;
+
+ dev_dbg(syscfg->dev, "func %p, command %x, data %x\n",
+ func, command, *data);
+ writel(*data, syscfg->base + SYS_CFGDATA);
+ writel(0, syscfg->base + SYS_CFGSTAT);
+ writel(command, syscfg->base + SYS_CFGCTRL);
+ mb();
+
+ /* The operation can take ages... Go to sleep, 100us initially */
+ tries = 100;
+ timeout = 100;
+ do {
+ if (!irqs_disabled()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(timeout));
+ if (signal_pending(current))
+ return -EINTR;
+ } else {
+ udelay(timeout);
+ }
+
+ status = readl(syscfg->base + SYS_CFGSTAT);
+ if (status & SYS_CFGSTAT_ERR)
+ return -EFAULT;
+
+ if (timeout > 20)
+ timeout -= 20;
+ } while (--tries && !(status & SYS_CFGSTAT_COMPLETE));
+ if (WARN_ON_ONCE(!tries))
+ return -ETIMEDOUT;
+
+ if (!write) {
+ *data = readl(syscfg->base + SYS_CFGDATA);
+ dev_dbg(syscfg->dev, "func %p, read data %x\n", func, *data);
+ }
+
+ return 0;
+}
+
+static int vexpress_syscfg_read(void *context, unsigned int index,
+ unsigned int *val)
+{
+ struct vexpress_syscfg_func *func = context;
+
+ return vexpress_syscfg_exec(func, index, false, val);
+}
+
+static int vexpress_syscfg_write(void *context, unsigned int index,
+ unsigned int val)
+{
+ struct vexpress_syscfg_func *func = context;
+
+ return vexpress_syscfg_exec(func, index, true, &val);
+}
+
+struct regmap_config vexpress_syscfg_regmap_config = {
+ .lock = vexpress_config_lock,
+ .unlock = vexpress_config_unlock,
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_read = vexpress_syscfg_read,
+ .reg_write = vexpress_syscfg_write,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+
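Each function registered through the bridge gets its own regmap, so consumers go through the standard regmap API; a minimal, hypothetical read helper:

/* hypothetical consumer: read the first value exposed by a syscfg function */
static int example_syscfg_read_first(struct regmap *regmap, u32 *val)
{
	unsigned int v;
	int err;

	err = regmap_read(regmap, 0, &v);	/* register index 0 = first template */
	if (!err)
		*val = v;
	return err;
}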
+static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
+ void *context)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct vexpress_syscfg *syscfg = context;
+ struct vexpress_syscfg_func *func;
+ struct property *prop;
+ const __be32 *val = NULL;
+ __be32 energy_quirk[4];
+ int num;
+ u32 site, position, dcc;
+ int i;
+
+ if (dev->of_node) {
+ int err = vexpress_config_get_topo(dev->of_node, &site,
+ &position, &dcc);
+
+ if (err)
+ return ERR_PTR(err);
+
+ prop = of_find_property(dev->of_node,
+ "arm,vexpress-sysreg,func", NULL);
+ if (!prop)
+ return ERR_PTR(-EINVAL);
+
+ num = prop->length / sizeof(u32) / 2;
+ val = prop->value;
+ } else {
+ if (pdev->num_resources != 1 ||
+ pdev->resource[0].flags != IORESOURCE_BUS)
+ return ERR_PTR(-EFAULT);
+
+ site = pdev->resource[0].start;
+ if (site == VEXPRESS_SITE_MASTER)
+ site = vexpress_config_get_master();
+ position = 0;
+ dcc = 0;
+ num = 1;
+ }
+
+ /*
+ * "arm,vexpress-energy" function used to be described
+ * by its first device only, now it requires both
+ */
+ if (num == 1 && of_device_is_compatible(dev->of_node,
+ "arm,vexpress-energy")) {
+ num = 2;
+ energy_quirk[0] = *val;
+ energy_quirk[2] = *val++;
+ energy_quirk[1] = *val;
+ energy_quirk[3] = cpu_to_be32(be32_to_cpup(val) + 1);
+ val = energy_quirk;
+ }
+
+ func = kzalloc(sizeof(*func) + sizeof(*func->template) * num,
+ GFP_KERNEL);
+ if (!func)
+ return NULL;
+
+ func->syscfg = syscfg;
+ func->num_templates = num;
+
+ for (i = 0; i < num; i++) {
+ u32 function, device;
+
+ if (dev->of_node) {
+ function = be32_to_cpup(val++);
+ device = be32_to_cpup(val++);
+ } else {
+ function = pdev->resource[0].end;
+ device = pdev->id;
+ }
+
+ dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n",
+ func, site, position, dcc,
+ function, device);
+
+ func->template[i] = SYS_CFGCTRL_DCC(dcc);
+ func->template[i] |= SYS_CFGCTRL_SITE(site);
+ func->template[i] |= SYS_CFGCTRL_POSITION(position);
+ func->template[i] |= SYS_CFGCTRL_FUNC(function);
+ func->template[i] |= SYS_CFGCTRL_DEVICE(device);
+ }
+
+ vexpress_syscfg_regmap_config.max_register = num - 1;
+
+ func->regmap = regmap_init(dev, NULL, func,
+ &vexpress_syscfg_regmap_config);
+
+ if (IS_ERR(func->regmap))
+ kfree(func);
+ else
+ list_add(&func->list, &syscfg->funcs);
+
+ return func->regmap;
+}
+
+static void vexpress_syscfg_regmap_exit(struct regmap *regmap, void *context)
+{
+ struct vexpress_syscfg *syscfg = context;
+ struct vexpress_syscfg_func *func, *tmp;
+
+ regmap_exit(regmap);
+
+ list_for_each_entry_safe(func, tmp, &syscfg->funcs, list) {
+ if (func->regmap == regmap) {
+			list_del(&func->list);
+ kfree(func);
+ break;
+ }
+ }
+}
+
+static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = {
+ .regmap_init = vexpress_syscfg_regmap_init,
+ .regmap_exit = vexpress_syscfg_regmap_exit,
+};
+
+
+/* Non-DT hack, to be gone... */
+static struct device *vexpress_syscfg_bridge;
+
+int vexpress_syscfg_device_register(struct platform_device *pdev)
+{
+ pdev->dev.parent = vexpress_syscfg_bridge;
+
+ return platform_device_register(pdev);
+}
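In the non-DT case the topology is conveyed through a single IORESOURCE_BUS resource (site in .start, config function number in .end) while the platform device id selects the device number, as the fallback branch in vexpress_syscfg_regmap_init() shows. A hypothetical registration, with placeholder values:

static struct resource example_func_res = {
	.start = VEXPRESS_SITE_MASTER,	/* site */
	.end = 1,			/* config function number (placeholder) */
	.flags = IORESOURCE_BUS,
};

static struct platform_device example_func_dev = {
	.name = "vexpress-example",	/* hypothetical consumer driver */
	.id = 0,			/* device number */
	.num_resources = 1,
	.resource = &example_func_res,
};

static int __init example_func_init(void)
{
	return vexpress_syscfg_device_register(&example_func_dev);
}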
+
+
+int vexpress_syscfg_probe(struct platform_device *pdev)
+{
+ struct vexpress_syscfg *syscfg;
+ struct resource *res;
+ struct device *bridge;
+
+ syscfg = devm_kzalloc(&pdev->dev, sizeof(*syscfg), GFP_KERNEL);
+ if (!syscfg)
+ return -ENOMEM;
+ syscfg->dev = &pdev->dev;
+ INIT_LIST_HEAD(&syscfg->funcs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), pdev->name))
+ return -EBUSY;
+
+ syscfg->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!syscfg->base)
+ return -EFAULT;
+
+ /* Must use dev.parent (MFD), as that's where DT phandle points at... */
+ bridge = vexpress_config_bridge_register(pdev->dev.parent,
+ &vexpress_syscfg_bridge_ops, syscfg);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+
+ /* Non-DT case */
+ if (!pdev->dev.of_node)
+ vexpress_syscfg_bridge = bridge;
+
+ return 0;
+}
+
+static const struct platform_device_id vexpress_syscfg_id_table[] = {
+ { "vexpress-syscfg", },
+ {},
+};
+
+static struct platform_driver vexpress_syscfg_driver = {
+ .driver.name = "vexpress-syscfg",
+ .id_table = vexpress_syscfg_id_table,
+ .probe = vexpress_syscfg_probe,
+};
+
+static int __init vexpress_syscfg_init(void)
+{
+ return platform_driver_register(&vexpress_syscfg_driver);
+}
+core_initcall(vexpress_syscfg_init);