Diffstat (limited to 'drivers/misc')
 drivers/misc/atmel-ssc.c                |   8
 drivers/misc/c2port/core.c              |  83
 drivers/misc/enclosure.c                |  29
 drivers/misc/hpilo.c                    |   4
 drivers/misc/ibmasm/ibmasmfs.c          |  26
 drivers/misc/ics932s401.c               |   4
 drivers/misc/lkdtm.c                    |  63
 drivers/misc/mei/amthif.c               |  14
 drivers/misc/mei/bus.c                  |  14
 drivers/misc/mei/client.c               |  15
 drivers/misc/mei/client.h               |   9
 drivers/misc/mei/hw-me.c                |   9
 drivers/misc/mei/init.c                 |  11
 drivers/misc/mei/main.c                 |  22
 drivers/misc/sram.c                     |   3
 drivers/misc/ti-st/st_core.c            |   2
 drivers/misc/vmw_balloon.c              |   2
 drivers/misc/vmw_vmci/vmci_driver.c     |   2
 drivers/misc/vmw_vmci/vmci_driver.h     |   7
 drivers/misc/vmw_vmci/vmci_guest.c      |  22
 drivers/misc/vmw_vmci/vmci_queue_pair.c | 315
 drivers/misc/vmw_vmci/vmci_queue_pair.h |  18
 22 files changed, 339 insertions(+), 343 deletions(-)
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index e068a76a5f6..5be808406ed 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/pinctrl/consumer.h>
/* Serialize access to ssc_list and user count */
static DEFINE_SPINLOCK(user_lock);
@@ -137,13 +136,6 @@ static int ssc_probe(struct platform_device *pdev)
struct resource *regs;
struct ssc_device *ssc;
const struct atmel_ssc_platform_data *plat_dat;
- struct pinctrl *pinctrl;
-
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl)) {
- dev_err(&pdev->dev, "Failed to request pinctrl\n");
- return PTR_ERR(pinctrl);
- }
ssc = devm_kzalloc(&pdev->dev, sizeof(struct ssc_device), GFP_KERNEL);
if (!ssc) {
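
The devm_pinctrl_get_select_default() call removed above is redundant because the driver core now claims a device's default pinctrl state before probe runs. The sketch below (not part of the patch) shows how a driver would still select a non-default state explicitly; the "sleep" state name and the helper are hypothetical.

#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>

/* Hypothetical helper: pick an alternate pinctrl state by name. */
static int example_select_sleep_state(struct platform_device *pdev)
{
	struct pinctrl *p;
	struct pinctrl_state *s;

	p = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	s = pinctrl_lookup_state(p, "sleep");	/* assumed DT state name */
	if (IS_ERR(s))
		return PTR_ERR(s);

	return pinctrl_select_state(p, s);
}
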
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index f32550a74bd..464419b3644 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -311,6 +311,7 @@ static ssize_t c2port_show_name(struct device *dev,
return sprintf(buf, "%s\n", c2dev->name);
}
+static DEVICE_ATTR(name, 0444, c2port_show_name, NULL);
static ssize_t c2port_show_flash_blocks_num(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -320,6 +321,7 @@ static ssize_t c2port_show_flash_blocks_num(struct device *dev,
return sprintf(buf, "%d\n", ops->blocks_num);
}
+static DEVICE_ATTR(flash_blocks_num, 0444, c2port_show_flash_blocks_num, NULL);
static ssize_t c2port_show_flash_block_size(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -329,6 +331,7 @@ static ssize_t c2port_show_flash_block_size(struct device *dev,
return sprintf(buf, "%d\n", ops->block_size);
}
+static DEVICE_ATTR(flash_block_size, 0444, c2port_show_flash_block_size, NULL);
static ssize_t c2port_show_flash_size(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -338,18 +341,18 @@ static ssize_t c2port_show_flash_size(struct device *dev,
return sprintf(buf, "%d\n", ops->blocks_num * ops->block_size);
}
+static DEVICE_ATTR(flash_size, 0444, c2port_show_flash_size, NULL);
-static ssize_t c2port_show_access(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t access_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", c2dev->access);
}
-static ssize_t c2port_store_access(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t access_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
struct c2port_ops *ops = c2dev->ops;
@@ -375,6 +378,7 @@ static ssize_t c2port_store_access(struct device *dev,
return count;
}
+static DEVICE_ATTR_RW(access);
static ssize_t c2port_store_reset(struct device *dev,
struct device_attribute *attr,
@@ -395,6 +399,7 @@ static ssize_t c2port_store_reset(struct device *dev,
return count;
}
+static DEVICE_ATTR(reset, 0200, NULL, c2port_store_reset);
static ssize_t __c2port_show_dev_id(struct c2port_device *dev, char *buf)
{
@@ -431,6 +436,7 @@ static ssize_t c2port_show_dev_id(struct device *dev,
return ret;
}
+static DEVICE_ATTR(dev_id, 0444, c2port_show_dev_id, NULL);
static ssize_t __c2port_show_rev_id(struct c2port_device *dev, char *buf)
{
@@ -467,6 +473,7 @@ static ssize_t c2port_show_rev_id(struct device *dev,
return ret;
}
+static DEVICE_ATTR(rev_id, 0444, c2port_show_rev_id, NULL);
static ssize_t c2port_show_flash_access(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -536,6 +543,8 @@ static ssize_t c2port_store_flash_access(struct device *dev,
return count;
}
+static DEVICE_ATTR(flash_access, 0644, c2port_show_flash_access,
+ c2port_store_flash_access);
static ssize_t __c2port_write_flash_erase(struct c2port_device *dev)
{
@@ -616,6 +625,7 @@ static ssize_t c2port_store_flash_erase(struct device *dev,
return count;
}
+static DEVICE_ATTR(flash_erase, 0200, NULL, c2port_store_flash_erase);
static ssize_t __c2port_read_flash_data(struct c2port_device *dev,
char *buffer, loff_t offset, size_t count)
@@ -846,35 +856,40 @@ static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj,
return ret;
}
+/* size is computed at run-time */
+static BIN_ATTR(flash_data, 0644, c2port_read_flash_data,
+ c2port_write_flash_data, 0);
/*
* Class attributes
*/
+static struct attribute *c2port_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_flash_blocks_num.attr,
+ &dev_attr_flash_block_size.attr,
+ &dev_attr_flash_size.attr,
+ &dev_attr_access.attr,
+ &dev_attr_reset.attr,
+ &dev_attr_dev_id.attr,
+ &dev_attr_rev_id.attr,
+ &dev_attr_flash_access.attr,
+ &dev_attr_flash_erase.attr,
+ NULL,
+};
-static struct device_attribute c2port_attrs[] = {
- __ATTR(name, 0444, c2port_show_name, NULL),
- __ATTR(flash_blocks_num, 0444, c2port_show_flash_blocks_num, NULL),
- __ATTR(flash_block_size, 0444, c2port_show_flash_block_size, NULL),
- __ATTR(flash_size, 0444, c2port_show_flash_size, NULL),
- __ATTR(access, 0644, c2port_show_access, c2port_store_access),
- __ATTR(reset, 0200, NULL, c2port_store_reset),
- __ATTR(dev_id, 0444, c2port_show_dev_id, NULL),
- __ATTR(rev_id, 0444, c2port_show_rev_id, NULL),
-
- __ATTR(flash_access, 0644, c2port_show_flash_access,
- c2port_store_flash_access),
- __ATTR(flash_erase, 0200, NULL, c2port_store_flash_erase),
- __ATTR_NULL,
+static struct bin_attribute *c2port_bin_attrs[] = {
+ &bin_attr_flash_data,
+ NULL,
};
-static struct bin_attribute c2port_bin_attrs = {
- .attr = {
- .name = "flash_data",
- .mode = 0644
- },
- .read = c2port_read_flash_data,
- .write = c2port_write_flash_data,
- /* .size is computed at run-time */
+static const struct attribute_group c2port_group = {
+ .attrs = c2port_attrs,
+ .bin_attrs = c2port_bin_attrs,
+};
+
+static const struct attribute_group *c2port_groups[] = {
+ &c2port_group,
+ NULL,
};
/*
@@ -907,6 +922,8 @@ struct c2port_device *c2port_device_register(char *name,
goto error_idr_alloc;
c2dev->id = ret;
+ bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
+
c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
"c2port%d", c2dev->id);
if (unlikely(IS_ERR(c2dev->dev))) {
@@ -919,12 +936,6 @@ struct c2port_device *c2port_device_register(char *name,
c2dev->ops = ops;
mutex_init(&c2dev->mutex);
- /* Create binary file */
- c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
- ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
- if (unlikely(ret))
- goto error_device_create_bin_file;
-
/* By default C2 port access is off */
c2dev->access = c2dev->flash_access = 0;
ops->access(c2dev, 0);
@@ -937,9 +948,6 @@ struct c2port_device *c2port_device_register(char *name,
return c2dev;
-error_device_create_bin_file:
- device_destroy(c2port_class, 0);
-
error_device_create:
spin_lock_irq(&c2port_idr_lock);
idr_remove(&c2port_idr, c2dev->id);
@@ -959,7 +967,6 @@ void c2port_device_unregister(struct c2port_device *c2dev)
dev_info(c2dev->dev, "C2 port %s removed\n", c2dev->name);
- device_remove_bin_file(c2dev->dev, &c2port_bin_attrs);
spin_lock_irq(&c2port_idr_lock);
idr_remove(&c2port_idr, c2dev->id);
spin_unlock_irq(&c2port_idr_lock);
@@ -984,7 +991,7 @@ static int __init c2port_init(void)
printk(KERN_ERR "c2port: failed to allocate class\n");
return PTR_ERR(c2port_class);
}
- c2port_class->dev_attrs = c2port_attrs;
+ c2port_class->dev_groups = c2port_groups;
return 0;
}
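
The c2port changes above follow the common conversion from a per-device attribute array (class->dev_attrs plus a hand-registered binary file) to attribute groups (class->dev_groups with .bin_attrs). A minimal sketch of the same pattern for a hypothetical "foo" class; the names are illustrative, not from the patch.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sysfs.h>

/* DEVICE_ATTR_RO(version) requires a matching version_show() and
 * emits dev_attr_version. */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", 1);
}
static DEVICE_ATTR_RO(version);

static struct attribute *foo_attrs[] = {
	&dev_attr_version.attr,
	NULL,
};
ATTRIBUTE_GROUPS(foo);			/* generates foo_group and foo_groups */

static struct class foo_class = {
	.name       = "foo",
	.owner      = THIS_MODULE,
	.dev_groups = foo_groups,	/* replaces the old .dev_attrs array */
};

Binary attributes slot into the same group via .bin_attrs, which is what lets c2port drop device_create_bin_file()/device_remove_bin_file() above.
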
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 00e5fcac8fd..0e8df41aaf1 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -239,7 +239,7 @@ static void enclosure_component_release(struct device *dev)
put_device(dev->parent);
}
-static const struct attribute_group *enclosure_groups[];
+static const struct attribute_group *enclosure_component_groups[];
/**
* enclosure_component_register - add a particular component to an enclosure
@@ -282,7 +282,7 @@ enclosure_component_register(struct enclosure_device *edev,
dev_set_name(cdev, "%u", number);
cdev->release = enclosure_component_release;
- cdev->groups = enclosure_groups;
+ cdev->groups = enclosure_component_groups;
err = device_register(cdev);
if (err) {
@@ -365,25 +365,26 @@ EXPORT_SYMBOL_GPL(enclosure_remove_device);
* sysfs pieces below
*/
-static ssize_t enclosure_show_components(struct device *cdev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t components_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
{
struct enclosure_device *edev = to_enclosure_device(cdev);
return snprintf(buf, 40, "%d\n", edev->components);
}
+static DEVICE_ATTR_RO(components);
-static struct device_attribute enclosure_attrs[] = {
- __ATTR(components, S_IRUGO, enclosure_show_components, NULL),
- __ATTR_NULL
+static struct attribute *enclosure_class_attrs[] = {
+ &dev_attr_components.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(enclosure_class);
static struct class enclosure_class = {
.name = "enclosure",
.owner = THIS_MODULE,
.dev_release = enclosure_release,
- .dev_attrs = enclosure_attrs,
+ .dev_groups = enclosure_class_groups,
};
static const char *const enclosure_status [] = {
@@ -536,15 +537,7 @@ static struct attribute *enclosure_component_attrs[] = {
&dev_attr_type.attr,
NULL
};
-
-static struct attribute_group enclosure_group = {
- .attrs = enclosure_component_attrs,
-};
-
-static const struct attribute_group *enclosure_groups[] = {
- &enclosure_group,
- NULL
-};
+ATTRIBUTE_GROUPS(enclosure_component);
static int __init enclosure_init(void)
{
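
The forward declaration at the top of enclosure.c is renamed to enclosure_component_groups because that is the array name ATTRIBUTE_GROUPS() generates. Roughly, ATTRIBUTE_GROUPS(enclosure_component) stands in for the open-coded definitions removed above (a sketch of the macro's effect, not its literal expansion):

static const struct attribute_group enclosure_component_group = {
	.attrs = enclosure_component_attrs,
};

static const struct attribute_group *enclosure_component_groups[] = {
	&enclosure_component_group,
	NULL,
};
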
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 621c7a37339..b83e3ca12a4 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -759,7 +759,7 @@ static int ilo_probe(struct pci_dev *pdev,
/* Ignore subsystem_device = 0x1979 (set by BIOS) */
if (pdev->subsystem_device == 0x1979)
- goto out;
+ return 0;
if (max_ccb > MAX_CCB)
max_ccb = MAX_CCB;
@@ -899,7 +899,7 @@ static void __exit ilo_exit(void)
class_destroy(ilo_class);
}
-MODULE_VERSION("1.4");
+MODULE_VERSION("1.4.1");
MODULE_ALIAS(ILO_NAME);
MODULE_DESCRIPTION(ILO_NAME);
MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>");
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index ce5b75616b4..e8b933111e0 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -149,8 +149,7 @@ static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode)
return ret;
}
-static struct dentry *ibmasmfs_create_file (struct super_block *sb,
- struct dentry *parent,
+static struct dentry *ibmasmfs_create_file(struct dentry *parent,
const char *name,
const struct file_operations *fops,
void *data,
@@ -163,7 +162,7 @@ static struct dentry *ibmasmfs_create_file (struct super_block *sb,
if (!dentry)
return NULL;
- inode = ibmasmfs_make_inode(sb, S_IFREG | mode);
+ inode = ibmasmfs_make_inode(parent->d_sb, S_IFREG | mode);
if (!inode) {
dput(dentry);
return NULL;
@@ -176,8 +175,7 @@ static struct dentry *ibmasmfs_create_file (struct super_block *sb,
return dentry;
}
-static struct dentry *ibmasmfs_create_dir (struct super_block *sb,
- struct dentry *parent,
+static struct dentry *ibmasmfs_create_dir(struct dentry *parent,
const char *name)
{
struct dentry *dentry;
@@ -187,7 +185,7 @@ static struct dentry *ibmasmfs_create_dir (struct super_block *sb,
if (!dentry)
return NULL;
- inode = ibmasmfs_make_inode(sb, S_IFDIR | 0500);
+ inode = ibmasmfs_make_inode(parent->d_sb, S_IFDIR | 0500);
if (!inode) {
dput(dentry);
return NULL;
@@ -612,20 +610,20 @@ static void ibmasmfs_create_files (struct super_block *sb)
struct dentry *dir;
struct dentry *remote_dir;
sp = list_entry(entry, struct service_processor, node);
- dir = ibmasmfs_create_dir(sb, sb->s_root, sp->dirname);
+ dir = ibmasmfs_create_dir(sb->s_root, sp->dirname);
if (!dir)
continue;
- ibmasmfs_create_file(sb, dir, "command", &command_fops, sp, S_IRUSR|S_IWUSR);
- ibmasmfs_create_file(sb, dir, "event", &event_fops, sp, S_IRUSR|S_IWUSR);
- ibmasmfs_create_file(sb, dir, "reverse_heartbeat", &r_heartbeat_fops, sp, S_IRUSR|S_IWUSR);
+ ibmasmfs_create_file(dir, "command", &command_fops, sp, S_IRUSR|S_IWUSR);
+ ibmasmfs_create_file(dir, "event", &event_fops, sp, S_IRUSR|S_IWUSR);
+ ibmasmfs_create_file(dir, "reverse_heartbeat", &r_heartbeat_fops, sp, S_IRUSR|S_IWUSR);
- remote_dir = ibmasmfs_create_dir(sb, dir, "remote_video");
+ remote_dir = ibmasmfs_create_dir(dir, "remote_video");
if (!remote_dir)
continue;
- ibmasmfs_create_file(sb, remote_dir, "width", &remote_settings_fops, (void *)display_width(sp), S_IRUSR|S_IWUSR);
- ibmasmfs_create_file(sb, remote_dir, "height", &remote_settings_fops, (void *)display_height(sp), S_IRUSR|S_IWUSR);
- ibmasmfs_create_file(sb, remote_dir, "depth", &remote_settings_fops, (void *)display_depth(sp), S_IRUSR|S_IWUSR);
+ ibmasmfs_create_file(remote_dir, "width", &remote_settings_fops, (void *)display_width(sp), S_IRUSR|S_IWUSR);
+ ibmasmfs_create_file(remote_dir, "height", &remote_settings_fops, (void *)display_height(sp), S_IRUSR|S_IWUSR);
+ ibmasmfs_create_file(remote_dir, "depth", &remote_settings_fops, (void *)display_depth(sp), S_IRUSR|S_IWUSR);
}
}
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
index 00295367c06..28f51e01fd2 100644
--- a/drivers/misc/ics932s401.c
+++ b/drivers/misc/ics932s401.c
@@ -2,7 +2,7 @@
* A driver for the Integrated Circuits ICS932S401
* Copyright (C) 2008 IBM
*
- * Author: Darrick J. Wong <djwong@us.ibm.com>
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -482,7 +482,7 @@ static int ics932s401_remove(struct i2c_client *client)
module_i2c_driver(ics932s401_driver);
-MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
+MODULE_AUTHOR("Darrick J. Wong <darrick.wong@oracle.com>");
MODULE_DESCRIPTION("ICS932S401 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 08aad69c8da..2fc0586ce3b 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -43,6 +43,7 @@
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
#include <linux/debugfs.h>
+#include <linux/vmalloc.h>
#ifdef CONFIG_IDE
#include <linux/ide.h>
@@ -50,6 +51,7 @@
#define DEFAULT_COUNT 10
#define REC_NUM_DEFAULT 10
+#define EXEC_SIZE 64
enum cname {
CN_INVALID,
@@ -68,6 +70,7 @@ enum ctype {
CT_NONE,
CT_PANIC,
CT_BUG,
+ CT_WARNING,
CT_EXCEPTION,
CT_LOOP,
CT_OVERFLOW,
@@ -77,7 +80,12 @@ enum ctype {
CT_WRITE_AFTER_FREE,
CT_SOFTLOCKUP,
CT_HARDLOCKUP,
+ CT_SPINLOCKUP,
CT_HUNG_TASK,
+ CT_EXEC_DATA,
+ CT_EXEC_STACK,
+ CT_EXEC_KMALLOC,
+ CT_EXEC_VMALLOC,
};
static char* cp_name[] = {
@@ -95,6 +103,7 @@ static char* cp_name[] = {
static char* cp_type[] = {
"PANIC",
"BUG",
+ "WARNING",
"EXCEPTION",
"LOOP",
"OVERFLOW",
@@ -104,7 +113,12 @@ static char* cp_type[] = {
"WRITE_AFTER_FREE",
"SOFTLOCKUP",
"HARDLOCKUP",
+ "SPINLOCKUP",
"HUNG_TASK",
+ "EXEC_DATA",
+ "EXEC_STACK",
+ "EXEC_KMALLOC",
+ "EXEC_VMALLOC",
};
static struct jprobe lkdtm;
@@ -121,6 +135,9 @@ static enum cname cpoint = CN_INVALID;
static enum ctype cptype = CT_NONE;
static int count = DEFAULT_COUNT;
static DEFINE_SPINLOCK(count_lock);
+static DEFINE_SPINLOCK(lock_me_up);
+
+static u8 data_area[EXEC_SIZE];
module_param(recur_count, int, 0644);
MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\
@@ -275,6 +292,19 @@ static int recursive_loop(int a)
return recursive_loop(a);
}
+static void do_nothing(void)
+{
+ return;
+}
+
+static void execute_location(void *dst)
+{
+ void (*func)(void) = dst;
+
+ memcpy(dst, do_nothing, EXEC_SIZE);
+ func();
+}
+
static void lkdtm_do_action(enum ctype which)
{
switch (which) {
@@ -284,6 +314,9 @@ static void lkdtm_do_action(enum ctype which)
case CT_BUG:
BUG();
break;
+ case CT_WARNING:
+ WARN_ON(1);
+ break;
case CT_EXCEPTION:
*((int *) 0) = 0;
break;
@@ -295,10 +328,10 @@ static void lkdtm_do_action(enum ctype which)
(void) recursive_loop(0);
break;
case CT_CORRUPT_STACK: {
- volatile u32 data[8];
- volatile u32 *p = data;
+ /* Make sure the compiler creates and uses an 8 char array. */
+ volatile char data[8];
- p[12] = 0x12345678;
+ memset((void *)data, 0, 64);
break;
}
case CT_UNALIGNED_LOAD_STORE_WRITE: {
@@ -340,10 +373,34 @@ static void lkdtm_do_action(enum ctype which)
for (;;)
cpu_relax();
break;
+ case CT_SPINLOCKUP:
+ /* Must be called twice to trigger. */
+ spin_lock(&lock_me_up);
+ break;
case CT_HUNG_TASK:
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
break;
+ case CT_EXEC_DATA:
+ execute_location(data_area);
+ break;
+ case CT_EXEC_STACK: {
+ u8 stack_area[EXEC_SIZE];
+ execute_location(stack_area);
+ break;
+ }
+ case CT_EXEC_KMALLOC: {
+ u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
+ execute_location(kmalloc_area);
+ kfree(kmalloc_area);
+ break;
+ }
+ case CT_EXEC_VMALLOC: {
+ u32 *vmalloc_area = vmalloc(EXEC_SIZE);
+ execute_location(vmalloc_area);
+ vfree(vmalloc_area);
+ break;
+ }
case CT_NONE:
default:
break;
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 749452f8e2f..d0fdc134068 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -418,15 +418,23 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
struct file *file, poll_table *wait)
{
unsigned int mask = 0;
- mutex_unlock(&dev->device_lock);
+
poll_wait(file, &dev->iamthif_cl.wait, wait);
+
mutex_lock(&dev->device_lock);
- if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
- dev->iamthif_file_object == file) {
+ if (!mei_cl_is_connected(&dev->iamthif_cl)) {
+
+ mask = POLLERR;
+
+ } else if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
+ dev->iamthif_file_object == file) {
+
mask |= (POLLIN | POLLRDNORM);
dev_dbg(&dev->pdev->dev, "run next amthif cb\n");
mei_amthif_run_next_cmd(dev);
}
+ mutex_unlock(&dev->device_lock);
+
return mask;
}
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 9ecd49a7be1..6d0282c08a0 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -47,7 +47,7 @@ static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
id = driver->id_table;
while (id->name[0]) {
- if (!strcmp(dev_name(dev), id->name))
+ if (!strncmp(dev_name(dev), id->name, sizeof(id->name)))
return 1;
id++;
@@ -71,7 +71,7 @@ static int mei_cl_device_probe(struct device *dev)
dev_dbg(dev, "Device probe\n");
- strncpy(id.name, dev_name(dev), MEI_CL_NAME_SIZE);
+ strncpy(id.name, dev_name(dev), sizeof(id.name));
return driver->probe(device, &id);
}
@@ -108,11 +108,13 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
+static DEVICE_ATTR_RO(modalias);
-static struct device_attribute mei_cl_dev_attrs[] = {
- __ATTR_RO(modalias),
- __ATTR_NULL,
+static struct attribute *mei_cl_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(mei_cl_dev);
static int mei_cl_uevent(struct device *dev, struct kobj_uevent_env *env)
{
@@ -124,7 +126,7 @@ static int mei_cl_uevent(struct device *dev, struct kobj_uevent_env *env)
static struct bus_type mei_cl_bus_type = {
.name = "mei",
- .dev_attrs = mei_cl_dev_attrs,
+ .dev_groups = mei_cl_dev_groups,
.match = mei_cl_device_match,
.probe = mei_cl_device_probe,
.remove = mei_cl_device_remove,
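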
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 21d3f5aa835..e0684b4d9a0 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -635,10 +635,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
dev = cl->dev;
- if (cl->state != MEI_FILE_CONNECTED)
- return -ENODEV;
-
- if (dev->dev_state != MEI_DEV_ENABLED)
+ if (!mei_cl_is_connected(cl))
return -ENODEV;
if (cl->read_cb) {
@@ -892,18 +889,22 @@ void mei_cl_all_disconnect(struct mei_device *dev)
/**
- * mei_cl_all_read_wakeup - wake up all readings so they can be interrupted
+ * mei_cl_all_wakeup - wake up all readers and writers so they can be interrupted
*
* @dev - mei device
*/
-void mei_cl_all_read_wakeup(struct mei_device *dev)
+void mei_cl_all_wakeup(struct mei_device *dev)
{
struct mei_cl *cl, *next;
list_for_each_entry_safe(cl, next, &dev->file_list, link) {
if (waitqueue_active(&cl->rx_wait)) {
- dev_dbg(&dev->pdev->dev, "Waking up client!\n");
+ dev_dbg(&dev->pdev->dev, "Waking up reading client!\n");
wake_up_interruptible(&cl->rx_wait);
}
+ if (waitqueue_active(&cl->tx_wait)) {
+ dev_dbg(&dev->pdev->dev, "Waking up writing client!\n");
+ wake_up_interruptible(&cl->tx_wait);
+ }
}
}
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 26b157d8bad..9eb031e9207 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -84,6 +84,13 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
/*
* MEI input output function prototype
*/
+static inline bool mei_cl_is_connected(struct mei_cl *cl)
+{
+ return (cl->dev &&
+ cl->dev->dev_state == MEI_DEV_ENABLED &&
+ cl->state == MEI_FILE_CONNECTED);
+}
+
bool mei_cl_is_other_connecting(struct mei_cl *cl);
int mei_cl_disconnect(struct mei_cl *cl);
int mei_cl_connect(struct mei_cl *cl, struct file *file);
@@ -99,7 +106,7 @@ void mei_host_client_init(struct work_struct *work);
void mei_cl_all_disconnect(struct mei_device *dev);
-void mei_cl_all_read_wakeup(struct mei_device *dev);
+void mei_cl_all_wakeup(struct mei_device *dev);
void mei_cl_all_write_clear(struct mei_device *dev);
#endif /* _MEI_CLIENT_H_ */
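
mei_cl_is_connected() and the widened mei_cl_all_wakeup() work together: clients sleep on rx_wait/tx_wait with the connection state folded into the wait condition, so a reset must both change that state and wake both queues. A hedged sketch of the writer-side pattern this enables; the function is illustrative only, though the fields it touches appear in the driver.

/* Illustrative only; assumes the driver's internal mei_dev.h/client.h. */
static int example_wait_for_write(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;
	int rets;

	mutex_unlock(&dev->device_lock);
	rets = wait_event_interruptible(cl->tx_wait,
			cl->writing_state == MEI_WRITE_COMPLETE ||
			!mei_cl_is_connected(cl));
	mutex_lock(&dev->device_lock);

	/*
	 * Without mei_cl_all_wakeup() poking tx_wait during reset, a
	 * writer parked here would never re-evaluate the condition.
	 */
	return rets;
}
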
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index b22c7e24722..3412adcdaeb 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -176,21 +176,18 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr = mei_hcsr_read(hw);
- dev_dbg(&dev->pdev->dev, "before reset HCSR = 0x%08x.\n", hcsr);
-
- hcsr |= (H_RST | H_IG);
+ hcsr |= H_RST | H_IG | H_IS;
if (intr_enable)
hcsr |= H_IE;
else
- hcsr |= ~H_IE;
+ hcsr &= ~H_IE;
- mei_hcsr_set(hw, hcsr);
+ mei_me_reg_write(hw, H_CSR, hcsr);
if (dev->dev_state == MEI_DEV_POWER_DOWN)
mei_me_hw_reset_release(dev);
- dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", mei_hcsr_read(hw));
return 0;
}
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index e6f16f83ecd..92c73118b13 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -154,8 +154,14 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
dev->dev_state != MEI_DEV_POWER_DOWN)
dev->dev_state = MEI_DEV_RESETTING;
+ /* remove all waiting requests */
+ mei_cl_all_write_clear(dev);
+
mei_cl_all_disconnect(dev);
+ /* wake up all readings so they can be interrupted */
+ mei_cl_all_wakeup(dev);
+
/* remove entry if already in list */
dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n");
mei_cl_unlink(&dev->wd_cl);
@@ -196,11 +202,6 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
mei_hbm_start_req(dev);
- /* wake up all readings so they can be interrupted */
- mei_cl_all_read_wakeup(dev);
-
- /* remove all waiting requests */
- mei_cl_all_write_clear(dev);
}
EXPORT_SYMBOL_GPL(mei_reset);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 5e11b5b9b65..173ff095be0 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -625,24 +625,32 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
unsigned int mask = 0;
if (WARN_ON(!cl || !cl->dev))
- return mask;
+ return POLLERR;
dev = cl->dev;
mutex_lock(&dev->device_lock);
- if (dev->dev_state != MEI_DEV_ENABLED)
- goto out;
-
-
- if (cl == &dev->iamthif_cl) {
- mask = mei_amthif_poll(dev, file, wait);
+ if (!mei_cl_is_connected(cl)) {
+ mask = POLLERR;
goto out;
}
mutex_unlock(&dev->device_lock);
+
+
+ if (cl == &dev->iamthif_cl)
+ return mei_amthif_poll(dev, file, wait);
+
poll_wait(file, &cl->tx_wait, wait);
+
mutex_lock(&dev->device_lock);
+
+ if (!mei_cl_is_connected(cl)) {
+ mask = POLLERR;
+ goto out;
+ }
+
if (MEI_WRITE_COMPLETE == cl->writing_state)
mask |= (POLLIN | POLLRDNORM);
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index d87cc91bc01..afe66571ce0 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -68,7 +68,8 @@ static int sram_probe(struct platform_device *pdev)
ret = gen_pool_add_virt(sram->pool, (unsigned long)virt_base,
res->start, size, -1);
if (ret < 0) {
- gen_pool_destroy(sram->pool);
+ if (sram->clk)
+ clk_disable_unprepare(sram->clk);
return ret;
}
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 0a142801635..8d64b681dd9 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -562,7 +562,9 @@ long st_register(struct st_proto_s *new_proto)
if ((st_gdata->protos_registered != ST_EMPTY) &&
(test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
pr_err(" KIM failure complete callback ");
+ spin_lock_irqsave(&st_gdata->lock, flags);
st_reg_complete(st_gdata, err);
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
clear_bit(ST_REG_PENDING, &st_gdata->st_state);
}
return -EINVAL;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index cb56e270da1..2421835d5da 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -133,7 +133,7 @@ MODULE_LICENSE("GPL");
#define VMWARE_BALLOON_CMD(cmd, data, result) \
({ \
unsigned long __stat, __dummy1, __dummy2; \
- __asm__ __volatile__ ("inl (%%dx)" : \
+ __asm__ __volatile__ ("inl %%dx" : \
"=a"(__stat), \
"=c"(__dummy1), \
"=d"(__dummy2), \
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index 7b3fce2da6c..3dee7ae123e 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
-MODULE_VERSION("1.0.0.0-k");
+MODULE_VERSION("1.1.0.0-k");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h
index f69156a1f30..cee9e977d31 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.h
+++ b/drivers/misc/vmw_vmci/vmci_driver.h
@@ -35,6 +35,13 @@ struct vmci_obj {
enum vmci_obj_type type;
};
+/*
+ * Needed by other components of this module. It's okay to have one global
+ * instance of this because there can only ever be one VMCI device. Our
+ * virtual hardware enforces this.
+ */
+extern struct pci_dev *vmci_pdev;
+
u32 vmci_get_context_id(void);
int vmci_send_datagram(struct vmci_datagram *dg);
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 60c01999f48..b3a2b763ecf 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -65,9 +65,11 @@ struct vmci_guest_device {
void *data_buffer;
void *notification_bitmap;
+ dma_addr_t notification_base;
};
/* vmci_dev singleton device and supporting data*/
+struct pci_dev *vmci_pdev;
static struct vmci_guest_device *vmci_dev_g;
static DEFINE_SPINLOCK(vmci_dev_spinlock);
@@ -528,7 +530,9 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
* well.
*/
if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
- vmci_dev->notification_bitmap = vmalloc(PAGE_SIZE);
+ vmci_dev->notification_bitmap = dma_alloc_coherent(
+ &pdev->dev, PAGE_SIZE, &vmci_dev->notification_base,
+ GFP_KERNEL);
if (!vmci_dev->notification_bitmap) {
dev_warn(&pdev->dev,
"Unable to allocate notification bitmap\n");
@@ -546,6 +550,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
/* Set up global device so that we can start sending datagrams */
spin_lock_irq(&vmci_dev_spinlock);
vmci_dev_g = vmci_dev;
+ vmci_pdev = pdev;
spin_unlock_irq(&vmci_dev_spinlock);
/*
@@ -553,9 +558,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
* used.
*/
if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
- struct page *page =
- vmalloc_to_page(vmci_dev->notification_bitmap);
- unsigned long bitmap_ppn = page_to_pfn(page);
+ unsigned long bitmap_ppn =
+ vmci_dev->notification_base >> PAGE_SHIFT;
if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
dev_warn(&pdev->dev,
"VMCI device unable to register notification bitmap with PPN 0x%x\n",
@@ -665,11 +669,14 @@ err_remove_bitmap:
if (vmci_dev->notification_bitmap) {
iowrite32(VMCI_CONTROL_RESET,
vmci_dev->iobase + VMCI_CONTROL_ADDR);
- vfree(vmci_dev->notification_bitmap);
+ dma_free_coherent(&pdev->dev, PAGE_SIZE,
+ vmci_dev->notification_bitmap,
+ vmci_dev->notification_base);
}
err_remove_vmci_dev_g:
spin_lock_irq(&vmci_dev_spinlock);
+ vmci_pdev = NULL;
vmci_dev_g = NULL;
spin_unlock_irq(&vmci_dev_spinlock);
@@ -699,6 +706,7 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
spin_lock_irq(&vmci_dev_spinlock);
vmci_dev_g = NULL;
+ vmci_pdev = NULL;
spin_unlock_irq(&vmci_dev_spinlock);
dev_dbg(&pdev->dev, "Resetting vmci device\n");
@@ -727,7 +735,9 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
* device, so we can safely free it here.
*/
- vfree(vmci_dev->notification_bitmap);
+ dma_free_coherent(&pdev->dev, PAGE_SIZE,
+ vmci_dev->notification_bitmap,
+ vmci_dev->notification_base);
}
vfree(vmci_dev->data_buffer);
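
Switching the notification bitmap from vmalloc() to dma_alloc_coherent() means the hypervisor-visible PPN can be taken straight from the returned DMA address instead of going through vmalloc_to_page(). A minimal sketch of that allocate-and-derive-PPN pattern, using the same calls as above but with a hypothetical helper:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical: allocate one coherent page and report its PPN. */
static void *example_alloc_bitmap(struct pci_dev *pdev, unsigned long *ppn,
				  dma_addr_t *base)
{
	void *va = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, base, GFP_KERNEL);

	if (!va)
		return NULL;

	/* Coherent allocations are page aligned, so the shift is exact. */
	*ppn = *base >> PAGE_SHIFT;
	return va;
}

The matching teardown is dma_free_coherent(&pdev->dev, PAGE_SIZE, va, *base), mirroring the error and remove paths above.
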
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 8ff2e5ee8fb..a0515a6d6eb 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
+#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uio.h>
@@ -146,14 +147,20 @@ typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
/* The Kernel specific component of the struct vmci_queue structure. */
struct vmci_queue_kern_if {
- struct page **page;
- struct page **header_page;
- void *va;
struct mutex __mutex; /* Protects the queue. */
struct mutex *mutex; /* Shared by producer and consumer queues. */
- bool host;
- size_t num_pages;
- bool mapped;
+ size_t num_pages; /* Number of pages incl. header. */
+ bool host; /* Host or guest? */
+ union {
+ struct {
+ dma_addr_t *pas;
+ void **vas;
+ } g; /* Used by the guest. */
+ struct {
+ struct page **page;
+ struct page **header_page;
+ } h; /* Used by the host. */
+ } u;
};
/*
@@ -265,76 +272,65 @@ static void qp_free_queue(void *q, u64 size)
struct vmci_queue *queue = q;
if (queue) {
- u64 i = DIV_ROUND_UP(size, PAGE_SIZE);
+ u64 i;
- if (queue->kernel_if->mapped) {
- vunmap(queue->kernel_if->va);
- queue->kernel_if->va = NULL;
+ /* Given size does not include header, so add in a page here. */
+ for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
+ dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
+ queue->kernel_if->u.g.vas[i],
+ queue->kernel_if->u.g.pas[i]);
}
- while (i)
- __free_page(queue->kernel_if->page[--i]);
-
- vfree(queue->q_header);
+ vfree(queue);
}
}
/*
- * Allocates kernel VA space of specified size, plus space for the
- * queue structure/kernel interface and the queue header. Allocates
- * physical pages for the queue data pages.
- *
- * PAGE m: struct vmci_queue_header (struct vmci_queue->q_header)
- * PAGE m+1: struct vmci_queue
- * PAGE m+1+q: struct vmci_queue_kern_if (struct vmci_queue->kernel_if)
- * PAGE n-size: Data pages (struct vmci_queue->kernel_if->page[])
+ * Allocates kernel queue pages of specified size with IOMMU mappings,
+ * plus space for the queue structure/kernel interface and the queue
+ * header.
*/
static void *qp_alloc_queue(u64 size, u32 flags)
{
u64 i;
struct vmci_queue *queue;
- struct vmci_queue_header *q_header;
- const u64 num_data_pages = DIV_ROUND_UP(size, PAGE_SIZE);
- const uint queue_size =
- PAGE_SIZE +
- sizeof(*queue) + sizeof(*(queue->kernel_if)) +
- num_data_pages * sizeof(*(queue->kernel_if->page));
-
- q_header = vmalloc(queue_size);
- if (!q_header)
+ const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+ const size_t pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
+ const size_t vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
+ const size_t queue_size =
+ sizeof(*queue) + sizeof(*queue->kernel_if) +
+ pas_size + vas_size;
+
+ queue = vmalloc(queue_size);
+ if (!queue)
return NULL;
- queue = (void *)q_header + PAGE_SIZE;
- queue->q_header = q_header;
+ queue->q_header = NULL;
queue->saved_header = NULL;
queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
- queue->kernel_if->header_page = NULL; /* Unused in guest. */
- queue->kernel_if->page = (struct page **)(queue->kernel_if + 1);
+ queue->kernel_if->mutex = NULL;
+ queue->kernel_if->num_pages = num_pages;
+ queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
+ queue->kernel_if->u.g.vas =
+ (void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
queue->kernel_if->host = false;
- queue->kernel_if->va = NULL;
- queue->kernel_if->mapped = false;
-
- for (i = 0; i < num_data_pages; i++) {
- queue->kernel_if->page[i] = alloc_pages(GFP_KERNEL, 0);
- if (!queue->kernel_if->page[i])
- goto fail;
- }
- if (vmci_qp_pinned(flags)) {
- queue->kernel_if->va =
- vmap(queue->kernel_if->page, num_data_pages, VM_MAP,
- PAGE_KERNEL);
- if (!queue->kernel_if->va)
- goto fail;
-
- queue->kernel_if->mapped = true;
+ for (i = 0; i < num_pages; i++) {
+ queue->kernel_if->u.g.vas[i] =
+ dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
+ &queue->kernel_if->u.g.pas[i],
+ GFP_KERNEL);
+ if (!queue->kernel_if->u.g.vas[i]) {
+ /* Size excl. the header. */
+ qp_free_queue(queue, i * PAGE_SIZE);
+ return NULL;
+ }
}
- return (void *)queue;
+ /* Queue header is the first page. */
+ queue->q_header = queue->kernel_if->u.g.vas[0];
- fail:
- qp_free_queue(queue, i * PAGE_SIZE);
- return NULL;
+ return queue;
}
/*
@@ -353,17 +349,18 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
size_t bytes_copied = 0;
while (bytes_copied < size) {
- u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
- size_t page_offset =
+ const u64 page_index =
+ (queue_offset + bytes_copied) / PAGE_SIZE;
+ const size_t page_offset =
(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
void *va;
size_t to_copy;
- if (!kernel_if->mapped)
- va = kmap(kernel_if->page[page_index]);
+ if (kernel_if->host)
+ va = kmap(kernel_if->u.h.page[page_index]);
else
- va = (void *)((u8 *)kernel_if->va +
- (page_index * PAGE_SIZE));
+ va = kernel_if->u.g.vas[page_index + 1];
+ /* Skip header. */
if (size - bytes_copied > PAGE_SIZE - page_offset)
/* Enough payload to fill up from this page. */
@@ -379,7 +376,8 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
err = memcpy_fromiovec((u8 *)va + page_offset,
iov, to_copy);
if (err != 0) {
- kunmap(kernel_if->page[page_index]);
+ if (kernel_if->host)
+ kunmap(kernel_if->u.h.page[page_index]);
return VMCI_ERROR_INVALID_ARGS;
}
} else {
@@ -388,8 +386,8 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
}
bytes_copied += to_copy;
- if (!kernel_if->mapped)
- kunmap(kernel_if->page[page_index]);
+ if (kernel_if->host)
+ kunmap(kernel_if->u.h.page[page_index]);
}
return VMCI_SUCCESS;
@@ -411,17 +409,18 @@ static int __qp_memcpy_from_queue(void *dest,
size_t bytes_copied = 0;
while (bytes_copied < size) {
- u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
- size_t page_offset =
+ const u64 page_index =
+ (queue_offset + bytes_copied) / PAGE_SIZE;
+ const size_t page_offset =
(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
void *va;
size_t to_copy;
- if (!kernel_if->mapped)
- va = kmap(kernel_if->page[page_index]);
+ if (kernel_if->host)
+ va = kmap(kernel_if->u.h.page[page_index]);
else
- va = (void *)((u8 *)kernel_if->va +
- (page_index * PAGE_SIZE));
+ va = kernel_if->u.g.vas[page_index + 1];
+ /* Skip header. */
if (size - bytes_copied > PAGE_SIZE - page_offset)
/* Enough payload to fill up this page. */
@@ -437,7 +436,8 @@ static int __qp_memcpy_from_queue(void *dest,
err = memcpy_toiovec(iov, (u8 *)va + page_offset,
to_copy);
if (err != 0) {
- kunmap(kernel_if->page[page_index]);
+ if (kernel_if->host)
+ kunmap(kernel_if->u.h.page[page_index]);
return VMCI_ERROR_INVALID_ARGS;
}
} else {
@@ -446,8 +446,8 @@ static int __qp_memcpy_from_queue(void *dest,
}
bytes_copied += to_copy;
- if (!kernel_if->mapped)
- kunmap(kernel_if->page[page_index]);
+ if (kernel_if->host)
+ kunmap(kernel_if->u.h.page[page_index]);
}
return VMCI_SUCCESS;
@@ -489,12 +489,11 @@ static int qp_alloc_ppn_set(void *prod_q,
return VMCI_ERROR_NO_MEM;
}
- produce_ppns[0] = page_to_pfn(vmalloc_to_page(produce_q->q_header));
- for (i = 1; i < num_produce_pages; i++) {
+ for (i = 0; i < num_produce_pages; i++) {
unsigned long pfn;
produce_ppns[i] =
- page_to_pfn(produce_q->kernel_if->page[i - 1]);
+ produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
pfn = produce_ppns[i];
/* Fail allocation if PFN isn't supported by hypervisor. */
@@ -503,12 +502,11 @@ static int qp_alloc_ppn_set(void *prod_q,
goto ppn_error;
}
- consume_ppns[0] = page_to_pfn(vmalloc_to_page(consume_q->q_header));
- for (i = 1; i < num_consume_pages; i++) {
+ for (i = 0; i < num_consume_pages; i++) {
unsigned long pfn;
consume_ppns[i] =
- page_to_pfn(consume_q->kernel_if->page[i - 1]);
+ consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
pfn = consume_ppns[i];
/* Fail allocation if PFN isn't supported by hypervisor. */
@@ -619,23 +617,20 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
const size_t queue_page_size =
- num_pages * sizeof(*queue->kernel_if->page);
+ num_pages * sizeof(*queue->kernel_if->u.h.page);
queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
if (queue) {
queue->q_header = NULL;
queue->saved_header = NULL;
- queue->kernel_if =
- (struct vmci_queue_kern_if *)((u8 *)queue +
- sizeof(*queue));
+ queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
queue->kernel_if->host = true;
queue->kernel_if->mutex = NULL;
queue->kernel_if->num_pages = num_pages;
- queue->kernel_if->header_page =
+ queue->kernel_if->u.h.header_page =
(struct page **)((u8 *)queue + queue_size);
- queue->kernel_if->page = &queue->kernel_if->header_page[1];
- queue->kernel_if->va = NULL;
- queue->kernel_if->mapped = false;
+ queue->kernel_if->u.h.page =
+ &queue->kernel_if->u.h.header_page[1];
}
return queue;
@@ -742,11 +737,12 @@ static int qp_host_get_user_memory(u64 produce_uva,
current->mm,
(uintptr_t) produce_uva,
produce_q->kernel_if->num_pages,
- 1, 0, produce_q->kernel_if->header_page, NULL);
+ 1, 0,
+ produce_q->kernel_if->u.h.header_page, NULL);
if (retval < produce_q->kernel_if->num_pages) {
pr_warn("get_user_pages(produce) failed (retval=%d)", retval);
- qp_release_pages(produce_q->kernel_if->header_page, retval,
- false);
+ qp_release_pages(produce_q->kernel_if->u.h.header_page,
+ retval, false);
err = VMCI_ERROR_NO_MEM;
goto out;
}
@@ -755,12 +751,13 @@ static int qp_host_get_user_memory(u64 produce_uva,
current->mm,
(uintptr_t) consume_uva,
consume_q->kernel_if->num_pages,
- 1, 0, consume_q->kernel_if->header_page, NULL);
+ 1, 0,
+ consume_q->kernel_if->u.h.header_page, NULL);
if (retval < consume_q->kernel_if->num_pages) {
pr_warn("get_user_pages(consume) failed (retval=%d)", retval);
- qp_release_pages(consume_q->kernel_if->header_page, retval,
- false);
- qp_release_pages(produce_q->kernel_if->header_page,
+ qp_release_pages(consume_q->kernel_if->u.h.header_page,
+ retval, false);
+ qp_release_pages(produce_q->kernel_if->u.h.header_page,
produce_q->kernel_if->num_pages, false);
err = VMCI_ERROR_NO_MEM;
}
@@ -803,15 +800,15 @@ static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
struct vmci_queue *consume_q)
{
- qp_release_pages(produce_q->kernel_if->header_page,
+ qp_release_pages(produce_q->kernel_if->u.h.header_page,
produce_q->kernel_if->num_pages, true);
- memset(produce_q->kernel_if->header_page, 0,
- sizeof(*produce_q->kernel_if->header_page) *
+ memset(produce_q->kernel_if->u.h.header_page, 0,
+ sizeof(*produce_q->kernel_if->u.h.header_page) *
produce_q->kernel_if->num_pages);
- qp_release_pages(consume_q->kernel_if->header_page,
+ qp_release_pages(consume_q->kernel_if->u.h.header_page,
consume_q->kernel_if->num_pages, true);
- memset(consume_q->kernel_if->header_page, 0,
- sizeof(*consume_q->kernel_if->header_page) *
+ memset(consume_q->kernel_if->u.h.header_page, 0,
+ sizeof(*consume_q->kernel_if->u.h.header_page) *
consume_q->kernel_if->num_pages);
}
@@ -834,12 +831,12 @@ static int qp_host_map_queues(struct vmci_queue *produce_q,
if (produce_q->q_header != consume_q->q_header)
return VMCI_ERROR_QUEUEPAIR_MISMATCH;
- if (produce_q->kernel_if->header_page == NULL ||
- *produce_q->kernel_if->header_page == NULL)
+ if (produce_q->kernel_if->u.h.header_page == NULL ||
+ *produce_q->kernel_if->u.h.header_page == NULL)
return VMCI_ERROR_UNAVAILABLE;
- headers[0] = *produce_q->kernel_if->header_page;
- headers[1] = *consume_q->kernel_if->header_page;
+ headers[0] = *produce_q->kernel_if->u.h.header_page;
+ headers[1] = *consume_q->kernel_if->u.h.header_page;
produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
if (produce_q->q_header != NULL) {
@@ -1720,21 +1717,6 @@ static int qp_broker_attach(struct qp_broker_entry *entry,
if (result < VMCI_SUCCESS)
return result;
- /*
- * Preemptively load in the headers if non-blocking to
- * prevent blocking later.
- */
- if (entry->qp.flags & VMCI_QPFLAG_NONBLOCK) {
- result = qp_host_map_queues(entry->produce_q,
- entry->consume_q);
- if (result < VMCI_SUCCESS) {
- qp_host_unregister_user_memory(
- entry->produce_q,
- entry->consume_q);
- return result;
- }
- }
-
entry->state = VMCIQPB_ATTACHED_MEM;
} else {
entry->state = VMCIQPB_ATTACHED_NO_MEM;
@@ -1749,24 +1731,6 @@ static int qp_broker_attach(struct qp_broker_entry *entry,
return VMCI_ERROR_UNAVAILABLE;
} else {
- /*
- * For non-blocking queue pairs, we cannot rely on
- * enqueue/dequeue to map in the pages on the
- * host-side, since it may block, so we make an
- * attempt here.
- */
-
- if (flags & VMCI_QPFLAG_NONBLOCK) {
- result =
- qp_host_map_queues(entry->produce_q,
- entry->consume_q);
- if (result < VMCI_SUCCESS)
- return result;
-
- entry->qp.flags |= flags &
- (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED);
- }
-
/* The host side has successfully attached to a queue pair. */
entry->state = VMCIQPB_ATTACHED_MEM;
}
@@ -2543,24 +2507,19 @@ void vmci_qp_guest_endpoints_exit(void)
* Since non-blocking isn't yet implemented on the host personality we
* have no reason to acquire a spin lock. So to avoid the use of an
* unnecessary lock only acquire the mutex if we can block.
- * Note: It is assumed that QPFLAG_PINNED implies QPFLAG_NONBLOCK. Therefore
- * we can use the same locking function for access to both the queue
- * and the queue headers as it is the same logic. Assert this behvior.
*/
static void qp_lock(const struct vmci_qp *qpair)
{
- if (vmci_can_block(qpair->flags))
- qp_acquire_queue_mutex(qpair->produce_q);
+ qp_acquire_queue_mutex(qpair->produce_q);
}
/*
* Helper routine that unlocks the queue pair after calling
- * qp_lock. Respects non-blocking and pinning flags.
+ * qp_lock.
*/
static void qp_unlock(const struct vmci_qp *qpair)
{
- if (vmci_can_block(qpair->flags))
- qp_release_queue_mutex(qpair->produce_q);
+ qp_release_queue_mutex(qpair->produce_q);
}
/*
@@ -2568,17 +2527,12 @@ static void qp_unlock(const struct vmci_qp *qpair)
* currently not mapped, it will be attempted to do so.
*/
static int qp_map_queue_headers(struct vmci_queue *produce_q,
- struct vmci_queue *consume_q,
- bool can_block)
+ struct vmci_queue *consume_q)
{
int result;
if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
- if (can_block)
- result = qp_host_map_queues(produce_q, consume_q);
- else
- result = VMCI_ERROR_QUEUEPAIR_NOT_READY;
-
+ result = qp_host_map_queues(produce_q, consume_q);
if (result < VMCI_SUCCESS)
return (produce_q->saved_header &&
consume_q->saved_header) ?
@@ -2601,8 +2555,7 @@ static int qp_get_queue_headers(const struct vmci_qp *qpair,
{
int result;
- result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q,
- vmci_can_block(qpair->flags));
+ result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
if (result == VMCI_SUCCESS) {
*produce_q_header = qpair->produce_q->q_header;
*consume_q_header = qpair->consume_q->q_header;
@@ -2645,9 +2598,6 @@ static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
{
unsigned int generation;
- if (qpair->flags & VMCI_QPFLAG_NONBLOCK)
- return false;
-
qpair->blocked++;
generation = qpair->generation;
qp_unlock(qpair);
@@ -2674,15 +2624,14 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
const u64 produce_q_size,
const void *buf,
size_t buf_size,
- vmci_memcpy_to_queue_func memcpy_to_queue,
- bool can_block)
+ vmci_memcpy_to_queue_func memcpy_to_queue)
{
s64 free_space;
u64 tail;
size_t written;
ssize_t result;
- result = qp_map_queue_headers(produce_q, consume_q, can_block);
+ result = qp_map_queue_headers(produce_q, consume_q);
if (unlikely(result != VMCI_SUCCESS))
return result;
@@ -2737,15 +2686,14 @@ static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
void *buf,
size_t buf_size,
vmci_memcpy_from_queue_func memcpy_from_queue,
- bool update_consumer,
- bool can_block)
+ bool update_consumer)
{
s64 buf_ready;
u64 head;
size_t read;
ssize_t result;
- result = qp_map_queue_headers(produce_q, consume_q, can_block);
+ result = qp_map_queue_headers(produce_q, consume_q);
if (unlikely(result != VMCI_SUCCESS))
return result;
@@ -2842,32 +2790,11 @@ int vmci_qpair_alloc(struct vmci_qp **qpair,
route = vmci_guest_code_active() ?
VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
- /* If NONBLOCK or PINNED is set, we better be the guest personality. */
- if ((!vmci_can_block(flags) || vmci_qp_pinned(flags)) &&
- VMCI_ROUTE_AS_GUEST != route) {
- pr_devel("Not guest personality w/ NONBLOCK OR PINNED set");
+ if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
+ pr_devel("NONBLOCK OR PINNED set");
return VMCI_ERROR_INVALID_ARGS;
}
- /*
- * Limit the size of pinned QPs and check sanity.
- *
- * Pinned pages implies non-blocking mode. Mutexes aren't acquired
- * when the NONBLOCK flag is set in qpair code; and also should not be
- * acquired when the PINNED flagged is set. Since pinning pages
- * implies we want speed, it makes no sense not to have NONBLOCK
- * set if PINNED is set. Hence enforce this implication.
- */
- if (vmci_qp_pinned(flags)) {
- if (vmci_can_block(flags)) {
- pr_err("Attempted to enable pinning w/o non-blocking");
- return VMCI_ERROR_INVALID_ARGS;
- }
-
- if (produce_qsize + consume_qsize > VMCI_MAX_PINNED_QP_MEMORY)
- return VMCI_ERROR_NO_RESOURCES;
- }
-
my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
if (!my_qpair)
return VMCI_ERROR_NO_MEM;
@@ -3195,8 +3122,7 @@ ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
qpair->consume_q,
qpair->produce_q_size,
buf, buf_size,
- qp_memcpy_to_queue,
- vmci_can_block(qpair->flags));
+ qp_memcpy_to_queue);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
@@ -3237,8 +3163,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
qpair->consume_q,
qpair->consume_q_size,
buf, buf_size,
- qp_memcpy_from_queue, true,
- vmci_can_block(qpair->flags));
+ qp_memcpy_from_queue, true);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
@@ -3280,8 +3205,7 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
qpair->consume_q,
qpair->consume_q_size,
buf, buf_size,
- qp_memcpy_from_queue, false,
- vmci_can_block(qpair->flags));
+ qp_memcpy_from_queue, false);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
@@ -3323,8 +3247,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
qpair->consume_q,
qpair->produce_q_size,
iov, iov_size,
- qp_memcpy_to_queue_iov,
- vmci_can_block(qpair->flags));
+ qp_memcpy_to_queue_iov);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
@@ -3367,7 +3290,7 @@ ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
qpair->consume_q_size,
iov, iov_size,
qp_memcpy_from_queue_iov,
- true, vmci_can_block(qpair->flags));
+ true);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
@@ -3411,7 +3334,7 @@ ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
qpair->consume_q_size,
iov, iov_size,
qp_memcpy_from_queue_iov,
- false, vmci_can_block(qpair->flags));
+ false);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h
index 58c6959f6b6..ed177f04ef2 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.h
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h
@@ -146,24 +146,6 @@ VMCI_QP_PAGESTORE_IS_WELLFORMED(struct vmci_qp_page_store *page_store)
return page_store->len >= 2;
}
-/*
- * Helper function to check if the non-blocking flag
- * is set for a given queue pair.
- */
-static inline bool vmci_can_block(u32 flags)
-{
- return !(flags & VMCI_QPFLAG_NONBLOCK);
-}
-
-/*
- * Helper function to check if the queue pair is pinned
- * into memory.
- */
-static inline bool vmci_qp_pinned(u32 flags)
-{
- return flags & VMCI_QPFLAG_PINNED;
-}
-
void vmci_qp_broker_exit(void);
int vmci_qp_broker_alloc(struct vmci_handle handle, u32 peer,
u32 flags, u32 priv_flags,