Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/Kconfig        |  12
-rw-r--r--  drivers/base/Makefile       |   1
-rw-r--r--  drivers/base/base.h         |   1
-rw-r--r--  drivers/base/core.c         |   2
-rw-r--r--  drivers/base/dd.c           |   3
-rw-r--r--  drivers/base/devres.c       | 644
-rw-r--r--  drivers/base/dma-mapping.c  | 218
-rw-r--r--  drivers/base/dmapool.c      |  59
-rw-r--r--  drivers/pci/pci.c           | 127
9 files changed, 1066 insertions(+), 1 deletion(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 1429f3a2629..5d6312e3349 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -37,6 +37,18 @@ config DEBUG_DRIVER If you are unsure about this, say N here. +config DEBUG_DEVRES + bool "Managed device resources verbose debug messages" + depends on DEBUG_KERNEL + help + This option enables kernel parameter devres.log. If set to + non-zero, devres debug messages are printed. Select this if + you are having a problem with devres or want to debug + resource management for a managed device. devres.log can be + switched on and off from sysfs node. + + If you are unsure about this, Say N here. + config SYS_HYPERVISOR bool default n diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 7bbb9eeda23..e9eb7382ac3 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -3,6 +3,7 @@ obj-y := core.o sys.o bus.o dd.o \ driver.o class.o platform.o \ cpu.o firmware.o init.o map.o dmapool.o \ + dma-mapping.o devres.o \ attribute_container.o transport_class.o obj-y += power/ obj-$(CONFIG_ISA) += isa.o diff --git a/drivers/base/base.h b/drivers/base/base.h index d26644a5953..de7e1442ce6 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -44,3 +44,4 @@ struct class_device_attribute *to_class_dev_attr(struct attribute *_attr) extern char *make_class_name(const char *name, struct kobject *kobj); +extern void devres_release_all(struct device *dev); diff --git a/drivers/base/core.c b/drivers/base/core.c index e13614241c9..a8ac34ba610 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -428,6 +428,8 @@ void device_initialize(struct device *dev) INIT_LIST_HEAD(&dev->dma_pools); INIT_LIST_HEAD(&dev->node); init_MUTEX(&dev->sem); + spin_lock_init(&dev->devres_lock); + INIT_LIST_HEAD(&dev->devres_head); device_init_wakeup(dev, 0); set_dev_node(dev, -1); } diff --git a/drivers/base/dd.c b/drivers/base/dd.c index b5bf243d9cd..6a48824e43f 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -112,6 +112,7 @@ static int really_probe(void *void_data) atomic_inc(&probe_count); pr_debug("%s: Probing driver %s with device %s\n", drv->bus->name, drv->name, dev->bus_id); + WARN_ON(!list_empty(&dev->devres_head)); dev->driver = drv; if (driver_sysfs_add(dev)) { @@ -137,6 +138,7 @@ static int really_probe(void *void_data) goto done; probe_failed: + devres_release_all(dev); driver_sysfs_remove(dev); dev->driver = NULL; @@ -327,6 +329,7 @@ static void __device_release_driver(struct device * dev) dev->bus->remove(dev); else if (drv->remove) drv->remove(dev); + devres_release_all(dev); dev->driver = NULL; put_driver(drv); } diff --git a/drivers/base/devres.c b/drivers/base/devres.c new file mode 100644 index 00000000000..e177c9533b6 --- /dev/null +++ b/drivers/base/devres.c @@ -0,0 +1,644 @@ +/* + * drivers/base/devres.c - device resource management + * + * Copyright (c) 2006 SUSE Linux Products GmbH + * Copyright (c) 2006 Tejun Heo <teheo@suse.de> + * + * This file is released under the GPLv2. 
+ */ + +#include <linux/device.h> +#include <linux/module.h> + +struct devres_node { + struct list_head entry; + dr_release_t release; +#ifdef CONFIG_DEBUG_DEVRES + const char *name; + size_t size; +#endif +}; + +struct devres { + struct devres_node node; + /* -- 3 pointers */ + unsigned long long data[]; /* guarantee ull alignment */ +}; + +struct devres_group { + struct devres_node node[2]; + void *id; + int color; + /* -- 8 pointers */ +}; + +#ifdef CONFIG_DEBUG_DEVRES +static int log_devres = 0; +module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR); + +static void set_node_dbginfo(struct devres_node *node, const char *name, + size_t size) +{ + node->name = name; + node->size = size; +} + +static void devres_log(struct device *dev, struct devres_node *node, + const char *op) +{ + if (unlikely(log_devres)) + dev_printk(KERN_ERR, dev, "DEVRES %3s %p %s (%lu bytes)\n", + op, node, node->name, (unsigned long)node->size); +} +#else /* CONFIG_DEBUG_DEVRES */ +#define set_node_dbginfo(node, n, s) do {} while (0) +#define devres_log(dev, node, op) do {} while (0) +#endif /* CONFIG_DEBUG_DEVRES */ + +/* + * Release functions for devres group. These callbacks are used only + * for identification. + */ +static void group_open_release(struct device *dev, void *res) +{ + /* noop */ +} + +static void group_close_release(struct device *dev, void *res) +{ + /* noop */ +} + +static struct devres_group * node_to_group(struct devres_node *node) +{ + if (node->release == &group_open_release) + return container_of(node, struct devres_group, node[0]); + if (node->release == &group_close_release) + return container_of(node, struct devres_group, node[1]); + return NULL; +} + +static __always_inline struct devres * alloc_dr(dr_release_t release, + size_t size, gfp_t gfp) +{ + size_t tot_size = sizeof(struct devres) + size; + struct devres *dr; + + dr = kmalloc_track_caller(tot_size, gfp); + if (unlikely(!dr)) + return NULL; + + memset(dr, 0, tot_size); + INIT_LIST_HEAD(&dr->node.entry); + dr->node.release = release; + return dr; +} + +static void add_dr(struct device *dev, struct devres_node *node) +{ + devres_log(dev, node, "ADD"); + BUG_ON(!list_empty(&node->entry)); + list_add_tail(&node->entry, &dev->devres_head); +} + +/** + * devres_alloc - Allocate device resource data + * @release: Release function devres will be associated with + * @size: Allocation size + * @gfp: Allocation flags + * + * allocate devres of @size bytes. The allocated area is zeroed, then + * associated with @release. The returned pointer can be passed to + * other devres_*() functions. + * + * RETURNS: + * Pointer to allocated devres on success, NULL on failure. + */ +#ifdef CONFIG_DEBUG_DEVRES +void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp, + const char *name) +{ + struct devres *dr; + + dr = alloc_dr(release, size, gfp); + if (unlikely(!dr)) + return NULL; + set_node_dbginfo(&dr->node, name, size); + return dr->data; +} +EXPORT_SYMBOL_GPL(__devres_alloc); +#else +void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp) +{ + struct devres *dr; + + dr = alloc_dr(release, size, gfp); + if (unlikely(!dr)) + return NULL; + return dr->data; +} +EXPORT_SYMBOL_GPL(devres_alloc); +#endif + +/** + * devres_free - Free device resource data + * @res: Pointer to devres data to free + * + * Free devres created with devres_alloc(). 
+ */ +void devres_free(void *res) +{ + if (res) { + struct devres *dr = container_of(res, struct devres, data); + + BUG_ON(!list_empty(&dr->node.entry)); + kfree(dr); + } +} +EXPORT_SYMBOL_GPL(devres_free); + +/** + * devres_add - Register device resource + * @dev: Device to add resource to + * @res: Resource to register + * + * Register devres @res to @dev. @res should have been allocated + * using devres_alloc(). On driver detach, the associated release + * function will be invoked and devres will be freed automatically. + */ +void devres_add(struct device *dev, void *res) +{ + struct devres *dr = container_of(res, struct devres, data); + unsigned long flags; + + spin_lock_irqsave(&dev->devres_lock, flags); + add_dr(dev, &dr->node); + spin_unlock_irqrestore(&dev->devres_lock, flags); +} +EXPORT_SYMBOL_GPL(devres_add); + +static struct devres *find_dr(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data) +{ + struct devres_node *node; + + list_for_each_entry_reverse(node, &dev->devres_head, entry) { + struct devres *dr = container_of(node, struct devres, node); + + if (node->release != release) + continue; + if (match && !match(dev, dr->data, match_data)) + continue; + return dr; + } + + return NULL; +} + +/** + * devres_find - Find device resource + * @dev: Device to lookup resource from + * @release: Look for resources associated with this release function + * @match: Match function (optional) + * @match_data: Data for the match function + * + * Find the latest devres of @dev which is associated with @release + * and for which @match returns 1. If @match is NULL, it's considered + * to match all. + * + * RETURNS: + * Pointer to found devres, NULL if not found. + */ +void * devres_find(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data) +{ + struct devres *dr; + unsigned long flags; + + spin_lock_irqsave(&dev->devres_lock, flags); + dr = find_dr(dev, release, match, match_data); + spin_unlock_irqrestore(&dev->devres_lock, flags); + + if (dr) + return dr->data; + return NULL; +} +EXPORT_SYMBOL_GPL(devres_find); + +/** + * devres_get - Find devres, if non-existent, add one atomically + * @dev: Device to lookup or add devres for + * @new_res: Pointer to new initialized devres to add if not found + * @match: Match function (optional) + * @match_data: Data for the match function + * + * Find the latest devres of @dev which has the same release function + * as @new_res and for which @match return 1. If found, @new_res is + * freed; otherwise, @new_res is added atomically. + * + * RETURNS: + * Pointer to found or added devres. + */ +void * devres_get(struct device *dev, void *new_res, + dr_match_t match, void *match_data) +{ + struct devres *new_dr = container_of(new_res, struct devres, data); + struct devres *dr; + unsigned long flags; + + spin_lock_irqsave(&dev->devres_lock, flags); + dr = find_dr(dev, new_dr->node.release, match, match_data); + if (!dr) { + add_dr(dev, &new_dr->node); + dr = new_dr; + new_dr = NULL; + } + spin_unlock_irqrestore(&dev->devres_lock, flags); + devres_free(new_dr); + + return dr->data; +} +EXPORT_SYMBOL_GPL(devres_get); + +/** + * devres_remove - Find a device resource and remove it + * @dev: Device to find resource from + * @release: Look for resources associated with this release function + * @match: Match function (optional) + * @match_data: Data for the match function + * + * Find the latest devres of @dev associated with @release and for + * which @match returns 1. 
If @match is NULL, it's considered to + * match all. If found, the resource is removed atomically and + * returned. + * + * RETURNS: + * Pointer to removed devres on success, NULL if not found. + */ +void * devres_remove(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data) +{ + struct devres *dr; + unsigned long flags; + + spin_lock_irqsave(&dev->devres_lock, flags); + dr = find_dr(dev, release, match, match_data); + if (dr) { + list_del_init(&dr->node.entry); + devres_log(dev, &dr->node, "REM"); + } + spin_unlock_irqrestore(&dev->devres_lock, flags); + + if (dr) + return dr->data; + return NULL; +} +EXPORT_SYMBOL_GPL(devres_remove); + +/** + * devres_destroy - Find a device resource and destroy it + * @dev: Device to find resource from + * @release: Look for resources associated with this release function + * @match: Match function (optional) + * @match_data: Data for the match function + * + * Find the latest devres of @dev associated with @release and for + * which @match returns 1. If @match is NULL, it's considered to + * match all. If found, the resource is removed atomically and freed. + * + * RETURNS: + * 0 if devres is found and freed, -ENOENT if not found. + */ +int devres_destroy(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data) +{ + void *res; + + res = devres_remove(dev, release, match, match_data); + if (unlikely(!res)) + return -ENOENT; + + devres_free(res); + return 0; +} +EXPORT_SYMBOL_GPL(devres_destroy); + +static int remove_nodes(struct device *dev, + struct list_head *first, struct list_head *end, + struct list_head *todo) +{ + int cnt = 0, nr_groups = 0; + struct list_head *cur; + + /* First pass - move normal devres entries to @todo and clear + * devres_group colors. + */ + cur = first; + while (cur != end) { + struct devres_node *node; + struct devres_group *grp; + + node = list_entry(cur, struct devres_node, entry); + cur = cur->next; + + grp = node_to_group(node); + if (grp) { + /* clear color of group markers in the first pass */ + grp->color = 0; + nr_groups++; + } else { + /* regular devres entry */ + if (&node->entry == first) + first = first->next; + list_move_tail(&node->entry, todo); + cnt++; + } + } + + if (!nr_groups) + return cnt; + + /* Second pass - Scan groups and color them. A group gets + * color value of two iff the group is wholly contained in + * [cur, end). That is, for a closed group, both opening and + * closing markers should be in the range, while just the + * opening marker is enough for an open group. + */ + cur = first; + while (cur != end) { + struct devres_node *node; + struct devres_group *grp; + + node = list_entry(cur, struct devres_node, entry); + cur = cur->next; + + grp = node_to_group(node); + BUG_ON(!grp || list_empty(&grp->node[0].entry)); + + grp->color++; + if (list_empty(&grp->node[1].entry)) + grp->color++; + + BUG_ON(grp->color <= 0 || grp->color > 2); + if (grp->color == 2) { + /* No need to update cur or end. The removed + * nodes are always before both. + */ + list_move_tail(&grp->node[0].entry, todo); + list_del_init(&grp->node[1].entry); + } + } + + return cnt; +} + +static int release_nodes(struct device *dev, struct list_head *first, + struct list_head *end, unsigned long flags) +{ + LIST_HEAD(todo); + int cnt; + struct devres *dr, *tmp; + + cnt = remove_nodes(dev, first, end, &todo); + + spin_unlock_irqrestore(&dev->devres_lock, flags); + + /* Release. Note that both devres and devres_group are + * handled as devres in the following loop. This is safe. 
+ */ + list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) { + devres_log(dev, &dr->node, "REL"); + dr->node.release(dev, dr->data); + kfree(dr); + } + + return cnt; +} + +/** + * devres_release_all - Release all resources + * @dev: Device to release resources for + * + * Release all resources associated with @dev. This function is + * called on driver detach. + */ +int devres_release_all(struct device *dev) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->devres_lock, flags); + return release_nodes(dev, dev->devres_head.next, &dev->devres_head, + flags); +} + +/** + * devres_open_group - Open a new devres group + * @dev: Device to open devres group for + * @id: Separator ID + * @gfp: Allocation flags + * + * Open a new devres group for @dev with @id. For @id, using a + * pointer to an object which won't be used for another group is + * recommended. If @id is NULL, address-wise unique ID is created. + * + * RETURNS: + * ID of the new group, NULL on failure. + */ +void * devres_open_group(struct device *dev, void *id, gfp_t gfp) +{ + struct devres_group *grp; + unsigned long flags; + + grp = kmalloc(sizeof(*grp), gfp); + if (unlikely(!grp)) + return NULL; + + grp->node[0].release = &group_open_release; + grp->node[1].release = &group_close_release; + INIT_LIST_HEAD(&grp->node[0].entry); + INIT_LIST_HEAD(&grp->node[1].entry); + set_node_dbginfo(&grp->node[0], "grp<", 0); + set_node_dbginfo(&grp->node[1], "grp>", 0); + grp->id = grp; + if (id) + grp->id = id; + + spin_lock_irqsave(&dev->devres_lock, flags); + add_dr(dev, &grp->node[0]); + spin_unlock_irqrestore(&dev->devres_lock, flags); + return grp->id; +} +EXPORT_SYMBOL_GPL(devres_open_group); + +/* Find devres group with ID @id. If @id is NULL, look for the latest. */ +static struct devres_group * find_group(struct device *dev, void *id) +{ + struct devres_node *node; + + list_for_each_entry_reverse(node, &dev->devres_head, entry) { + struct devres_group *grp; + + if (node->release != &group_open_release) + continue; + + grp = container_of(node, struct devres_group, node[0]); + + if (id) { + if (grp->id == id) + return grp; + } else if (list_empty(&grp->node[1].entry)) + return grp; + } + + return NULL; +} + +/** + * devres_close_group - Close a devres group + * @dev: Device to close devres group for + * @id: ID of target group, can be NULL + * + * Close the group identified by @id. If @id is NULL, the latest open + * group is selected. + */ +void devres_close_group(struct device *dev, void *id) +{ + struct devres_group *grp; + unsigned long flags; + + spin_lock_irqsave(&dev->devres_lock, flags); + + grp = find_group(dev, id); + if (grp) + add_dr(dev, &grp->node[1]); + else + WARN_ON(1); + + spin_unlock_irqrestore(&dev->devres_lock, flags); +} +EXPORT_SYMBOL_GPL(devres_close_group); + +/** + * devres_remove_group - Remove a devres group + * @dev: Device to remove group for + * @id: ID of target group, can be NULL + * + * Remove the group identified by @id. If @id is NULL, the latest + * open group is selected. Note that removing a group doesn't affect + * any other resources. 
+ */ +void devres_remove_group(struct device *dev, void *id) +{ + struct devres_group *grp; + unsigned long flags; + + spin_lock_irqsave(&dev->devres_lock, flags); + + grp = find_group(dev, id); + if (grp) { + list_del_init(&grp->node[0].entry); + list_del_init(&grp->node[1].entry); + devres_log(dev, &grp->node[0], "REM"); + } else + WARN_ON(1); + + spin_unlock_irqrestore(&dev->devres_lock, flags); + + kfree(grp); +} +EXPORT_SYMBOL_GPL(devres_remove_group); + +/** + * devres_release_group - Release resources in a devres group + * @dev: Device to release group for + * @id: ID of target group, can be NULL + * + * Release all resources in the group identified by @id. If @id is + * NULL, the latest open group is selected. The selected group and + * groups properly nested inside the selected group are removed. + * + * RETURNS: + * The number of released non-group resources. + */ +int devres_release_group(struct device *dev, void *id) +{ + struct devres_group *grp; + unsigned long flags; + int cnt = 0; + + spin_lock_irqsave(&dev->devres_lock, flags); + + grp = find_group(dev, id); + if (grp) { + struct list_head *first = &grp->node[0].entry; + struct list_head *end = &dev->devres_head; + + if (!list_empty(&grp->node[1].entry)) + end = grp->node[1].entry.next; + + cnt = release_nodes(dev, first, end, flags); + } else { + WARN_ON(1); + spin_unlock_irqrestore(&dev->devres_lock, flags); + } + + return cnt; +} +EXPORT_SYMBOL_GPL(devres_release_group); + +/* + * Managed kzalloc/kfree + */ +static void devm_kzalloc_release(struct device *dev, void *res) +{ + /* noop */ +} + +static int devm_kzalloc_match(struct device *dev, void *res, void *data) +{ + return res == data; +} + +/** + * devm_kzalloc - Managed kzalloc + * @dev: Device to allocate memory for + * @size: Allocation size + * @gfp: Allocation gfp flags + * + * Managed kzalloc. Memory allocated with this function is + * automatically freed on driver detach. Like all other devres + * resources, guaranteed alignment is unsigned long long. + * + * RETURNS: + * Pointer to allocated memory on success, NULL on failure. + */ +void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) +{ + struct devres *dr; + + /* use raw alloc_dr for kmalloc caller tracing */ + dr = alloc_dr(devm_kzalloc_release, size, gfp); + if (unlikely(!dr)) + return NULL; + + set_node_dbginfo(&dr->node, "devm_kzalloc_release", size); + devres_add(dev, dr->data); + return dr->data; +} +EXPORT_SYMBOL_GPL(devm_kzalloc); + +/** + * devm_kfree - Managed kfree + * @dev: Device this memory belongs to + * @p: Memory to free + * + * Free memory allocated with dev_kzalloc(). + */ +void devm_kfree(struct device *dev, void *p) +{ + int rc; + + rc = devres_destroy(dev, devm_kzalloc_release, devm_kzalloc_match, p); + WARN_ON(rc); +} +EXPORT_SYMBOL_GPL(devm_kfree); diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c new file mode 100644 index 00000000000..ca9186f70a6 --- /dev/null +++ b/drivers/base/dma-mapping.c @@ -0,0 +1,218 @@ +/* + * drivers/base/dma-mapping.c - arch-independent dma-mapping routines + * + * Copyright (c) 2006 SUSE Linux Products GmbH + * Copyright (c) 2006 Tejun Heo <teheo@suse.de> + * + * This file is released under the GPLv2. 
+ */ + +#include <linux/dma-mapping.h> + +/* + * Managed DMA API + */ +struct dma_devres { + size_t size; + void *vaddr; + dma_addr_t dma_handle; +}; + +static void dmam_coherent_release(struct device *dev, void *res) +{ + struct dma_devres *this = res; + + dma_free_coherent(dev, this->size, this->vaddr, this->dma_handle); +} + +static void dmam_noncoherent_release(struct device *dev, void *res) +{ + struct dma_devres *this = res; + + dma_free_noncoherent(dev, this->size, this->vaddr, this->dma_handle); +} + +static int dmam_match(struct device *dev, void *res, void *match_data) +{ + struct dma_devres *this = res, *match = match_data; + + if (this->vaddr == match->vaddr) { + WARN_ON(this->size != match->size || + this->dma_handle != match->dma_handle); + return 1; + } + return 0; +} + +/** + * dmam_alloc_coherent - Managed dma_alloc_coherent() + * @dev: Device to allocate coherent memory for + * @size: Size of allocation + * @dma_handle: Out argument for allocated DMA handle + * @gfp: Allocation flags + * + * Managed dma_alloc_coherent(). Memory allocated using this function + * will be automatically released on driver detach. + * + * RETURNS: + * Pointer to allocated memory on success, NULL on failure. + */ +void * dmam_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp) +{ + struct dma_devres *dr; + void *vaddr; + + dr = devres_alloc(dmam_coherent_release, sizeof(*dr), gfp); + if (!dr) + return NULL; + + vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp); + if (!vaddr) { + devres_free(dr); + return NULL; + } + + dr->vaddr = vaddr; + dr->dma_handle = *dma_handle; + dr->size = size; + + devres_add(dev, dr); + + return vaddr; +} +EXPORT_SYMBOL(dmam_alloc_coherent); + +/** + * dmam_free_coherent - Managed dma_free_coherent() + * @dev: Device to free coherent memory for + * @size: Size of allocation + * @vaddr: Virtual address of the memory to free + * @dma_handle: DMA handle of the memory to free + * + * Managed dma_free_coherent(). + */ +void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle) +{ + struct dma_devres match_data = { size, vaddr, dma_handle }; + + dma_free_coherent(dev, size, vaddr, dma_handle); + WARN_ON(devres_destroy(dev, dmam_coherent_release, dmam_match, + &match_data)); +} +EXPORT_SYMBOL(dmam_free_coherent); + +/** + * dmam_alloc_non_coherent - Managed dma_alloc_non_coherent() + * @dev: Device to allocate non_coherent memory for + * @size: Size of allocation + * @dma_handle: Out argument for allocated DMA handle + * @gfp: Allocation flags + * + * Managed dma_alloc_non_coherent(). Memory allocated using this + * function will be automatically released on driver detach. + * + * RETURNS: + * Pointer to allocated memory on success, NULL on failure. 
+ */ +void *dmam_alloc_noncoherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp) +{ + struct dma_devres *dr; + void *vaddr; + + dr = devres_alloc(dmam_noncoherent_release, sizeof(*dr), gfp); + if (!dr) + return NULL; + + vaddr = dma_alloc_noncoherent(dev, size, dma_handle, gfp); + if (!vaddr) { + devres_free(dr); + return NULL; + } + + dr->vaddr = vaddr; + dr->dma_handle = *dma_handle; + dr->size = size; + + devres_add(dev, dr); + + return vaddr; +} +EXPORT_SYMBOL(dmam_alloc_noncoherent); + +/** + * dmam_free_coherent - Managed dma_free_noncoherent() + * @dev: Device to free noncoherent memory for + * @size: Size of allocation + * @vaddr: Virtual address of the memory to free + * @dma_handle: DMA handle of the memory to free + * + * Managed dma_free_noncoherent(). + */ +void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle) +{ + struct dma_devres match_data = { size, vaddr, dma_handle }; + + dma_free_noncoherent(dev, size, vaddr, dma_handle); + WARN_ON(!devres_destroy(dev, dmam_noncoherent_release, dmam_match, + &match_data)); +} +EXPORT_SYMBOL(dmam_free_noncoherent); + +#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY + +static void dmam_coherent_decl_release(struct device *dev, void *res) +{ + dma_release_declared_memory(dev); +} + +/** + * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory() + * @dev: Device to declare coherent memory for + * @bus_addr: Bus address of coherent memory to be declared + * @device_addr: Device address of coherent memory to be declared + * @size: Size of coherent memory to be declared + * @flags: Flags + * + * Managed dma_declare_coherent_memory(). + * + * RETURNS: + * 0 on success, -errno on failure. + */ +int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, + dma_addr_t device_addr, size_t size, int flags) +{ + void *res; + int rc; + + res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL); + if (!res) + return -ENOMEM; + + rc = dma_declare_coherent_memory(dev, bus_addr, device_addr, size, + flags); + if (rc == 0) + devres_add(dev, res); + else + devres_free(res); + + return rc; +} +EXPORT_SYMBOL(dmam_declare_coherent_memory); + +/** + * dmam_release_declared_memory - Managed dma_release_declared_memory(). + * @dev: Device to release declared coherent memory for + * + * Managed dmam_release_declared_memory(). + */ +void dmam_release_declared_memory(struct device *dev) +{ + WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL)); +} +EXPORT_SYMBOL(dmam_release_declared_memory); + +#endif diff --git a/drivers/base/dmapool.c b/drivers/base/dmapool.c index f95d5027727..cd467c9f33b 100644 --- a/drivers/base/dmapool.c +++ b/drivers/base/dmapool.c @@ -415,8 +415,67 @@ dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma) spin_unlock_irqrestore (&pool->lock, flags); } +/* + * Managed DMA pool + */ +static void dmam_pool_release(struct device *dev, void *res) +{ + struct dma_pool *pool = *(struct dma_pool **)res; + + dma_pool_destroy(pool); +} + +static int dmam_pool_match(struct device *dev, void *res, void *match_data) +{ + return *(struct dma_pool **)res == match_data; +} + +/** + * dmam_pool_create - Managed dma_pool_create() + * @name: name of pool, for diagnostics + * @dev: device that will be doing the DMA + * @size: size of the blocks in this pool. + * @align: alignment requirement for blocks; must be a power of two + * @allocation: returned blocks won't cross this boundary (or zero) + * + * Managed dma_pool_create(). 
DMA pool created with this function is + * automatically destroyed on driver detach. + */ +struct dma_pool *dmam_pool_create(const char *name, struct device *dev, + size_t size, size_t align, size_t allocation) +{ + struct dma_pool **ptr, *pool; + + ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return NULL; + + pool = *ptr = dma_pool_create(name, dev, size, align, allocation); + if (pool) + devres_add(dev, ptr); + else + devres_free(ptr); + + return pool; +} + +/** + * dmam_pool_destroy - Managed dma_pool_destroy() + * @pool: dma pool that will be destroyed + * + * Managed dma_pool_destroy(). + */ +void dmam_pool_destroy(struct dma_pool *pool) +{ + struct device *dev = pool->dev; + + dma_pool_destroy(pool); + WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool)); +} EXPORT_SYMBOL (dma_pool_create); EXPORT_SYMBOL (dma_pool_destroy); EXPORT_SYMBOL (dma_pool_alloc); EXPORT_SYMBOL (dma_pool_free); +EXPORT_SYMBOL (dmam_pool_create); +EXPORT_SYMBOL (dmam_pool_destroy); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 84c757ba066..8b44cff2c17 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -744,6 +744,104 @@ int pci_enable_device(struct pci_dev *dev) return pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1); } +/* + * Managed PCI resources. This manages device on/off, intx/msi/msix + * on/off and BAR regions. pci_dev itself records msi/msix status, so + * there's no need to track it separately. pci_devres is initialized + * when a device is enabled using managed PCI device enable interface. + */ +struct pci_devres { + unsigned int disable:1; + unsigned int orig_intx:1; + unsigned int restore_intx:1; + u32 region_mask; +}; + +static void pcim_release(struct device *gendev, void *res) +{ + struct pci_dev *dev = container_of(gendev, struct pci_dev, dev); + struct pci_devres *this = res; + int i; + + if (dev->msi_enabled) + pci_disable_msi(dev); + if (dev->msix_enabled) + pci_disable_msix(dev); + + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + if (this->region_mask & (1 << i)) + pci_release_region(dev, i); + + if (this->restore_intx) + pci_intx(dev, this->orig_intx); + + if (this->disable) + pci_disable_device(dev); +} + +static struct pci_devres * get_pci_dr(struct pci_dev *pdev) +{ + struct pci_devres *dr, *new_dr; + + dr = devres_find(&pdev->dev, pcim_release, NULL, NULL); + if (dr) + return dr; + + new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL); + if (!new_dr) + return NULL; + return devres_get(&pdev->dev, new_dr, NULL, NULL); +} + +static struct pci_devres * find_pci_dr(struct pci_dev *pdev) +{ + if (pci_is_managed(pdev)) + return devres_find(&pdev->dev, pcim_release, NULL, NULL); + return NULL; +} + +/** + * pcim_enable_device - Managed pci_enable_device() + * @pdev: PCI device to be initialized + * + * Managed pci_enable_device(). + */ +int pcim_enable_device(struct pci_dev *pdev) +{ + struct pci_devres *dr; + int rc; + + dr = get_pci_dr(pdev); + if (unlikely(!dr)) + return -ENOMEM; + WARN_ON(!!dr->disable); + + rc = pci_enable_device(pdev); + if (!rc) { + pdev->is_managed = 1; + dr->disable = 1; + } + return rc; +} + +/** + * pcim_pin_device - Pin managed PCI device + * @pdev: PCI device to pin + * + * Pin managed PCI device @pdev. Pinned device won't be disabled on + * driver detach. @pdev must have been enabled with + * pcim_enable_device(). 
+ */ +void pcim_pin_device(struct pci_dev *pdev) +{ + struct pci_devres *dr; + + dr = find_pci_dr(pdev); + WARN_ON(!dr || !dr->disable); + if (dr) + dr->disable = 0; +} + /** * pcibios_disable_device - disable arch specific PCI resources for device dev * @dev: the PCI device to disable @@ -767,8 +865,13 @@ void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {} void pci_disable_device(struct pci_dev *dev) { + struct pci_devres *dr; u16 pci_command; + dr = find_pci_dr(dev); + if (dr) + dr->disable = 0; + if (atomic_sub_return(1, &dev->enable_cnt) != 0) return; @@ -867,6 +970,8 @@ pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) */ void pci_release_region(struct pci_dev *pdev, int bar) { + struct pci_devres *dr; + if (pci_resource_len(pdev, bar) == 0) return; if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) @@ -875,6 +980,10 @@ void pci_release_region(struct pci_dev *pdev, int bar) else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) release_mem_region(pci_resource_start(pdev, bar), pci_resource_len(pdev, bar)); + + dr = find_pci_dr(pdev); + if (dr) + dr->region_mask &= ~(1 << bar); } /** @@ -893,6 +1002,8 @@ void pci_release_region(struct pci_dev *pdev, int bar) */ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) { + struct pci_devres *dr; + if (pci_resource_len(pdev, bar) == 0) return 0; @@ -906,7 +1017,11 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) pci_resource_len(pdev, bar), res_name)) goto err_out; } - + + dr = find_pci_dr(pdev); + if (dr) + dr->region_mask |= 1 << bar; + return 0; err_out: @@ -1144,7 +1259,15 @@ pci_intx(struct pci_dev *pdev, int enable) } if (new != pci_command) { + struct pci_devres *dr; + pci_write_config_word(pdev, PCI_COMMAND, new); + + dr = find_pci_dr(pdev); + if (dr && !dr->restore_intx) { + dr->restore_intx = 1; + dr->orig_intx = !enable; + } } } @@ -1226,6 +1349,8 @@ device_initcall(pci_init); EXPORT_SYMBOL_GPL(pci_restore_bars); EXPORT_SYMBOL(pci_enable_device_bars); EXPORT_SYMBOL(pci_enable_device); +EXPORT_SYMBOL(pcim_enable_device); +EXPORT_SYMBOL(pcim_pin_device); EXPORT_SYMBOL(pci_disable_device); EXPORT_SYMBOL(pci_find_capability); EXPORT_SYMBOL(pci_bus_find_capability); |
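Usage illustration (not part of the commit): a minimal sketch of a probe routine that consumes the managed allocator added above. The "foo" driver, foo_probe() and struct foo_priv are hypothetical, and the devm_kzalloc()/devres prototypes are assumed to be made visible through <linux/device.h> by the header half of this series, which is not shown in this diff.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical per-device state for an imaginary "foo" driver. */
struct foo_priv {
	void __iomem *regs;
	int irq;
};

static int foo_probe(struct device *dev)
{
	struct foo_priv *priv;

	/*
	 * Memory obtained here is released by devres_release_all() when
	 * the driver detaches or when probing fails, so neither the error
	 * paths nor the remove routine need an explicit kfree().
	 */
	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	return 0;
}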
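Also illustrative only: the wrapper pattern devres_alloc()/devres_add()/devres_free() are designed for, sketched here around request_irq()/free_irq(). A managed IRQ helper of roughly this shape was added to the kernel around the same time, but the version below is written out by hand as a sketch and is not part of this commit; the _sketch suffix marks it as such.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>

/* Private payload stored in the devres data area. */
struct irq_devres {
	unsigned int irq;
	void *dev_id;
};

/* Release callback invoked by devres_release_all() on driver detach. */
static void devm_irq_release(struct device *dev, void *res)
{
	struct irq_devres *this = res;

	free_irq(this->irq, this->dev_id);
}

static int devm_request_irq_sketch(struct device *dev, unsigned int irq,
				   irq_handler_t handler,
				   unsigned long irqflags,
				   const char *devname, void *dev_id)
{
	struct irq_devres *dr;
	int rc;

	/* Allocate the devres node first so the failure path stays trivial. */
	dr = devres_alloc(devm_irq_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	rc = request_irq(irq, handler, irqflags, devname, dev_id);
	if (rc) {
		devres_free(dr);
		return rc;
	}

	dr->irq = irq;
	dr->dev_id = dev_id;
	devres_add(dev, dr);
	return 0;
}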
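A sketch of the group API for rolling back an optional sub-initialization as a unit. foo_setup_accel() and its stub init_foo_accel() are hypothetical; only the devres_open_group()/devres_release_group()/devres_close_group() calls come from this commit.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical sub-initialization that acquires managed resources. */
static int init_foo_accel(struct device *dev)
{
	return devm_kzalloc(dev, 4096, GFP_KERNEL) ? 0 : -ENOMEM;
}

static int foo_setup_accel(struct device *dev)
{
	void *grp;
	int rc;

	/* Open a group so everything acquired below can be undone together. */
	grp = devres_open_group(dev, NULL, GFP_KERNEL);
	if (!grp)
		return -ENOMEM;

	rc = init_foo_accel(dev);
	if (rc) {
		/*
		 * Release only what was acquired since the group was opened;
		 * resources registered earlier are left alone.
		 */
		devres_release_group(dev, grp);
		return rc;
	}

	/* Keep the resources, just mark the group boundary as closed. */
	devres_close_group(dev, grp);
	return 0;
}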
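Finally, a sketch of how the managed PCI and DMA helpers combine in a PCI driver's probe. The BAR number, buffer size, resource name and bar_probe() itself are placeholders; the behaviour relied on (the region and the coherent buffer being released and the device disabled automatically) is the one documented in pcim_release() and dmam_coherent_release() above.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pci.h>

#define BAR_BUF_SIZE	4096	/* hypothetical buffer size */

static int __devinit bar_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	void *buf;
	dma_addr_t buf_dma;
	int rc;

	/*
	 * The device is disabled again by pcim_release() on detach or on
	 * probe failure, unless pcim_pin_device() is called later.
	 */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/*
	 * Because the device is now managed, this BAR is recorded in
	 * pci_devres->region_mask and released automatically as well.
	 */
	rc = pci_request_region(pdev, 0, "bar_example");
	if (rc)
		return rc;

	/* Coherent DMA buffer freed by dmam_coherent_release() on detach. */
	buf = dmam_alloc_coherent(&pdev->dev, BAR_BUF_SIZE, &buf_dma,
				  GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pci_set_drvdata(pdev, buf);
	return 0;
}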