Diffstat (limited to 'drivers')
35 files changed, 2050 insertions, 774 deletions
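For illustration only — a minimal sketch (not part of the patch) of how a driver might use the fence API this series adds in drivers/dma-buf/fence.c. All "my_"-prefixed names are invented for the example. The fence_ops callbacks shown (get_driver_name, get_timeline_name, enable_signaling, wait) are the ones fence_init() insists on, and fence_default_wait() from the patch is used as the wait implementation.

#include <linux/fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* One irqsafe lock shared by all fences on this hypothetical timeline. */
static DEFINE_SPINLOCK(my_fence_lock);
static unsigned my_fence_context;	/* allocated once via fence_context_alloc(1) */
static unsigned my_fence_seqno;		/* a real driver would serialise updates */

static const char *my_get_driver_name(struct fence *fence)
{
	return "my_driver";
}

static const char *my_get_timeline_name(struct fence *fence)
{
	return "my_timeline";
}

static bool my_enable_signaling(struct fence *fence)
{
	/* Signaling is driven by fence_signal() from the completion IRQ. */
	return true;
}

static const struct fence_ops my_fence_ops = {
	.get_driver_name	= my_get_driver_name,
	.get_timeline_name	= my_get_timeline_name,
	.enable_signaling	= my_enable_signaling,
	.wait			= fence_default_wait,
	/* .release left NULL: fence_release() falls back to fence_free() */
};

static int __init my_fence_setup(void)
{
	my_fence_context = fence_context_alloc(1);
	return 0;
}

static struct fence *my_fence_create(void)
{
	struct fence *fence = kzalloc(sizeof(*fence), GFP_KERNEL);

	if (!fence)
		return NULL;

	fence_init(fence, &my_fence_ops, &my_fence_lock,
		   my_fence_context, ++my_fence_seqno);
	return fence;
}

A completion interrupt would then call fence_signal() (or fence_signal_locked() with my_fence_lock held) on the returned fence to unblock fence_wait_timeout() callers and run callbacks registered with fence_add_callback().

In the same spirit, a hedged sketch of the match-array interface added to drivers/base/component.c (component_match_add() plus component_master_add_with_match()); again, every "my_" identifier is hypothetical. Note that .add_components must be left unset when a match array is supplied, since component_master_add_with_match() rejects having both.

#include <linux/component.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* Compare callback: match a registered component against its DT node. */
static int my_compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static int my_master_bind(struct device *dev)
{
	/* bind the sub-components here, e.g. via component_bind_all() */
	return 0;
}

static void my_master_unbind(struct device *dev)
{
}

static const struct component_master_ops my_master_ops = {
	.bind	= my_master_bind,
	.unbind	= my_master_unbind,
	/* no .add_components: the match array below replaces it */
};

static int my_master_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	struct device_node *child;

	for_each_available_child_of_node(pdev->dev.of_node, child)
		component_match_add(&pdev->dev, &match, my_compare_of, child);

	return component_master_add_with_match(&pdev->dev, &my_master_ops,
					       match);
}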
diff --git a/drivers/Makefile b/drivers/Makefile index 54bfae1f09a..ebee55537a0 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -61,6 +61,7 @@ obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/ obj-$(CONFIG_PARPORT) += parport/ obj-y += base/ block/ misc/ mfd/ nfc/ +obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/ obj-$(CONFIG_NUBUS) += nubus/ obj-y += macintosh/ obj-$(CONFIG_IDE) += ide/ diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 23b8726962a..88500fed3c7 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -149,15 +149,21 @@ config EXTRA_FIRMWARE_DIR some other directory containing the firmware files. config FW_LOADER_USER_HELPER + bool + +config FW_LOADER_USER_HELPER_FALLBACK bool "Fallback user-helper invocation for firmware loading" depends on FW_LOADER - default y + select FW_LOADER_USER_HELPER help This option enables / disables the invocation of user-helper (e.g. udev) for loading firmware files as a fallback after the direct file loading in kernel fails. The user-mode helper is no longer required unless you have a special firmware file that - resides in a non-standard path. + resides in a non-standard path. Moreover, the udev support has + been deprecated upstream. + + If you are unsure about this, say N here. config DEBUG_DRIVER bool "Driver Core verbose debug messages" @@ -208,6 +214,15 @@ config DMA_SHARED_BUFFER APIs extension; the file's descriptor can then be passed on to other driver. +config FENCE_TRACE + bool "Enable verbose FENCE_TRACE messages" + depends on DMA_SHARED_BUFFER + help + Enable the FENCE_TRACE printks. This will add extra + spam to the console log, but will make it easier to diagnose + lockup related problems for dma-buffers shared across multiple + devices. + config DMA_CMA bool "DMA Contiguous Memory Allocator" depends on HAVE_DMA_CONTIGUOUS && CMA diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 04b314e0fa5..4aab26ec029 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -10,7 +10,6 @@ obj-$(CONFIG_DMA_CMA) += dma-contiguous.o obj-y += power/ obj-$(CONFIG_HAS_DMA) += dma-mapping.o obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o -obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o reservation.o obj-$(CONFIG_ISA) += isa.o obj-$(CONFIG_FW_LOADER) += firmware_class.o obj-$(CONFIG_NUMA) += node.o diff --git a/drivers/base/component.c b/drivers/base/component.c index c4778995cd7..f748430bb65 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -18,6 +18,15 @@ #include <linux/mutex.h> #include <linux/slab.h> +struct component_match { + size_t alloc; + size_t num; + struct { + void *data; + int (*fn)(struct device *, void *); + } compare[0]; +}; + struct master { struct list_head node; struct list_head components; @@ -25,6 +34,7 @@ struct master { const struct component_master_ops *ops; struct device *dev; + struct component_match *match; }; struct component { @@ -69,6 +79,11 @@ static void component_detach_master(struct master *master, struct component *c) c->master = NULL; } +/* + * Add a component to a master, finding the component via the compare + * function and compare data. This is safe to call for duplicate matches + * and will not result in the same component being added multiple times. 
+ */ int component_master_add_child(struct master *master, int (*compare)(struct device *, void *), void *compare_data) { @@ -76,11 +91,12 @@ int component_master_add_child(struct master *master, int ret = -ENXIO; list_for_each_entry(c, &component_list, node) { - if (c->master) + if (c->master && c->master != master) continue; if (compare(c->dev, compare_data)) { - component_attach_master(master, c); + if (!c->master) + component_attach_master(master, c); ret = 0; break; } @@ -90,6 +106,34 @@ int component_master_add_child(struct master *master, } EXPORT_SYMBOL_GPL(component_master_add_child); +static int find_components(struct master *master) +{ + struct component_match *match = master->match; + size_t i; + int ret = 0; + + if (!match) { + /* + * Search the list of components, looking for components that + * belong to this master, and attach them to the master. + */ + return master->ops->add_components(master->dev, master); + } + + /* + * Scan the array of match functions and attach + * any components which are found to this master. + */ + for (i = 0; i < match->num; i++) { + ret = component_master_add_child(master, + match->compare[i].fn, + match->compare[i].data); + if (ret) + break; + } + return ret; +} + /* Detach all attached components from this master */ static void master_remove_components(struct master *master) { @@ -113,44 +157,44 @@ static void master_remove_components(struct master *master) static int try_to_bring_up_master(struct master *master, struct component *component) { - int ret = 0; + int ret; - if (!master->bound) { - /* - * Search the list of components, looking for components that - * belong to this master, and attach them to the master. - */ - if (master->ops->add_components(master->dev, master)) { - /* Failed to find all components */ - master_remove_components(master); - ret = 0; - goto out; - } + if (master->bound) + return 0; - if (component && component->master != master) { - master_remove_components(master); - ret = 0; - goto out; - } + /* + * Search the list of components, looking for components that + * belong to this master, and attach them to the master. 
+ */ + if (find_components(master)) { + /* Failed to find all components */ + ret = 0; + goto out; + } - if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) { - ret = -ENOMEM; - goto out; - } + if (component && component->master != master) { + ret = 0; + goto out; + } - /* Found all components */ - ret = master->ops->bind(master->dev); - if (ret < 0) { - devres_release_group(master->dev, NULL); - dev_info(master->dev, "master bind failed: %d\n", ret); - master_remove_components(master); - goto out; - } + if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) { + ret = -ENOMEM; + goto out; + } - master->bound = true; - ret = 1; + /* Found all components */ + ret = master->ops->bind(master->dev); + if (ret < 0) { + devres_release_group(master->dev, NULL); + dev_info(master->dev, "master bind failed: %d\n", ret); + goto out; } + + master->bound = true; + return 1; + out: + master_remove_components(master); return ret; } @@ -180,18 +224,89 @@ static void take_down_master(struct master *master) master_remove_components(master); } -int component_master_add(struct device *dev, - const struct component_master_ops *ops) +static size_t component_match_size(size_t num) +{ + return offsetof(struct component_match, compare[num]); +} + +static struct component_match *component_match_realloc(struct device *dev, + struct component_match *match, size_t num) +{ + struct component_match *new; + + if (match && match->alloc == num) + return match; + + new = devm_kmalloc(dev, component_match_size(num), GFP_KERNEL); + if (!new) + return ERR_PTR(-ENOMEM); + + if (match) { + memcpy(new, match, component_match_size(min(match->num, num))); + devm_kfree(dev, match); + } else { + new->num = 0; + } + + new->alloc = num; + + return new; +} + +/* + * Add a component to be matched. + * + * The match array is first created or extended if necessary. + */ +void component_match_add(struct device *dev, struct component_match **matchptr, + int (*compare)(struct device *, void *), void *compare_data) +{ + struct component_match *match = *matchptr; + + if (IS_ERR(match)) + return; + + if (!match || match->num == match->alloc) { + size_t new_size = match ? match->alloc + 16 : 15; + + match = component_match_realloc(dev, match, new_size); + + *matchptr = match; + + if (IS_ERR(match)) + return; + } + + match->compare[match->num].fn = compare; + match->compare[match->num].data = compare_data; + match->num++; +} +EXPORT_SYMBOL(component_match_add); + +int component_master_add_with_match(struct device *dev, + const struct component_master_ops *ops, + struct component_match *match) { struct master *master; int ret; + if (ops->add_components && match) + return -EINVAL; + + if (match) { + /* Reallocate the match array for its true size */ + match = component_match_realloc(dev, match, match->num); + if (IS_ERR(match)) + return PTR_ERR(match); + } + master = kzalloc(sizeof(*master), GFP_KERNEL); if (!master) return -ENOMEM; master->dev = dev; master->ops = ops; + master->match = match; INIT_LIST_HEAD(&master->components); /* Add to the list of available masters. */ @@ -209,6 +324,13 @@ int component_master_add(struct device *dev, return ret < 0 ? 
ret : 0; } +EXPORT_SYMBOL_GPL(component_master_add_with_match); + +int component_master_add(struct device *dev, + const struct component_master_ops *ops) +{ + return component_master_add_with_match(dev, ops, NULL); +} EXPORT_SYMBOL_GPL(component_master_add); void component_master_del(struct device *dev, diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index d276e33880b..da77791793f 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -100,10 +100,16 @@ static inline long firmware_loading_timeout(void) #define FW_OPT_UEVENT (1U << 0) #define FW_OPT_NOWAIT (1U << 1) #ifdef CONFIG_FW_LOADER_USER_HELPER -#define FW_OPT_FALLBACK (1U << 2) +#define FW_OPT_USERHELPER (1U << 2) #else -#define FW_OPT_FALLBACK 0 +#define FW_OPT_USERHELPER 0 #endif +#ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK +#define FW_OPT_FALLBACK FW_OPT_USERHELPER +#else +#define FW_OPT_FALLBACK 0 +#endif +#define FW_OPT_NO_WARN (1U << 3) struct firmware_cache { /* firmware_buf instance will be added into the below list */ @@ -279,26 +285,15 @@ static const char * const fw_path[] = { module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644); MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path"); -/* Don't inline this: 'struct kstat' is biggish */ -static noinline_for_stack int fw_file_size(struct file *file) -{ - struct kstat st; - if (vfs_getattr(&file->f_path, &st)) - return -1; - if (!S_ISREG(st.mode)) - return -1; - if (st.size != (int)st.size) - return -1; - return st.size; -} - static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf) { int size; char *buf; int rc; - size = fw_file_size(file); + if (!S_ISREG(file_inode(file)->i_mode)) + return -EINVAL; + size = i_size_read(file_inode(file)); if (size <= 0) return -EINVAL; buf = vmalloc(size); @@ -718,7 +713,7 @@ out: static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size) { struct firmware_buf *buf = fw_priv->buf; - int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT; + int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT; /* If the array of pages is too small, grow it... 
*/ if (buf->page_array_size < pages_needed) { @@ -911,7 +906,9 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, wait_for_completion(&buf->completion); cancel_delayed_work_sync(&fw_priv->timeout_work); - if (!buf->data) + if (is_fw_load_aborted(buf)) + retval = -EAGAIN; + else if (!buf->data) retval = -ENOMEM; device_remove_file(f_dev, &dev_attr_loading); @@ -1111,10 +1108,11 @@ _request_firmware(const struct firmware **firmware_p, const char *name, ret = fw_get_filesystem_firmware(device, fw->priv); if (ret) { - if (opt_flags & FW_OPT_FALLBACK) { + if (!(opt_flags & FW_OPT_NO_WARN)) dev_warn(device, - "Direct firmware load failed with error %d\n", - ret); + "Direct firmware load for %s failed with error %d\n", + name, ret); + if (opt_flags & FW_OPT_USERHELPER) { dev_warn(device, "Falling back to user helper\n"); ret = fw_load_from_user_helper(fw, name, device, opt_flags, timeout); @@ -1171,7 +1169,6 @@ request_firmware(const struct firmware **firmware_p, const char *name, } EXPORT_SYMBOL(request_firmware); -#ifdef CONFIG_FW_LOADER_USER_HELPER /** * request_firmware: - load firmware directly without usermode helper * @firmware_p: pointer to firmware image @@ -1188,12 +1185,12 @@ int request_firmware_direct(const struct firmware **firmware_p, { int ret; __module_get(THIS_MODULE); - ret = _request_firmware(firmware_p, name, device, FW_OPT_UEVENT); + ret = _request_firmware(firmware_p, name, device, + FW_OPT_UEVENT | FW_OPT_NO_WARN); module_put(THIS_MODULE); return ret; } EXPORT_SYMBOL_GPL(request_firmware_direct); -#endif /** * release_firmware: - release the resource associated with a firmware image @@ -1277,7 +1274,7 @@ request_firmware_nowait( fw_work->context = context; fw_work->cont = cont; fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK | - (uevent ? FW_OPT_UEVENT : 0); + (uevent ? 
FW_OPT_UEVENT : FW_OPT_USERHELPER); if (!try_module_get(module)) { kfree(fw_work); diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 00f2208949d..ab4f4ce0272 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -24,6 +24,7 @@ #include <linux/idr.h> #include <linux/acpi.h> #include <linux/clk/clk-conf.h> +#include <linux/limits.h> #include "base.h" #include "power/power.h" @@ -176,7 +177,7 @@ EXPORT_SYMBOL_GPL(platform_add_devices); struct platform_object { struct platform_device pdev; - char name[1]; + char name[]; }; /** @@ -202,6 +203,7 @@ static void platform_device_release(struct device *dev) kfree(pa->pdev.dev.platform_data); kfree(pa->pdev.mfd_cell); kfree(pa->pdev.resource); + kfree(pa->pdev.driver_override); kfree(pa); } @@ -217,7 +219,7 @@ struct platform_device *platform_device_alloc(const char *name, int id) { struct platform_object *pa; - pa = kzalloc(sizeof(struct platform_object) + strlen(name), GFP_KERNEL); + pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL); if (pa) { strcpy(pa->name, name); pa->pdev.name = pa->name; @@ -713,8 +715,49 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a, } static DEVICE_ATTR_RO(modalias); +static ssize_t driver_override_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = to_platform_device(dev); + char *driver_override, *old = pdev->driver_override, *cp; + + if (count > PATH_MAX) + return -EINVAL; + + driver_override = kstrndup(buf, count, GFP_KERNEL); + if (!driver_override) + return -ENOMEM; + + cp = strchr(driver_override, '\n'); + if (cp) + *cp = '\0'; + + if (strlen(driver_override)) { + pdev->driver_override = driver_override; + } else { + kfree(driver_override); + pdev->driver_override = NULL; + } + + kfree(old); + + return count; +} + +static ssize_t driver_override_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + + return sprintf(buf, "%s\n", pdev->driver_override); +} +static DEVICE_ATTR_RW(driver_override); + + static struct attribute *platform_dev_attrs[] = { &dev_attr_modalias.attr, + &dev_attr_driver_override.attr, NULL, }; ATTRIBUTE_GROUPS(platform_dev); @@ -770,6 +813,10 @@ static int platform_match(struct device *dev, struct device_driver *drv) struct platform_device *pdev = to_platform_device(dev); struct platform_driver *pdrv = to_platform_driver(drv); + /* When driver_override is set, only bind to the matching driver */ + if (pdev->driver_override) + return !strcmp(pdev->driver_override, drv->name); + /* Attempt an OF style match first */ if (of_driver_match_device(dev, drv)) return 1; diff --git a/drivers/base/reservation.c b/drivers/base/reservation.c deleted file mode 100644 index a73fbf3b8e5..00000000000 --- a/drivers/base/reservation.c +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (C) 2012-2013 Canonical Ltd - * - * Based on bo.c which bears the following copyright notice, - * but is dual licensed: - * - * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ -/* - * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> - */ - -#include <linux/reservation.h> -#include <linux/export.h> - -DEFINE_WW_CLASS(reservation_ww_class); -EXPORT_SYMBOL(reservation_ww_class); diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c index 6159b7752a6..f2cd6a2d40b 100644 --- a/drivers/bus/brcmstb_gisb.c +++ b/drivers/bus/brcmstb_gisb.c @@ -212,9 +212,9 @@ static int brcmstb_gisb_arb_probe(struct platform_device *pdev) mutex_init(&gdev->lock); INIT_LIST_HEAD(&gdev->next); - gdev->base = devm_request_and_ioremap(&pdev->dev, r); - if (!gdev->base) - return -ENOMEM; + gdev->base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(gdev->base)) + return PTR_ERR(gdev->base); err = devm_request_irq(&pdev->dev, timeout_irq, brcmstb_gisb_timeout_handler, 0, pdev->name, diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile new file mode 100644 index 00000000000..57a675f90cd --- /dev/null +++ b/drivers/dma-buf/Makefile @@ -0,0 +1 @@ +obj-y := dma-buf.o fence.o reservation.o seqno-fence.o diff --git a/drivers/base/dma-buf.c b/drivers/dma-buf/dma-buf.c index 840c7fa8098..f3014c448e1 100644 --- a/drivers/base/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -25,10 +25,13 @@ #include <linux/fs.h> #include <linux/slab.h> #include <linux/dma-buf.h> +#include <linux/fence.h> #include <linux/anon_inodes.h> #include <linux/export.h> #include <linux/debugfs.h> #include <linux/seq_file.h> +#include <linux/poll.h> +#include <linux/reservation.h> static inline int is_dma_buf_file(struct file *); @@ -50,12 +53,25 @@ static int dma_buf_release(struct inode *inode, struct file *file) BUG_ON(dmabuf->vmapping_counter); + /* + * Any fences that a dma-buf poll can wait on should be signaled + * before releasing dma-buf. This is the responsibility of each + * driver that uses the reservation objects. + * + * If you hit this BUG() it means someone dropped their ref to the + * dma-buf while still having pending operation to the buffer. 
+ */ + BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active); + dmabuf->ops->release(dmabuf); mutex_lock(&db_list.lock); list_del(&dmabuf->list_node); mutex_unlock(&db_list.lock); + if (dmabuf->resv == (struct reservation_object *)&dmabuf[1]) + reservation_object_fini(dmabuf->resv); + kfree(dmabuf); return 0; } @@ -103,10 +119,141 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) return base + offset; } +static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb) +{ + struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb; + unsigned long flags; + + spin_lock_irqsave(&dcb->poll->lock, flags); + wake_up_locked_poll(dcb->poll, dcb->active); + dcb->active = 0; + spin_unlock_irqrestore(&dcb->poll->lock, flags); +} + +static unsigned int dma_buf_poll(struct file *file, poll_table *poll) +{ + struct dma_buf *dmabuf; + struct reservation_object *resv; + struct reservation_object_list *fobj; + struct fence *fence_excl; + unsigned long events; + unsigned shared_count, seq; + + dmabuf = file->private_data; + if (!dmabuf || !dmabuf->resv) + return POLLERR; + + resv = dmabuf->resv; + + poll_wait(file, &dmabuf->poll, poll); + + events = poll_requested_events(poll) & (POLLIN | POLLOUT); + if (!events) + return 0; + +retry: + seq = read_seqcount_begin(&resv->seq); + rcu_read_lock(); + + fobj = rcu_dereference(resv->fence); + if (fobj) + shared_count = fobj->shared_count; + else + shared_count = 0; + fence_excl = rcu_dereference(resv->fence_excl); + if (read_seqcount_retry(&resv->seq, seq)) { + rcu_read_unlock(); + goto retry; + } + + if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) { + struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl; + unsigned long pevents = POLLIN; + + if (shared_count == 0) + pevents |= POLLOUT; + + spin_lock_irq(&dmabuf->poll.lock); + if (dcb->active) { + dcb->active |= pevents; + events &= ~pevents; + } else + dcb->active = pevents; + spin_unlock_irq(&dmabuf->poll.lock); + + if (events & pevents) { + if (!fence_get_rcu(fence_excl)) { + /* force a recheck */ + events &= ~pevents; + dma_buf_poll_cb(NULL, &dcb->cb); + } else if (!fence_add_callback(fence_excl, &dcb->cb, + dma_buf_poll_cb)) { + events &= ~pevents; + fence_put(fence_excl); + } else { + /* + * No callback queued, wake up any additional + * waiters. + */ + fence_put(fence_excl); + dma_buf_poll_cb(NULL, &dcb->cb); + } + } + } + + if ((events & POLLOUT) && shared_count > 0) { + struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared; + int i; + + /* Only queue a new callback if no event has fired yet */ + spin_lock_irq(&dmabuf->poll.lock); + if (dcb->active) + events &= ~POLLOUT; + else + dcb->active = POLLOUT; + spin_unlock_irq(&dmabuf->poll.lock); + + if (!(events & POLLOUT)) + goto out; + + for (i = 0; i < shared_count; ++i) { + struct fence *fence = rcu_dereference(fobj->shared[i]); + + if (!fence_get_rcu(fence)) { + /* + * fence refcount dropped to zero, this means + * that fobj has been freed + * + * call dma_buf_poll_cb and force a recheck! + */ + events &= ~POLLOUT; + dma_buf_poll_cb(NULL, &dcb->cb); + break; + } + if (!fence_add_callback(fence, &dcb->cb, + dma_buf_poll_cb)) { + fence_put(fence); + events &= ~POLLOUT; + break; + } + fence_put(fence); + } + + /* No callback queued, wake up any additional waiters. 
*/ + if (i == shared_count) + dma_buf_poll_cb(NULL, &dcb->cb); + } + +out: + rcu_read_unlock(); + return events; +} + static const struct file_operations dma_buf_fops = { .release = dma_buf_release, .mmap = dma_buf_mmap_internal, .llseek = dma_buf_llseek, + .poll = dma_buf_poll, }; /* @@ -128,6 +275,7 @@ static inline int is_dma_buf_file(struct file *file) * @size: [in] Size of the buffer * @flags: [in] mode flags for the file. * @exp_name: [in] name of the exporting module - useful for debugging. + * @resv: [in] reservation-object, NULL to allocate default one. * * Returns, on success, a newly created dma_buf object, which wraps the * supplied private data and operations for dma_buf_ops. On either missing @@ -135,10 +283,17 @@ static inline int is_dma_buf_file(struct file *file) * */ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops, - size_t size, int flags, const char *exp_name) + size_t size, int flags, const char *exp_name, + struct reservation_object *resv) { struct dma_buf *dmabuf; struct file *file; + size_t alloc_size = sizeof(struct dma_buf); + if (!resv) + alloc_size += sizeof(struct reservation_object); + else + /* prevent &dma_buf[1] == dma_buf->resv */ + alloc_size += 1; if (WARN_ON(!priv || !ops || !ops->map_dma_buf @@ -150,7 +305,7 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops, return ERR_PTR(-EINVAL); } - dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL); + dmabuf = kzalloc(alloc_size, GFP_KERNEL); if (dmabuf == NULL) return ERR_PTR(-ENOMEM); @@ -158,6 +313,15 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops, dmabuf->ops = ops; dmabuf->size = size; dmabuf->exp_name = exp_name; + init_waitqueue_head(&dmabuf->poll); + dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll; + dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; + + if (!resv) { + resv = (struct reservation_object *)&dmabuf[1]; + reservation_object_init(resv); + } + dmabuf->resv = resv; file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags); if (IS_ERR(file)) { diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c new file mode 100644 index 00000000000..4222cb2aa96 --- /dev/null +++ b/drivers/dma-buf/fence.c @@ -0,0 +1,431 @@ +/* + * Fence mechanism for dma-buf and to allow for asynchronous dma access + * + * Copyright (C) 2012 Canonical Ltd + * Copyright (C) 2012 Texas Instruments + * + * Authors: + * Rob Clark <robdclark@gmail.com> + * Maarten Lankhorst <maarten.lankhorst@canonical.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include <linux/slab.h> +#include <linux/export.h> +#include <linux/atomic.h> +#include <linux/fence.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/fence.h> + +EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on); +EXPORT_TRACEPOINT_SYMBOL(fence_emit); + +/** + * fence context counter: each execution context should have its own + * fence context, this allows checking if fences belong to the same + * context or not. One device can have multiple separate contexts, + * and they're used if some engine can run independently of another. 
+ */ +static atomic_t fence_context_counter = ATOMIC_INIT(0); + +/** + * fence_context_alloc - allocate an array of fence contexts + * @num: [in] amount of contexts to allocate + * + * This function will return the first index of the number of fences allocated. + * The fence context is used for setting fence->context to a unique number. + */ +unsigned fence_context_alloc(unsigned num) +{ + BUG_ON(!num); + return atomic_add_return(num, &fence_context_counter) - num; +} +EXPORT_SYMBOL(fence_context_alloc); + +/** + * fence_signal_locked - signal completion of a fence + * @fence: the fence to signal + * + * Signal completion for software callbacks on a fence, this will unblock + * fence_wait() calls and run all the callbacks added with + * fence_add_callback(). Can be called multiple times, but since a fence + * can only go from unsignaled to signaled state, it will only be effective + * the first time. + * + * Unlike fence_signal, this function must be called with fence->lock held. + */ +int fence_signal_locked(struct fence *fence) +{ + struct fence_cb *cur, *tmp; + int ret = 0; + + if (WARN_ON(!fence)) + return -EINVAL; + + if (!ktime_to_ns(fence->timestamp)) { + fence->timestamp = ktime_get(); + smp_mb__before_atomic(); + } + + if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + ret = -EINVAL; + + /* + * we might have raced with the unlocked fence_signal, + * still run through all callbacks + */ + } else + trace_fence_signaled(fence); + + list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { + list_del_init(&cur->node); + cur->func(fence, cur); + } + return ret; +} +EXPORT_SYMBOL(fence_signal_locked); + +/** + * fence_signal - signal completion of a fence + * @fence: the fence to signal + * + * Signal completion for software callbacks on a fence, this will unblock + * fence_wait() calls and run all the callbacks added with + * fence_add_callback(). Can be called multiple times, but since a fence + * can only go from unsignaled to signaled state, it will only be effective + * the first time. + */ +int fence_signal(struct fence *fence) +{ + unsigned long flags; + + if (!fence) + return -EINVAL; + + if (!ktime_to_ns(fence->timestamp)) { + fence->timestamp = ktime_get(); + smp_mb__before_atomic(); + } + + if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return -EINVAL; + + trace_fence_signaled(fence); + + if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) { + struct fence_cb *cur, *tmp; + + spin_lock_irqsave(fence->lock, flags); + list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { + list_del_init(&cur->node); + cur->func(fence, cur); + } + spin_unlock_irqrestore(fence->lock, flags); + } + return 0; +} +EXPORT_SYMBOL(fence_signal); + +/** + * fence_wait_timeout - sleep until the fence gets signaled + * or until timeout elapses + * @fence: [in] the fence to wait on + * @intr: [in] if true, do an interruptible wait + * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT + * + * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the + * remaining timeout in jiffies on success. Other error values may be + * returned on custom implementations. + * + * Performs a synchronous wait on this fence. It is assumed the caller + * directly or indirectly (buf-mgr between reservation and committing) + * holds a reference to the fence, otherwise the fence might be + * freed before return, resulting in undefined behavior. 
+ */ +signed long +fence_wait_timeout(struct fence *fence, bool intr, signed long timeout) +{ + signed long ret; + + if (WARN_ON(timeout < 0)) + return -EINVAL; + + trace_fence_wait_start(fence); + ret = fence->ops->wait(fence, intr, timeout); + trace_fence_wait_end(fence); + return ret; +} +EXPORT_SYMBOL(fence_wait_timeout); + +void fence_release(struct kref *kref) +{ + struct fence *fence = + container_of(kref, struct fence, refcount); + + trace_fence_destroy(fence); + + BUG_ON(!list_empty(&fence->cb_list)); + + if (fence->ops->release) + fence->ops->release(fence); + else + fence_free(fence); +} +EXPORT_SYMBOL(fence_release); + +void fence_free(struct fence *fence) +{ + kfree_rcu(fence, rcu); +} +EXPORT_SYMBOL(fence_free); + +/** + * fence_enable_sw_signaling - enable signaling on fence + * @fence: [in] the fence to enable + * + * this will request for sw signaling to be enabled, to make the fence + * complete as soon as possible + */ +void fence_enable_sw_signaling(struct fence *fence) +{ + unsigned long flags; + + if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) && + !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + trace_fence_enable_signal(fence); + + spin_lock_irqsave(fence->lock, flags); + + if (!fence->ops->enable_signaling(fence)) + fence_signal_locked(fence); + + spin_unlock_irqrestore(fence->lock, flags); + } +} +EXPORT_SYMBOL(fence_enable_sw_signaling); + +/** + * fence_add_callback - add a callback to be called when the fence + * is signaled + * @fence: [in] the fence to wait on + * @cb: [in] the callback to register + * @func: [in] the function to call + * + * cb will be initialized by fence_add_callback, no initialization + * by the caller is required. Any number of callbacks can be registered + * to a fence, but a callback can only be registered to one fence at a time. + * + * Note that the callback can be called from an atomic context. If + * fence is already signaled, this function will return -ENOENT (and + * *not* call the callback) + * + * Add a software callback to the fence. Same restrictions apply to + * refcount as it does to fence_wait, however the caller doesn't need to + * keep a refcount to fence afterwards: when software access is enabled, + * the creator of the fence is required to keep the fence alive until + * after it signals with fence_signal. The callback itself can be called + * from irq context. + * + */ +int fence_add_callback(struct fence *fence, struct fence_cb *cb, + fence_func_t func) +{ + unsigned long flags; + int ret = 0; + bool was_set; + + if (WARN_ON(!fence || !func)) + return -EINVAL; + + if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + INIT_LIST_HEAD(&cb->node); + return -ENOENT; + } + + spin_lock_irqsave(fence->lock, flags); + + was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); + + if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + ret = -ENOENT; + else if (!was_set) { + trace_fence_enable_signal(fence); + + if (!fence->ops->enable_signaling(fence)) { + fence_signal_locked(fence); + ret = -ENOENT; + } + } + + if (!ret) { + cb->func = func; + list_add_tail(&cb->node, &fence->cb_list); + } else + INIT_LIST_HEAD(&cb->node); + spin_unlock_irqrestore(fence->lock, flags); + + return ret; +} +EXPORT_SYMBOL(fence_add_callback); + +/** + * fence_remove_callback - remove a callback from the signaling list + * @fence: [in] the fence to wait on + * @cb: [in] the callback to remove + * + * Remove a previously queued callback from the fence. 
This function returns + * true if the callback is succesfully removed, or false if the fence has + * already been signaled. + * + * *WARNING*: + * Cancelling a callback should only be done if you really know what you're + * doing, since deadlocks and race conditions could occur all too easily. For + * this reason, it should only ever be done on hardware lockup recovery, + * with a reference held to the fence. + */ +bool +fence_remove_callback(struct fence *fence, struct fence_cb *cb) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(fence->lock, flags); + + ret = !list_empty(&cb->node); + if (ret) + list_del_init(&cb->node); + + spin_unlock_irqrestore(fence->lock, flags); + + return ret; +} +EXPORT_SYMBOL(fence_remove_callback); + +struct default_wait_cb { + struct fence_cb base; + struct task_struct *task; +}; + +static void +fence_default_wait_cb(struct fence *fence, struct fence_cb *cb) +{ + struct default_wait_cb *wait = + container_of(cb, struct default_wait_cb, base); + + wake_up_state(wait->task, TASK_NORMAL); +} + +/** + * fence_default_wait - default sleep until the fence gets signaled + * or until timeout elapses + * @fence: [in] the fence to wait on + * @intr: [in] if true, do an interruptible wait + * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT + * + * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the + * remaining timeout in jiffies on success. + */ +signed long +fence_default_wait(struct fence *fence, bool intr, signed long timeout) +{ + struct default_wait_cb cb; + unsigned long flags; + signed long ret = timeout; + bool was_set; + + if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return timeout; + + spin_lock_irqsave(fence->lock, flags); + + if (intr && signal_pending(current)) { + ret = -ERESTARTSYS; + goto out; + } + + was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); + + if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + goto out; + + if (!was_set) { + trace_fence_enable_signal(fence); + + if (!fence->ops->enable_signaling(fence)) { + fence_signal_locked(fence); + goto out; + } + } + + cb.base.func = fence_default_wait_cb; + cb.task = current; + list_add(&cb.base.node, &fence->cb_list); + + while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) { + if (intr) + __set_current_state(TASK_INTERRUPTIBLE); + else + __set_current_state(TASK_UNINTERRUPTIBLE); + spin_unlock_irqrestore(fence->lock, flags); + + ret = schedule_timeout(ret); + + spin_lock_irqsave(fence->lock, flags); + if (ret > 0 && intr && signal_pending(current)) + ret = -ERESTARTSYS; + } + + if (!list_empty(&cb.base.node)) + list_del(&cb.base.node); + __set_current_state(TASK_RUNNING); + +out: + spin_unlock_irqrestore(fence->lock, flags); + return ret; +} +EXPORT_SYMBOL(fence_default_wait); + +/** + * fence_init - Initialize a custom fence. + * @fence: [in] the fence to initialize + * @ops: [in] the fence_ops for operations on this fence + * @lock: [in] the irqsafe spinlock to use for locking this fence + * @context: [in] the execution context this fence is run on + * @seqno: [in] a linear increasing sequence number for this context + * + * Initializes an allocated fence, the caller doesn't have to keep its + * refcount after committing with this fence, but it will need to hold a + * refcount again if fence_ops.enable_signaling gets called. This can + * be used for other implementing other types of fence. 
+ * + * context and seqno are used for easy comparison between fences, allowing + * to check which fence is later by simply using fence_later. + */ +void +fence_init(struct fence *fence, const struct fence_ops *ops, + spinlock_t *lock, unsigned context, unsigned seqno) +{ + BUG_ON(!lock); + BUG_ON(!ops || !ops->wait || !ops->enable_signaling || + !ops->get_driver_name || !ops->get_timeline_name); + + kref_init(&fence->refcount); + fence->ops = ops; + INIT_LIST_HEAD(&fence->cb_list); + fence->lock = lock; + fence->context = context; + fence->seqno = seqno; + fence->flags = 0UL; + + trace_fence_init(fence); +} +EXPORT_SYMBOL(fence_init); diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c new file mode 100644 index 00000000000..3c97c8fa8d0 --- /dev/null +++ b/drivers/dma-buf/reservation.c @@ -0,0 +1,477 @@ +/* + * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst) + * + * Based on bo.c which bears the following copyright notice, + * but is dual licensed: + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> + */ + +#include <linux/reservation.h> +#include <linux/export.h> + +DEFINE_WW_CLASS(reservation_ww_class); +EXPORT_SYMBOL(reservation_ww_class); + +struct lock_class_key reservation_seqcount_class; +EXPORT_SYMBOL(reservation_seqcount_class); + +const char reservation_seqcount_string[] = "reservation_seqcount"; +EXPORT_SYMBOL(reservation_seqcount_string); +/* + * Reserve space to add a shared fence to a reservation_object, + * must be called with obj->lock held. 
+ */ +int reservation_object_reserve_shared(struct reservation_object *obj) +{ + struct reservation_object_list *fobj, *old; + u32 max; + + old = reservation_object_get_list(obj); + + if (old && old->shared_max) { + if (old->shared_count < old->shared_max) { + /* perform an in-place update */ + kfree(obj->staged); + obj->staged = NULL; + return 0; + } else + max = old->shared_max * 2; + } else + max = 4; + + /* + * resize obj->staged or allocate if it doesn't exist, + * noop if already correct size + */ + fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]), + GFP_KERNEL); + if (!fobj) + return -ENOMEM; + + obj->staged = fobj; + fobj->shared_max = max; + return 0; +} +EXPORT_SYMBOL(reservation_object_reserve_shared); + +static void +reservation_object_add_shared_inplace(struct reservation_object *obj, + struct reservation_object_list *fobj, + struct fence *fence) +{ + u32 i; + + fence_get(fence); + + preempt_disable(); + write_seqcount_begin(&obj->seq); + + for (i = 0; i < fobj->shared_count; ++i) { + struct fence *old_fence; + + old_fence = rcu_dereference_protected(fobj->shared[i], + reservation_object_held(obj)); + + if (old_fence->context == fence->context) { + /* memory barrier is added by write_seqcount_begin */ + RCU_INIT_POINTER(fobj->shared[i], fence); + write_seqcount_end(&obj->seq); + preempt_enable(); + + fence_put(old_fence); + return; + } + } + + /* + * memory barrier is added by write_seqcount_begin, + * fobj->shared_count is protected by this lock too + */ + RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence); + fobj->shared_count++; + + write_seqcount_end(&obj->seq); + preempt_enable(); +} + +static void +reservation_object_add_shared_replace(struct reservation_object *obj, + struct reservation_object_list *old, + struct reservation_object_list *fobj, + struct fence *fence) +{ + unsigned i; + struct fence *old_fence = NULL; + + fence_get(fence); + + if (!old) { + RCU_INIT_POINTER(fobj->shared[0], fence); + fobj->shared_count = 1; + goto done; + } + + /* + * no need to bump fence refcounts, rcu_read access + * requires the use of kref_get_unless_zero, and the + * references from the old struct are carried over to + * the new. + */ + fobj->shared_count = old->shared_count; + + for (i = 0; i < old->shared_count; ++i) { + struct fence *check; + + check = rcu_dereference_protected(old->shared[i], + reservation_object_held(obj)); + + if (!old_fence && check->context == fence->context) { + old_fence = check; + RCU_INIT_POINTER(fobj->shared[i], fence); + } else + RCU_INIT_POINTER(fobj->shared[i], check); + } + if (!old_fence) { + RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence); + fobj->shared_count++; + } + +done: + preempt_disable(); + write_seqcount_begin(&obj->seq); + /* + * RCU_INIT_POINTER can be used here, + * seqcount provides the necessary barriers + */ + RCU_INIT_POINTER(obj->fence, fobj); + write_seqcount_end(&obj->seq); + preempt_enable(); + + if (old) + kfree_rcu(old, rcu); + + if (old_fence) + fence_put(old_fence); +} + +/* + * Add a fence to a shared slot, obj->lock must be held, and + * reservation_object_reserve_shared_fence has been called. 
+ */ +void reservation_object_add_shared_fence(struct reservation_object *obj, + struct fence *fence) +{ + struct reservation_object_list *old, *fobj = obj->staged; + + old = reservation_object_get_list(obj); + obj->staged = NULL; + + if (!fobj) { + BUG_ON(old->shared_count >= old->shared_max); + reservation_object_add_shared_inplace(obj, old, fence); + } else + reservation_object_add_shared_replace(obj, old, fobj, fence); +} +EXPORT_SYMBOL(reservation_object_add_shared_fence); + +void reservation_object_add_excl_fence(struct reservation_object *obj, + struct fence *fence) +{ + struct fence *old_fence = reservation_object_get_excl(obj); + struct reservation_object_list *old; + u32 i = 0; + + old = reservation_object_get_list(obj); + if (old) + i = old->shared_count; + + if (fence) + fence_get(fence); + + preempt_disable(); + write_seqcount_begin(&obj->seq); + /* write_seqcount_begin provides the necessary memory barrier */ + RCU_INIT_POINTER(obj->fence_excl, fence); + if (old) + old->shared_count = 0; + write_seqcount_end(&obj->seq); + preempt_enable(); + + /* inplace update, no shared fences */ + while (i--) + fence_put(rcu_dereference_protected(old->shared[i], + reservation_object_held(obj))); + + if (old_fence) + fence_put(old_fence); +} +EXPORT_SYMBOL(reservation_object_add_excl_fence); + +int reservation_object_get_fences_rcu(struct reservation_object *obj, + struct fence **pfence_excl, + unsigned *pshared_count, + struct fence ***pshared) +{ + unsigned shared_count = 0; + unsigned retry = 1; + struct fence **shared = NULL, *fence_excl = NULL; + int ret = 0; + + while (retry) { + struct reservation_object_list *fobj; + unsigned seq; + + seq = read_seqcount_begin(&obj->seq); + + rcu_read_lock(); + + fobj = rcu_dereference(obj->fence); + if (fobj) { + struct fence **nshared; + size_t sz = sizeof(*shared) * fobj->shared_max; + + nshared = krealloc(shared, sz, + GFP_NOWAIT | __GFP_NOWARN); + if (!nshared) { + rcu_read_unlock(); + nshared = krealloc(shared, sz, GFP_KERNEL); + if (nshared) { + shared = nshared; + continue; + } + + ret = -ENOMEM; + shared_count = 0; + break; + } + shared = nshared; + memcpy(shared, fobj->shared, sz); + shared_count = fobj->shared_count; + } else + shared_count = 0; + fence_excl = rcu_dereference(obj->fence_excl); + + retry = read_seqcount_retry(&obj->seq, seq); + if (retry) + goto unlock; + + if (!fence_excl || fence_get_rcu(fence_excl)) { + unsigned i; + + for (i = 0; i < shared_count; ++i) { + if (fence_get_rcu(shared[i])) + continue; + + /* uh oh, refcount failed, abort and retry */ + while (i--) + fence_put(shared[i]); + + if (fence_excl) { + fence_put(fence_excl); + fence_excl = NULL; + } + + retry = 1; + break; + } + } else + retry = 1; + +unlock: + rcu_read_unlock(); + } + *pshared_count = shared_count; + if (shared_count) + *pshared = shared; + else { + *pshared = NULL; + kfree(shared); + } + *pfence_excl = fence_excl; + + return ret; +} +EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu); + +long reservation_object_wait_timeout_rcu(struct reservation_object *obj, + bool wait_all, bool intr, + unsigned long timeout) +{ + struct fence *fence; + unsigned seq, shared_count, i = 0; + long ret = timeout; + +retry: + fence = NULL; + shared_count = 0; + seq = read_seqcount_begin(&obj->seq); + rcu_read_lock(); + + if (wait_all) { + struct reservation_object_list *fobj = rcu_dereference(obj->fence); + + if (fobj) + shared_count = fobj->shared_count; + + if (read_seqcount_retry(&obj->seq, seq)) + goto unlock_retry; + + for (i = 0; i < shared_count; ++i) { + 
struct fence *lfence = rcu_dereference(fobj->shared[i]); + + if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) + continue; + + if (!fence_get_rcu(lfence)) + goto unlock_retry; + + if (fence_is_signaled(lfence)) { + fence_put(lfence); + continue; + } + + fence = lfence; + break; + } + } + + if (!shared_count) { + struct fence *fence_excl = rcu_dereference(obj->fence_excl); + + if (read_seqcount_retry(&obj->seq, seq)) + goto unlock_retry; + + if (fence_excl && + !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) { + if (!fence_get_rcu(fence_excl)) + goto unlock_retry; + + if (fence_is_signaled(fence_excl)) + fence_put(fence_excl); + else + fence = fence_excl; + } + } + + rcu_read_unlock(); + if (fence) { + ret = fence_wait_timeout(fence, intr, ret); + fence_put(fence); + if (ret > 0 && wait_all && (i + 1 < shared_count)) + goto retry; + } + return ret; + +unlock_retry: + rcu_read_unlock(); + goto retry; +} +EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu); + + +static inline int +reservation_object_test_signaled_single(struct fence *passed_fence) +{ + struct fence *fence, *lfence = passed_fence; + int ret = 1; + + if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) { + int ret; + + fence = fence_get_rcu(lfence); + if (!fence) + return -1; + + ret = !!fence_is_signaled(fence); + fence_put(fence); + } + return ret; +} + +bool reservation_object_test_signaled_rcu(struct reservation_object *obj, + bool test_all) +{ + unsigned seq, shared_count; + int ret = true; + +retry: + shared_count = 0; + seq = read_seqcount_begin(&obj->seq); + rcu_read_lock(); + + if (test_all) { + unsigned i; + + struct reservation_object_list *fobj = rcu_dereference(obj->fence); + + if (fobj) + shared_count = fobj->shared_count; + + if (read_seqcount_retry(&obj->seq, seq)) + goto unlock_retry; + + for (i = 0; i < shared_count; ++i) { + struct fence *fence = rcu_dereference(fobj->shared[i]); + + ret = reservation_object_test_signaled_single(fence); + if (ret < 0) + goto unlock_retry; + else if (!ret) + break; + } + + /* + * There could be a read_seqcount_retry here, but nothing cares + * about whether it's the old or newer fence pointers that are + * signaled. That race could still have happened after checking + * read_seqcount_retry. If you care, use ww_mutex_lock. + */ + } + + if (!shared_count) { + struct fence *fence_excl = rcu_dereference(obj->fence_excl); + + if (read_seqcount_retry(&obj->seq, seq)) + goto unlock_retry; + + if (fence_excl) { + ret = reservation_object_test_signaled_single(fence_excl); + if (ret < 0) + goto unlock_retry; + } + } + + rcu_read_unlock(); + return ret; + +unlock_retry: + rcu_read_unlock(); + goto retry; +} +EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu); diff --git a/drivers/dma-buf/seqno-fence.c b/drivers/dma-buf/seqno-fence.c new file mode 100644 index 00000000000..7d12a39a4b5 --- /dev/null +++ b/drivers/dma-buf/seqno-fence.c @@ -0,0 +1,73 @@ +/* + * seqno-fence, using a dma-buf to synchronize fencing + * + * Copyright (C) 2012 Texas Instruments + * Copyright (C) 2012-2014 Canonical Ltd + * Authors: + * Rob Clark <robdclark@gmail.com> + * Maarten Lankhorst <maarten.lankhorst@canonical.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include <linux/slab.h> +#include <linux/export.h> +#include <linux/seqno-fence.h> + +static const char *seqno_fence_get_driver_name(struct fence *fence) +{ + struct seqno_fence *seqno_fence = to_seqno_fence(fence); + return seqno_fence->ops->get_driver_name(fence); +} + +static const char *seqno_fence_get_timeline_name(struct fence *fence) +{ + struct seqno_fence *seqno_fence = to_seqno_fence(fence); + return seqno_fence->ops->get_timeline_name(fence); +} + +static bool seqno_enable_signaling(struct fence *fence) +{ + struct seqno_fence *seqno_fence = to_seqno_fence(fence); + return seqno_fence->ops->enable_signaling(fence); +} + +static bool seqno_signaled(struct fence *fence) +{ + struct seqno_fence *seqno_fence = to_seqno_fence(fence); + return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence); +} + +static void seqno_release(struct fence *fence) +{ + struct seqno_fence *f = to_seqno_fence(fence); + + dma_buf_put(f->sync_buf); + if (f->ops->release) + f->ops->release(fence); + else + fence_free(&f->base); +} + +static signed long seqno_wait(struct fence *fence, bool intr, signed long timeout) +{ + struct seqno_fence *f = to_seqno_fence(fence); + return f->ops->wait(fence, intr, timeout); +} + +const struct fence_ops seqno_fence_ops = { + .get_driver_name = seqno_fence_get_driver_name, + .get_timeline_name = seqno_fence_get_timeline_name, + .enable_signaling = seqno_enable_signaling, + .signaled = seqno_signaled, + .wait = seqno_wait, + .release = seqno_release, +}; +EXPORT_SYMBOL(seqno_fence_ops); diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 81c34f949df..3aedf9e993e 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -1039,11 +1039,9 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num, if (ret) return ret; - base = devm_request_and_ioremap(dev->dev, res); - if (!base) { - DRM_ERROR("failed to ioremap register\n"); - return -ENOMEM; - } + base = devm_ioremap_resource(dev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); dcrtc = kzalloc(sizeof(*dcrtc), GFP_KERNEL); if (!dcrtc) { diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index bb9b642d848..7496f55611a 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -539,7 +539,7 @@ armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags) { return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size, - O_RDWR); + O_RDWR, NULL); } struct drm_gem_object * diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 304ca8cacbc..99d578bad17 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -336,7 +336,13 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { struct dma_buf *drm_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags) { - return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags); + struct reservation_object *robj = NULL; + + if (dev->driver->gem_prime_res_obj) + robj = dev->driver->gem_prime_res_obj(obj); + + return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, + flags, robj); } EXPORT_SYMBOL(drm_gem_prime_export); diff --git 
a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c index 2a3ad24276f..60192ed544f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c @@ -187,7 +187,7 @@ struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev, struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); return dma_buf_export(obj, &exynos_dmabuf_ops, - exynos_gem_obj->base.size, flags); + exynos_gem_obj->base.size, flags, NULL); } struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 580aa42443e..82a1f4b5777 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -237,7 +237,8 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev, return ERR_PTR(ret); } - return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags); + return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags, + NULL); } static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 5425ffe3931..c9428c943af 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -845,6 +845,7 @@ driver = { .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, .gem_prime_pin = nouveau_gem_prime_pin, + .gem_prime_res_obj = nouveau_gem_prime_res_obj, .gem_prime_unpin = nouveau_gem_prime_unpin, .gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table, .gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table, diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h index 7caca057bc3..ddab762d81f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.h +++ b/drivers/gpu/drm/nouveau/nouveau_gem.h @@ -35,6 +35,7 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *, struct drm_file *); extern int nouveau_gem_prime_pin(struct drm_gem_object *); +struct reservation_object *nouveau_gem_prime_res_obj(struct drm_gem_object *); extern void nouveau_gem_prime_unpin(struct drm_gem_object *); extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *); extern struct drm_gem_object *nouveau_gem_prime_import_sg_table( diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c index 51a2cb102b4..1f51008e4d2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_prime.c +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c @@ -102,3 +102,10 @@ void nouveau_gem_prime_unpin(struct drm_gem_object *obj) nouveau_bo_unpin(nvbo); } + +struct reservation_object *nouveau_gem_prime_res_obj(struct drm_gem_object *obj) +{ + struct nouveau_bo *nvbo = nouveau_gem_object(obj); + + return nvbo->bo.resv; +} diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index 4fcca8d4279..a2dbfb1737b 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c @@ -171,7 +171,7 @@ static struct dma_buf_ops omap_dmabuf_ops = { struct dma_buf *omap_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags) { - return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags); + return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags, NULL); } struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev, diff --git 
a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index e9e36108424..959f0866d99 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -132,6 +132,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, struct sg_table *sg); int radeon_gem_prime_pin(struct drm_gem_object *obj); void radeon_gem_prime_unpin(struct drm_gem_object *obj); +struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *); void *radeon_gem_prime_vmap(struct drm_gem_object *obj); void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, @@ -566,6 +567,7 @@ static struct drm_driver kms_driver = { .gem_prime_import = drm_gem_prime_import, .gem_prime_pin = radeon_gem_prime_pin, .gem_prime_unpin = radeon_gem_prime_unpin, + .gem_prime_res_obj = radeon_gem_prime_res_obj, .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table, .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table, .gem_prime_vmap = radeon_gem_prime_vmap, diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index 20074560fc2..28d71070c38 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c @@ -103,3 +103,11 @@ void radeon_gem_prime_unpin(struct drm_gem_object *obj) radeon_bo_unpin(bo); radeon_bo_unreserve(bo); } + + +struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *obj) +{ + struct radeon_bo *bo = gem_to_radeon_bo(obj); + + return bo->tbo.resv; +} diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index aa85b7b26f1..78cc8143760 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -420,7 +420,7 @@ struct dma_buf *tegra_gem_prime_export(struct drm_device *drm, int flags) { return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size, - flags); + flags, NULL); } struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm, diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c index d2a05335278..12c87110db3 100644 --- a/drivers/gpu/drm/ttm/ttm_object.c +++ b/drivers/gpu/drm/ttm/ttm_object.c @@ -695,7 +695,7 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, } dma_buf = dma_buf_export(prime, &tdev->ops, - prime->size, flags); + prime->size, flags, NULL); if (IS_ERR(dma_buf)) { ret = PTR_ERR(dma_buf); ttm_mem_global_free(tdev->mem_glob, diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c index 880be0782dd..c4e4dfa8123 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c @@ -404,7 +404,7 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags) if (WARN_ON(!buf->sgt_base)) return NULL; - dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags); + dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags, NULL); if (IS_ERR(dbuf)) return NULL; diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig index 99e484f845f..51607e9aa04 100644 --- a/drivers/staging/android/Kconfig +++ b/drivers/staging/android/Kconfig @@ -88,6 +88,7 @@ config SYNC bool "Synchronization framework" default n select ANON_INODES + select DMA_SHARED_BUFFER ---help--- This option enables the framework for synchronization between multiple drivers. 
Sync implementations can take advantage of hardware diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile index 0a01e191490..517ad5ffa42 100644 --- a/drivers/staging/android/Makefile +++ b/drivers/staging/android/Makefile @@ -9,5 +9,5 @@ obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o obj-$(CONFIG_ANDROID_INTF_ALARM_DEV) += alarm-dev.o -obj-$(CONFIG_SYNC) += sync.o +obj-$(CONFIG_SYNC) += sync.o sync_debug.o obj-$(CONFIG_SW_SYNC) += sw_sync.o diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 389b8f67a2e..270360912b2 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -1120,7 +1120,8 @@ struct dma_buf *ion_share_dma_buf(struct ion_client *client, ion_buffer_get(buffer); mutex_unlock(&client->lock); - dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR); + dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR, + NULL); if (IS_ERR(dmabuf)) { ion_buffer_put(buffer); return dmabuf; diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c index 12a136ec1ce..a76db3ff87c 100644 --- a/drivers/staging/android/sw_sync.c +++ b/drivers/staging/android/sw_sync.c @@ -50,7 +50,7 @@ static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt) { struct sw_sync_pt *pt = (struct sw_sync_pt *) sync_pt; struct sw_sync_timeline *obj = - (struct sw_sync_timeline *)sync_pt->parent; + (struct sw_sync_timeline *)sync_pt_parent(sync_pt); return (struct sync_pt *) sw_sync_pt_create(obj, pt->value); } @@ -59,7 +59,7 @@ static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt) { struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; struct sw_sync_timeline *obj = - (struct sw_sync_timeline *)sync_pt->parent; + (struct sw_sync_timeline *)sync_pt_parent(sync_pt); return sw_sync_cmp(obj->value, pt->value) >= 0; } @@ -97,7 +97,6 @@ static void sw_sync_pt_value_str(struct sync_pt *sync_pt, char *str, int size) { struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; - snprintf(str, size, "%d", pt->value); } @@ -157,7 +156,6 @@ static int sw_sync_open(struct inode *inode, struct file *file) static int sw_sync_release(struct inode *inode, struct file *file) { struct sw_sync_timeline *obj = file->private_data; - sync_timeline_destroy(&obj->obj); return 0; } diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c index 18174f7c871..c9a0c2cdc81 100644 --- a/drivers/staging/android/sync.c +++ b/drivers/staging/android/sync.c @@ -31,22 +31,13 @@ #define CREATE_TRACE_POINTS #include "trace/sync.h" -static void sync_fence_signal_pt(struct sync_pt *pt); -static int _sync_pt_has_signaled(struct sync_pt *pt); -static void sync_fence_free(struct kref *kref); -static void sync_dump(void); - -static LIST_HEAD(sync_timeline_list_head); -static DEFINE_SPINLOCK(sync_timeline_list_lock); - -static LIST_HEAD(sync_fence_list_head); -static DEFINE_SPINLOCK(sync_fence_list_lock); +static const struct fence_ops android_fence_ops; +static const struct file_operations sync_fence_fops; struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, int size, const char *name) { struct sync_timeline *obj; - unsigned long flags; if (size < sizeof(struct sync_timeline)) return NULL; @@ -57,17 +48,14 @@ struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, kref_init(&obj->kref); obj->ops = ops; + obj->context = 
fence_context_alloc(1); strlcpy(obj->name, name, sizeof(obj->name)); INIT_LIST_HEAD(&obj->child_list_head); - spin_lock_init(&obj->child_list_lock); - INIT_LIST_HEAD(&obj->active_list_head); - spin_lock_init(&obj->active_list_lock); + spin_lock_init(&obj->child_list_lock); - spin_lock_irqsave(&sync_timeline_list_lock, flags); - list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head); - spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + sync_timeline_debug_add(obj); return obj; } @@ -77,11 +65,8 @@ static void sync_timeline_free(struct kref *kref) { struct sync_timeline *obj = container_of(kref, struct sync_timeline, kref); - unsigned long flags; - spin_lock_irqsave(&sync_timeline_list_lock, flags); - list_del(&obj->sync_timeline_list); - spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + sync_timeline_debug_remove(obj); if (obj->ops->release_obj) obj->ops->release_obj(obj); @@ -89,6 +74,16 @@ static void sync_timeline_free(struct kref *kref) kfree(obj); } +static void sync_timeline_get(struct sync_timeline *obj) +{ + kref_get(&obj->kref); +} + +static void sync_timeline_put(struct sync_timeline *obj) +{ + kref_put(&obj->kref, sync_timeline_free); +} + void sync_timeline_destroy(struct sync_timeline *obj) { obj->destroyed = true; @@ -102,75 +97,33 @@ void sync_timeline_destroy(struct sync_timeline *obj) * signal any children that their parent is going away. */ sync_timeline_signal(obj); - - kref_put(&obj->kref, sync_timeline_free); + sync_timeline_put(obj); } EXPORT_SYMBOL(sync_timeline_destroy); -static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt) -{ - unsigned long flags; - - pt->parent = obj; - - spin_lock_irqsave(&obj->child_list_lock, flags); - list_add_tail(&pt->child_list, &obj->child_list_head); - spin_unlock_irqrestore(&obj->child_list_lock, flags); -} - -static void sync_timeline_remove_pt(struct sync_pt *pt) -{ - struct sync_timeline *obj = pt->parent; - unsigned long flags; - - spin_lock_irqsave(&obj->active_list_lock, flags); - if (!list_empty(&pt->active_list)) - list_del_init(&pt->active_list); - spin_unlock_irqrestore(&obj->active_list_lock, flags); - - spin_lock_irqsave(&obj->child_list_lock, flags); - if (!list_empty(&pt->child_list)) - list_del_init(&pt->child_list); - - spin_unlock_irqrestore(&obj->child_list_lock, flags); -} - void sync_timeline_signal(struct sync_timeline *obj) { unsigned long flags; LIST_HEAD(signaled_pts); - struct list_head *pos, *n; + struct sync_pt *pt, *next; trace_sync_timeline(obj); - spin_lock_irqsave(&obj->active_list_lock, flags); - - list_for_each_safe(pos, n, &obj->active_list_head) { - struct sync_pt *pt = - container_of(pos, struct sync_pt, active_list); + spin_lock_irqsave(&obj->child_list_lock, flags); - if (_sync_pt_has_signaled(pt)) { - list_del_init(pos); - list_add(&pt->signaled_list, &signaled_pts); - kref_get(&pt->fence->kref); - } + list_for_each_entry_safe(pt, next, &obj->active_list_head, + active_list) { + if (fence_is_signaled_locked(&pt->base)) + list_del(&pt->active_list); } - spin_unlock_irqrestore(&obj->active_list_lock, flags); - - list_for_each_safe(pos, n, &signaled_pts) { - struct sync_pt *pt = - container_of(pos, struct sync_pt, signaled_list); - - list_del_init(pos); - sync_fence_signal_pt(pt); - kref_put(&pt->fence->kref, sync_fence_free); - } + spin_unlock_irqrestore(&obj->child_list_lock, flags); } EXPORT_SYMBOL(sync_timeline_signal); -struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size) +struct sync_pt *sync_pt_create(struct sync_timeline 
*obj, int size) { + unsigned long flags; struct sync_pt *pt; if (size < sizeof(struct sync_pt)) @@ -180,87 +133,28 @@ struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size) if (pt == NULL) return NULL; + spin_lock_irqsave(&obj->child_list_lock, flags); + sync_timeline_get(obj); + fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock, + obj->context, ++obj->value); + list_add_tail(&pt->child_list, &obj->child_list_head); INIT_LIST_HEAD(&pt->active_list); - kref_get(&parent->kref); - sync_timeline_add_pt(parent, pt); - + spin_unlock_irqrestore(&obj->child_list_lock, flags); return pt; } EXPORT_SYMBOL(sync_pt_create); void sync_pt_free(struct sync_pt *pt) { - if (pt->parent->ops->free_pt) - pt->parent->ops->free_pt(pt); - - sync_timeline_remove_pt(pt); - - kref_put(&pt->parent->kref, sync_timeline_free); - - kfree(pt); + fence_put(&pt->base); } EXPORT_SYMBOL(sync_pt_free); -/* call with pt->parent->active_list_lock held */ -static int _sync_pt_has_signaled(struct sync_pt *pt) -{ - int old_status = pt->status; - - if (!pt->status) - pt->status = pt->parent->ops->has_signaled(pt); - - if (!pt->status && pt->parent->destroyed) - pt->status = -ENOENT; - - if (pt->status != old_status) - pt->timestamp = ktime_get(); - - return pt->status; -} - -static struct sync_pt *sync_pt_dup(struct sync_pt *pt) -{ - return pt->parent->ops->dup(pt); -} - -/* Adds a sync pt to the active queue. Called when added to a fence */ -static void sync_pt_activate(struct sync_pt *pt) -{ - struct sync_timeline *obj = pt->parent; - unsigned long flags; - int err; - - spin_lock_irqsave(&obj->active_list_lock, flags); - - err = _sync_pt_has_signaled(pt); - if (err != 0) - goto out; - - list_add_tail(&pt->active_list, &obj->active_list_head); - -out: - spin_unlock_irqrestore(&obj->active_list_lock, flags); -} - -static int sync_fence_release(struct inode *inode, struct file *file); -static unsigned int sync_fence_poll(struct file *file, poll_table *wait); -static long sync_fence_ioctl(struct file *file, unsigned int cmd, - unsigned long arg); - - -static const struct file_operations sync_fence_fops = { - .release = sync_fence_release, - .poll = sync_fence_poll, - .unlocked_ioctl = sync_fence_ioctl, - .compat_ioctl = sync_fence_ioctl, -}; - -static struct sync_fence *sync_fence_alloc(const char *name) +static struct sync_fence *sync_fence_alloc(int size, const char *name) { struct sync_fence *fence; - unsigned long flags; - fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL); + fence = kzalloc(size, GFP_KERNEL); if (fence == NULL) return NULL; @@ -272,16 +166,8 @@ static struct sync_fence *sync_fence_alloc(const char *name) kref_init(&fence->kref); strlcpy(fence->name, name, sizeof(fence->name)); - INIT_LIST_HEAD(&fence->pt_list_head); - INIT_LIST_HEAD(&fence->waiter_list_head); - spin_lock_init(&fence->waiter_list_lock); - init_waitqueue_head(&fence->wq); - spin_lock_irqsave(&sync_fence_list_lock, flags); - list_add_tail(&fence->sync_fence_list, &sync_fence_list_head); - spin_unlock_irqrestore(&sync_fence_list_lock, flags); - return fence; err: @@ -289,120 +175,42 @@ err: return NULL; } -/* TODO: implement a create which takes more that one sync_pt */ -struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt) +static void fence_check_cb_func(struct fence *f, struct fence_cb *cb) { + struct sync_fence_cb *check; struct sync_fence *fence; - if (pt->fence) - return NULL; - - fence = sync_fence_alloc(name); - if (fence == NULL) - return NULL; + check = container_of(cb, struct 
sync_fence_cb, cb); + fence = check->fence; - pt->fence = fence; - list_add(&pt->pt_list, &fence->pt_list_head); - sync_pt_activate(pt); - - /* - * signal the fence in case pt was activated before - * sync_pt_activate(pt) was called - */ - sync_fence_signal_pt(pt); - - return fence; -} -EXPORT_SYMBOL(sync_fence_create); - -static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src) -{ - struct list_head *pos; - - list_for_each(pos, &src->pt_list_head) { - struct sync_pt *orig_pt = - container_of(pos, struct sync_pt, pt_list); - struct sync_pt *new_pt = sync_pt_dup(orig_pt); - - if (new_pt == NULL) - return -ENOMEM; - - new_pt->fence = dst; - list_add(&new_pt->pt_list, &dst->pt_list_head); - } - - return 0; + if (atomic_dec_and_test(&fence->status)) + wake_up_all(&fence->wq); } -static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src) -{ - struct list_head *src_pos, *dst_pos, *n; - - list_for_each(src_pos, &src->pt_list_head) { - struct sync_pt *src_pt = - container_of(src_pos, struct sync_pt, pt_list); - bool collapsed = false; - - list_for_each_safe(dst_pos, n, &dst->pt_list_head) { - struct sync_pt *dst_pt = - container_of(dst_pos, struct sync_pt, pt_list); - /* collapse two sync_pts on the same timeline - * to a single sync_pt that will signal at - * the later of the two - */ - if (dst_pt->parent == src_pt->parent) { - if (dst_pt->parent->ops->compare(dst_pt, src_pt) - == -1) { - struct sync_pt *new_pt = - sync_pt_dup(src_pt); - if (new_pt == NULL) - return -ENOMEM; - - new_pt->fence = dst; - list_replace(&dst_pt->pt_list, - &new_pt->pt_list); - sync_pt_free(dst_pt); - } - collapsed = true; - break; - } - } - - if (!collapsed) { - struct sync_pt *new_pt = sync_pt_dup(src_pt); - - if (new_pt == NULL) - return -ENOMEM; - - new_pt->fence = dst; - list_add(&new_pt->pt_list, &dst->pt_list_head); - } - } - - return 0; -} - -static void sync_fence_detach_pts(struct sync_fence *fence) +/* TODO: implement a create which takes more that one sync_pt */ +struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt) { - struct list_head *pos, *n; + struct sync_fence *fence; - list_for_each_safe(pos, n, &fence->pt_list_head) { - struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); + fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name); + if (fence == NULL) + return NULL; - sync_timeline_remove_pt(pt); - } -} + fence->num_fences = 1; + atomic_set(&fence->status, 1); -static void sync_fence_free_pts(struct sync_fence *fence) -{ - struct list_head *pos, *n; + fence_get(&pt->base); + fence->cbs[0].sync_pt = &pt->base; + fence->cbs[0].fence = fence; + if (fence_add_callback(&pt->base, &fence->cbs[0].cb, + fence_check_cb_func)) + atomic_dec(&fence->status); - list_for_each_safe(pos, n, &fence->pt_list_head) { - struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); + sync_fence_debug_add(fence); - sync_pt_free(pt); - } + return fence; } +EXPORT_SYMBOL(sync_fence_create); struct sync_fence *sync_fence_fdget(int fd) { @@ -434,197 +242,155 @@ void sync_fence_install(struct sync_fence *fence, int fd) } EXPORT_SYMBOL(sync_fence_install); -static int sync_fence_get_status(struct sync_fence *fence) +static void sync_fence_add_pt(struct sync_fence *fence, + int *i, struct fence *pt) { - struct list_head *pos; - int status = 1; - - list_for_each(pos, &fence->pt_list_head) { - struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); - int pt_status = pt->status; - - if (pt_status < 0) { - status = pt_status; - break; 
- } else if (status == 1) { - status = pt_status; - } - } + fence->cbs[*i].sync_pt = pt; + fence->cbs[*i].fence = fence; - return status; + if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) { + fence_get(pt); + (*i)++; + } } struct sync_fence *sync_fence_merge(const char *name, struct sync_fence *a, struct sync_fence *b) { + int num_fences = a->num_fences + b->num_fences; struct sync_fence *fence; - struct list_head *pos; - int err; + int i, i_a, i_b; + unsigned long size = offsetof(struct sync_fence, cbs[num_fences]); - fence = sync_fence_alloc(name); + fence = sync_fence_alloc(size, name); if (fence == NULL) return NULL; - err = sync_fence_copy_pts(fence, a); - if (err < 0) - goto err; + atomic_set(&fence->status, num_fences); - err = sync_fence_merge_pts(fence, b); - if (err < 0) - goto err; + /* + * Assume sync_fence a and b are both ordered and have no + * duplicates with the same context. + * + * If a sync_fence can only be created with sync_fence_merge + * and sync_fence_create, this is a reasonable assumption. + */ + for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) { + struct fence *pt_a = a->cbs[i_a].sync_pt; + struct fence *pt_b = b->cbs[i_b].sync_pt; + + if (pt_a->context < pt_b->context) { + sync_fence_add_pt(fence, &i, pt_a); + + i_a++; + } else if (pt_a->context > pt_b->context) { + sync_fence_add_pt(fence, &i, pt_b); - list_for_each(pos, &fence->pt_list_head) { - struct sync_pt *pt = - container_of(pos, struct sync_pt, pt_list); - sync_pt_activate(pt); + i_b++; + } else { + if (pt_a->seqno - pt_b->seqno <= INT_MAX) + sync_fence_add_pt(fence, &i, pt_a); + else + sync_fence_add_pt(fence, &i, pt_b); + + i_a++; + i_b++; + } } - /* - * signal the fence in case one of it's pts were activated before - * they were activated - */ - sync_fence_signal_pt(list_first_entry(&fence->pt_list_head, - struct sync_pt, - pt_list)); + for (; i_a < a->num_fences; i_a++) + sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt); + for (; i_b < b->num_fences; i_b++) + sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt); + + if (num_fences > i) + atomic_sub(num_fences - i, &fence->status); + fence->num_fences = i; + + sync_fence_debug_add(fence); return fence; -err: - sync_fence_free_pts(fence); - kfree(fence); - return NULL; } EXPORT_SYMBOL(sync_fence_merge); -static void sync_fence_signal_pt(struct sync_pt *pt) +int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode, + int wake_flags, void *key) { - LIST_HEAD(signaled_waiters); - struct sync_fence *fence = pt->fence; - struct list_head *pos; - struct list_head *n; - unsigned long flags; - int status; - - status = sync_fence_get_status(fence); - - spin_lock_irqsave(&fence->waiter_list_lock, flags); - /* - * this should protect against two threads racing on the signaled - * false -> true transition - */ - if (status && !fence->status) { - list_for_each_safe(pos, n, &fence->waiter_list_head) - list_move(pos, &signaled_waiters); - - fence->status = status; - } else { - status = 0; - } - spin_unlock_irqrestore(&fence->waiter_list_lock, flags); + struct sync_fence_waiter *wait; - if (status) { - list_for_each_safe(pos, n, &signaled_waiters) { - struct sync_fence_waiter *waiter = - container_of(pos, struct sync_fence_waiter, - waiter_list); + wait = container_of(curr, struct sync_fence_waiter, work); + list_del_init(&wait->work.task_list); - list_del(pos); - waiter->callback(fence, waiter); - } - wake_up(&fence->wq); - } + wait->callback(wait->work.private, wait); + return 1; } int sync_fence_wait_async(struct 
sync_fence *fence, struct sync_fence_waiter *waiter) { + int err = atomic_read(&fence->status); unsigned long flags; - int err = 0; - spin_lock_irqsave(&fence->waiter_list_lock, flags); + if (err < 0) + return err; - if (fence->status) { - err = fence->status; - goto out; - } + if (!err) + return 1; - list_add_tail(&waiter->waiter_list, &fence->waiter_list_head); -out: - spin_unlock_irqrestore(&fence->waiter_list_lock, flags); + init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq); + waiter->work.private = fence; - return err; + spin_lock_irqsave(&fence->wq.lock, flags); + err = atomic_read(&fence->status); + if (err > 0) + __add_wait_queue_tail(&fence->wq, &waiter->work); + spin_unlock_irqrestore(&fence->wq.lock, flags); + + if (err < 0) + return err; + + return !err; } EXPORT_SYMBOL(sync_fence_wait_async); int sync_fence_cancel_async(struct sync_fence *fence, struct sync_fence_waiter *waiter) { - struct list_head *pos; - struct list_head *n; unsigned long flags; - int ret = -ENOENT; + int ret = 0; - spin_lock_irqsave(&fence->waiter_list_lock, flags); - /* - * Make sure waiter is still in waiter_list because it is possible for - * the waiter to be removed from the list while the callback is still - * pending. - */ - list_for_each_safe(pos, n, &fence->waiter_list_head) { - struct sync_fence_waiter *list_waiter = - container_of(pos, struct sync_fence_waiter, - waiter_list); - if (list_waiter == waiter) { - list_del(pos); - ret = 0; - break; - } - } - spin_unlock_irqrestore(&fence->waiter_list_lock, flags); + spin_lock_irqsave(&fence->wq.lock, flags); + if (!list_empty(&waiter->work.task_list)) + list_del_init(&waiter->work.task_list); + else + ret = -ENOENT; + spin_unlock_irqrestore(&fence->wq.lock, flags); return ret; } EXPORT_SYMBOL(sync_fence_cancel_async); -static bool sync_fence_check(struct sync_fence *fence) -{ - /* - * Make sure that reads to fence->status are ordered with the - * wait queue event triggering - */ - smp_rmb(); - return fence->status != 0; -} - int sync_fence_wait(struct sync_fence *fence, long timeout) { - int err = 0; - struct sync_pt *pt; - - trace_sync_wait(fence, 1); - list_for_each_entry(pt, &fence->pt_list_head, pt_list) - trace_sync_pt(pt); + long ret; + int i; - if (timeout > 0) { + if (timeout < 0) + timeout = MAX_SCHEDULE_TIMEOUT; + else timeout = msecs_to_jiffies(timeout); - err = wait_event_interruptible_timeout(fence->wq, - sync_fence_check(fence), - timeout); - } else if (timeout < 0) { - err = wait_event_interruptible(fence->wq, - sync_fence_check(fence)); - } - trace_sync_wait(fence, 0); - if (err < 0) - return err; - - if (fence->status < 0) { - pr_info("fence error %d on [%p]\n", fence->status, fence); - sync_dump(); - return fence->status; - } + trace_sync_wait(fence, 1); + for (i = 0; i < fence->num_fences; ++i) + trace_sync_pt(fence->cbs[i].sync_pt); + ret = wait_event_interruptible_timeout(fence->wq, + atomic_read(&fence->status) <= 0, + timeout); + trace_sync_wait(fence, 0); - if (fence->status == 0) { - if (timeout > 0) { + if (ret < 0) + return ret; + else if (ret == 0) { + if (timeout) { pr_info("fence timeout on [%p] after %dms\n", fence, jiffies_to_msecs(timeout)); sync_dump(); @@ -632,15 +398,136 @@ int sync_fence_wait(struct sync_fence *fence, long timeout) return -ETIME; } - return 0; + ret = atomic_read(&fence->status); + if (ret) { + pr_info("fence error %ld on [%p]\n", ret, fence); + sync_dump(); + } + return ret; } EXPORT_SYMBOL(sync_fence_wait); +static const char *android_fence_get_driver_name(struct fence *fence) +{ + 
struct sync_pt *pt = container_of(fence, struct sync_pt, base); + struct sync_timeline *parent = sync_pt_parent(pt); + + return parent->ops->driver_name; +} + +static const char *android_fence_get_timeline_name(struct fence *fence) +{ + struct sync_pt *pt = container_of(fence, struct sync_pt, base); + struct sync_timeline *parent = sync_pt_parent(pt); + + return parent->name; +} + +static void android_fence_release(struct fence *fence) +{ + struct sync_pt *pt = container_of(fence, struct sync_pt, base); + struct sync_timeline *parent = sync_pt_parent(pt); + unsigned long flags; + + spin_lock_irqsave(fence->lock, flags); + list_del(&pt->child_list); + if (WARN_ON_ONCE(!list_empty(&pt->active_list))) + list_del(&pt->active_list); + spin_unlock_irqrestore(fence->lock, flags); + + if (parent->ops->free_pt) + parent->ops->free_pt(pt); + + sync_timeline_put(parent); + fence_free(&pt->base); +} + +static bool android_fence_signaled(struct fence *fence) +{ + struct sync_pt *pt = container_of(fence, struct sync_pt, base); + struct sync_timeline *parent = sync_pt_parent(pt); + int ret; + + ret = parent->ops->has_signaled(pt); + if (ret < 0) + fence->status = ret; + return ret; +} + +static bool android_fence_enable_signaling(struct fence *fence) +{ + struct sync_pt *pt = container_of(fence, struct sync_pt, base); + struct sync_timeline *parent = sync_pt_parent(pt); + + if (android_fence_signaled(fence)) + return false; + + list_add_tail(&pt->active_list, &parent->active_list_head); + return true; +} + +static int android_fence_fill_driver_data(struct fence *fence, + void *data, int size) +{ + struct sync_pt *pt = container_of(fence, struct sync_pt, base); + struct sync_timeline *parent = sync_pt_parent(pt); + + if (!parent->ops->fill_driver_data) + return 0; + return parent->ops->fill_driver_data(pt, data, size); +} + +static void android_fence_value_str(struct fence *fence, + char *str, int size) +{ + struct sync_pt *pt = container_of(fence, struct sync_pt, base); + struct sync_timeline *parent = sync_pt_parent(pt); + + if (!parent->ops->pt_value_str) { + if (size) + *str = 0; + return; + } + parent->ops->pt_value_str(pt, str, size); +} + +static void android_fence_timeline_value_str(struct fence *fence, + char *str, int size) +{ + struct sync_pt *pt = container_of(fence, struct sync_pt, base); + struct sync_timeline *parent = sync_pt_parent(pt); + + if (!parent->ops->timeline_value_str) { + if (size) + *str = 0; + return; + } + parent->ops->timeline_value_str(parent, str, size); +} + +static const struct fence_ops android_fence_ops = { + .get_driver_name = android_fence_get_driver_name, + .get_timeline_name = android_fence_get_timeline_name, + .enable_signaling = android_fence_enable_signaling, + .signaled = android_fence_signaled, + .wait = fence_default_wait, + .release = android_fence_release, + .fill_driver_data = android_fence_fill_driver_data, + .fence_value_str = android_fence_value_str, + .timeline_value_str = android_fence_timeline_value_str, +}; + static void sync_fence_free(struct kref *kref) { struct sync_fence *fence = container_of(kref, struct sync_fence, kref); + int i, status = atomic_read(&fence->status); - sync_fence_free_pts(fence); + for (i = 0; i < fence->num_fences; ++i) { + if (status) + fence_remove_callback(fence->cbs[i].sync_pt, + &fence->cbs[i].cb); + fence_put(fence->cbs[i].sync_pt); + } kfree(fence); } @@ -648,44 +535,25 @@ static void sync_fence_free(struct kref *kref) static int sync_fence_release(struct inode *inode, struct file *file) { struct sync_fence *fence = 
file->private_data; - unsigned long flags; - - /* - * We need to remove all ways to access this fence before droping - * our ref. - * - * start with its membership in the global fence list - */ - spin_lock_irqsave(&sync_fence_list_lock, flags); - list_del(&fence->sync_fence_list); - spin_unlock_irqrestore(&sync_fence_list_lock, flags); - /* - * remove its pts from their parents so that sync_timeline_signal() - * can't reference the fence. - */ - sync_fence_detach_pts(fence); + sync_fence_debug_remove(fence); kref_put(&fence->kref, sync_fence_free); - return 0; } static unsigned int sync_fence_poll(struct file *file, poll_table *wait) { struct sync_fence *fence = file->private_data; + int status; poll_wait(file, &fence->wq, wait); - /* - * Make sure that reads to fence->status are ordered with the - * wait queue event triggering - */ - smp_rmb(); + status = atomic_read(&fence->status); - if (fence->status == 1) + if (!status) return POLLIN; - else if (fence->status < 0) + else if (status < 0) return POLLERR; else return 0; @@ -750,7 +618,7 @@ err_put_fd: return err; } -static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size) +static int sync_fill_pt_info(struct fence *fence, void *data, int size) { struct sync_pt_info *info = data; int ret; @@ -760,20 +628,24 @@ static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size) info->len = sizeof(struct sync_pt_info); - if (pt->parent->ops->fill_driver_data) { - ret = pt->parent->ops->fill_driver_data(pt, info->driver_data, - size - sizeof(*info)); + if (fence->ops->fill_driver_data) { + ret = fence->ops->fill_driver_data(fence, info->driver_data, + size - sizeof(*info)); if (ret < 0) return ret; info->len += ret; } - strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name)); - strlcpy(info->driver_name, pt->parent->ops->driver_name, + strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), + sizeof(info->obj_name)); + strlcpy(info->driver_name, fence->ops->get_driver_name(fence), sizeof(info->driver_name)); - info->status = pt->status; - info->timestamp_ns = ktime_to_ns(pt->timestamp); + if (fence_is_signaled(fence)) + info->status = fence->status >= 0 ? 
1 : fence->status; + else + info->status = 0; + info->timestamp_ns = ktime_to_ns(fence->timestamp); return info->len; } @@ -782,10 +654,9 @@ static long sync_fence_ioctl_fence_info(struct sync_fence *fence, unsigned long arg) { struct sync_fence_info_data *data; - struct list_head *pos; __u32 size; __u32 len = 0; - int ret; + int ret, i; if (copy_from_user(&size, (void __user *)arg, sizeof(size))) return -EFAULT; @@ -801,12 +672,14 @@ static long sync_fence_ioctl_fence_info(struct sync_fence *fence, return -ENOMEM; strlcpy(data->name, fence->name, sizeof(data->name)); - data->status = fence->status; + data->status = atomic_read(&fence->status); + if (data->status >= 0) + data->status = !data->status; + len = sizeof(struct sync_fence_info_data); - list_for_each(pos, &fence->pt_list_head) { - struct sync_pt *pt = - container_of(pos, struct sync_pt, pt_list); + for (i = 0; i < fence->num_fences; ++i) { + struct fence *pt = fence->cbs[i].sync_pt; ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len); @@ -833,7 +706,6 @@ static long sync_fence_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct sync_fence *fence = file->private_data; - switch (cmd) { case SYNC_IOC_WAIT: return sync_fence_ioctl_wait(fence, arg); @@ -849,181 +721,10 @@ static long sync_fence_ioctl(struct file *file, unsigned int cmd, } } -#ifdef CONFIG_DEBUG_FS -static const char *sync_status_str(int status) -{ - if (status > 0) - return "signaled"; - else if (status == 0) - return "active"; - else - return "error"; -} - -static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence) -{ - int status = pt->status; - - seq_printf(s, " %s%spt %s", - fence ? pt->parent->name : "", - fence ? "_" : "", - sync_status_str(status)); - if (pt->status) { - struct timeval tv = ktime_to_timeval(pt->timestamp); - - seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec); - } - - if (pt->parent->ops->timeline_value_str && - pt->parent->ops->pt_value_str) { - char value[64]; - - pt->parent->ops->pt_value_str(pt, value, sizeof(value)); - seq_printf(s, ": %s", value); - if (fence) { - pt->parent->ops->timeline_value_str(pt->parent, value, - sizeof(value)); - seq_printf(s, " / %s", value); - } - } else if (pt->parent->ops->print_pt) { - seq_puts(s, ": "); - pt->parent->ops->print_pt(s, pt); - } - - seq_puts(s, "\n"); -} - -static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) -{ - struct list_head *pos; - unsigned long flags; - - seq_printf(s, "%s %s", obj->name, obj->ops->driver_name); - - if (obj->ops->timeline_value_str) { - char value[64]; - - obj->ops->timeline_value_str(obj, value, sizeof(value)); - seq_printf(s, ": %s", value); - } else if (obj->ops->print_obj) { - seq_puts(s, ": "); - obj->ops->print_obj(s, obj); - } - - seq_puts(s, "\n"); - - spin_lock_irqsave(&obj->child_list_lock, flags); - list_for_each(pos, &obj->child_list_head) { - struct sync_pt *pt = - container_of(pos, struct sync_pt, child_list); - sync_print_pt(s, pt, false); - } - spin_unlock_irqrestore(&obj->child_list_lock, flags); -} - -static void sync_print_fence(struct seq_file *s, struct sync_fence *fence) -{ - struct list_head *pos; - unsigned long flags; - - seq_printf(s, "[%p] %s: %s\n", fence, fence->name, - sync_status_str(fence->status)); - - list_for_each(pos, &fence->pt_list_head) { - struct sync_pt *pt = - container_of(pos, struct sync_pt, pt_list); - sync_print_pt(s, pt, true); - } - - spin_lock_irqsave(&fence->waiter_list_lock, flags); - list_for_each(pos, &fence->waiter_list_head) { - struct 
sync_fence_waiter *waiter = - container_of(pos, struct sync_fence_waiter, - waiter_list); - - seq_printf(s, "waiter %pF\n", waiter->callback); - } - spin_unlock_irqrestore(&fence->waiter_list_lock, flags); -} - -static int sync_debugfs_show(struct seq_file *s, void *unused) -{ - unsigned long flags; - struct list_head *pos; - - seq_puts(s, "objs:\n--------------\n"); - - spin_lock_irqsave(&sync_timeline_list_lock, flags); - list_for_each(pos, &sync_timeline_list_head) { - struct sync_timeline *obj = - container_of(pos, struct sync_timeline, - sync_timeline_list); - - sync_print_obj(s, obj); - seq_puts(s, "\n"); - } - spin_unlock_irqrestore(&sync_timeline_list_lock, flags); - - seq_puts(s, "fences:\n--------------\n"); - - spin_lock_irqsave(&sync_fence_list_lock, flags); - list_for_each(pos, &sync_fence_list_head) { - struct sync_fence *fence = - container_of(pos, struct sync_fence, sync_fence_list); - - sync_print_fence(s, fence); - seq_puts(s, "\n"); - } - spin_unlock_irqrestore(&sync_fence_list_lock, flags); - return 0; -} - -static int sync_debugfs_open(struct inode *inode, struct file *file) -{ - return single_open(file, sync_debugfs_show, inode->i_private); -} - -static const struct file_operations sync_debugfs_fops = { - .open = sync_debugfs_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, +static const struct file_operations sync_fence_fops = { + .release = sync_fence_release, + .poll = sync_fence_poll, + .unlocked_ioctl = sync_fence_ioctl, + .compat_ioctl = sync_fence_ioctl, }; -static __init int sync_debugfs_init(void) -{ - debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops); - return 0; -} -late_initcall(sync_debugfs_init); - -#define DUMP_CHUNK 256 -static char sync_dump_buf[64 * 1024]; -static void sync_dump(void) -{ - struct seq_file s = { - .buf = sync_dump_buf, - .size = sizeof(sync_dump_buf) - 1, - }; - int i; - - sync_debugfs_show(&s, NULL); - - for (i = 0; i < s.count; i += DUMP_CHUNK) { - if ((s.count - i) > DUMP_CHUNK) { - char c = s.buf[i + DUMP_CHUNK]; - - s.buf[i + DUMP_CHUNK] = 0; - pr_cont("%s", s.buf + i); - s.buf[i + DUMP_CHUNK] = c; - } else { - s.buf[s.count] = 0; - pr_cont("%s", s.buf + i); - } - } -} -#else -static void sync_dump(void) -{ -} -#endif diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h index eaf57cccf62..66b0f431f63 100644 --- a/drivers/staging/android/sync.h +++ b/drivers/staging/android/sync.h @@ -19,6 +19,7 @@ #include <linux/list.h> #include <linux/spinlock.h> #include <linux/wait.h> +#include <linux/fence.h> #include "uapi/sync.h" @@ -40,8 +41,6 @@ struct sync_fence; * -1 if a will signal before b * @free_pt: called before sync_pt is freed * @release_obj: called before sync_timeline is freed - * @print_obj: deprecated - * @print_pt: deprecated * @fill_driver_data: write implementation specific driver data to data. * should return an error if there is not enough room * as specified by size. 
This information is returned @@ -67,13 +66,6 @@ struct sync_timeline_ops { /* optional */ void (*release_obj)(struct sync_timeline *sync_timeline); - /* deprecated */ - void (*print_obj)(struct seq_file *s, - struct sync_timeline *sync_timeline); - - /* deprecated */ - void (*print_pt)(struct seq_file *s, struct sync_pt *sync_pt); - /* optional */ int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size); @@ -104,19 +96,21 @@ struct sync_timeline { /* protected by child_list_lock */ bool destroyed; + int context, value; struct list_head child_list_head; spinlock_t child_list_lock; struct list_head active_list_head; - spinlock_t active_list_lock; +#ifdef CONFIG_DEBUG_FS struct list_head sync_timeline_list; +#endif }; /** * struct sync_pt - sync point - * @parent: sync_timeline to which this sync_pt belongs + * @fence: base fence class * @child_list: membership in sync_timeline.child_list_head * @active_list: membership in sync_timeline.active_list_head * @signaled_list: membership in temporary signaled_list on stack @@ -127,19 +121,22 @@ struct sync_timeline { * signaled or error. */ struct sync_pt { - struct sync_timeline *parent; - struct list_head child_list; + struct fence base; + struct list_head child_list; struct list_head active_list; - struct list_head signaled_list; - - struct sync_fence *fence; - struct list_head pt_list; +}; - /* protected by parent->active_list_lock */ - int status; +static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt) +{ + return container_of(pt->base.lock, struct sync_timeline, + child_list_lock); +} - ktime_t timestamp; +struct sync_fence_cb { + struct fence_cb cb; + struct fence *sync_pt; + struct sync_fence *fence; }; /** @@ -149,9 +146,7 @@ struct sync_pt { * @name: name of sync_fence. Useful for debugging * @pt_list_head: list of sync_pts in the fence. 
immutable once fence * is created - * @waiter_list_head: list of asynchronous waiters on this fence - * @waiter_list_lock: lock protecting @waiter_list_head and @status - * @status: 1: signaled, 0:active, <0: error + * @status: 0: signaled, >0:active, <0: error * * @wq: wait queue for fence signaling * @sync_fence_list: membership in global fence list @@ -160,17 +155,15 @@ struct sync_fence { struct file *file; struct kref kref; char name[32]; - - /* this list is immutable once the fence is created */ - struct list_head pt_list_head; - - struct list_head waiter_list_head; - spinlock_t waiter_list_lock; /* also protects status */ - int status; +#ifdef CONFIG_DEBUG_FS + struct list_head sync_fence_list; +#endif + int num_fences; wait_queue_head_t wq; + atomic_t status; - struct list_head sync_fence_list; + struct sync_fence_cb cbs[]; }; struct sync_fence_waiter; @@ -184,14 +177,14 @@ typedef void (*sync_callback_t)(struct sync_fence *fence, * @callback_data: pointer to pass to @callback */ struct sync_fence_waiter { - struct list_head waiter_list; - - sync_callback_t callback; + wait_queue_t work; + sync_callback_t callback; }; static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter, sync_callback_t callback) { + INIT_LIST_HEAD(&waiter->work.task_list); waiter->callback = callback; } @@ -341,4 +334,22 @@ int sync_fence_cancel_async(struct sync_fence *fence, */ int sync_fence_wait(struct sync_fence *fence, long timeout); +#ifdef CONFIG_DEBUG_FS + +extern void sync_timeline_debug_add(struct sync_timeline *obj); +extern void sync_timeline_debug_remove(struct sync_timeline *obj); +extern void sync_fence_debug_add(struct sync_fence *fence); +extern void sync_fence_debug_remove(struct sync_fence *fence); +extern void sync_dump(void); + +#else +# define sync_timeline_debug_add(obj) +# define sync_timeline_debug_remove(obj) +# define sync_fence_debug_add(fence) +# define sync_fence_debug_remove(fence) +# define sync_dump() +#endif +int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode, + int wake_flags, void *key); + #endif /* _LINUX_SYNC_H */ diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c new file mode 100644 index 00000000000..257fc91bf02 --- /dev/null +++ b/drivers/staging/android/sync_debug.c @@ -0,0 +1,252 @@ +/* + * drivers/base/sync.c + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/debugfs.h> +#include <linux/export.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/kernel.h> +#include <linux/poll.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/anon_inodes.h> +#include "sync.h" + +#ifdef CONFIG_DEBUG_FS + +static LIST_HEAD(sync_timeline_list_head); +static DEFINE_SPINLOCK(sync_timeline_list_lock); +static LIST_HEAD(sync_fence_list_head); +static DEFINE_SPINLOCK(sync_fence_list_lock); + +void sync_timeline_debug_add(struct sync_timeline *obj) +{ + unsigned long flags; + + spin_lock_irqsave(&sync_timeline_list_lock, flags); + list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head); + spin_unlock_irqrestore(&sync_timeline_list_lock, flags); +} + +void sync_timeline_debug_remove(struct sync_timeline *obj) +{ + unsigned long flags; + + spin_lock_irqsave(&sync_timeline_list_lock, flags); + list_del(&obj->sync_timeline_list); + spin_unlock_irqrestore(&sync_timeline_list_lock, flags); +} + +void sync_fence_debug_add(struct sync_fence *fence) +{ + unsigned long flags; + + spin_lock_irqsave(&sync_fence_list_lock, flags); + list_add_tail(&fence->sync_fence_list, &sync_fence_list_head); + spin_unlock_irqrestore(&sync_fence_list_lock, flags); +} + +void sync_fence_debug_remove(struct sync_fence *fence) +{ + unsigned long flags; + + spin_lock_irqsave(&sync_fence_list_lock, flags); + list_del(&fence->sync_fence_list); + spin_unlock_irqrestore(&sync_fence_list_lock, flags); +} + +static const char *sync_status_str(int status) +{ + if (status == 0) + return "signaled"; + + if (status > 0) + return "active"; + + return "error"; +} + +static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence) +{ + int status = 1; + struct sync_timeline *parent = sync_pt_parent(pt); + + if (fence_is_signaled_locked(&pt->base)) + status = pt->base.status; + + seq_printf(s, " %s%spt %s", + fence ? parent->name : "", + fence ? 
"_" : "", + sync_status_str(status)); + + if (status <= 0) { + struct timeval tv = ktime_to_timeval(pt->base.timestamp); + + seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec); + } + + if (parent->ops->timeline_value_str && + parent->ops->pt_value_str) { + char value[64]; + + parent->ops->pt_value_str(pt, value, sizeof(value)); + seq_printf(s, ": %s", value); + if (fence) { + parent->ops->timeline_value_str(parent, value, + sizeof(value)); + seq_printf(s, " / %s", value); + } + } + + seq_puts(s, "\n"); +} + +static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) +{ + struct list_head *pos; + unsigned long flags; + + seq_printf(s, "%s %s", obj->name, obj->ops->driver_name); + + if (obj->ops->timeline_value_str) { + char value[64]; + + obj->ops->timeline_value_str(obj, value, sizeof(value)); + seq_printf(s, ": %s", value); + } + + seq_puts(s, "\n"); + + spin_lock_irqsave(&obj->child_list_lock, flags); + list_for_each(pos, &obj->child_list_head) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, child_list); + sync_print_pt(s, pt, false); + } + spin_unlock_irqrestore(&obj->child_list_lock, flags); +} + +static void sync_print_fence(struct seq_file *s, struct sync_fence *fence) +{ + wait_queue_t *pos; + unsigned long flags; + int i; + + seq_printf(s, "[%p] %s: %s\n", fence, fence->name, + sync_status_str(atomic_read(&fence->status))); + + for (i = 0; i < fence->num_fences; ++i) { + struct sync_pt *pt = + container_of(fence->cbs[i].sync_pt, + struct sync_pt, base); + + sync_print_pt(s, pt, true); + } + + spin_lock_irqsave(&fence->wq.lock, flags); + list_for_each_entry(pos, &fence->wq.task_list, task_list) { + struct sync_fence_waiter *waiter; + + if (pos->func != &sync_fence_wake_up_wq) + continue; + + waiter = container_of(pos, struct sync_fence_waiter, work); + + seq_printf(s, "waiter %pF\n", waiter->callback); + } + spin_unlock_irqrestore(&fence->wq.lock, flags); +} + +static int sync_debugfs_show(struct seq_file *s, void *unused) +{ + unsigned long flags; + struct list_head *pos; + + seq_puts(s, "objs:\n--------------\n"); + + spin_lock_irqsave(&sync_timeline_list_lock, flags); + list_for_each(pos, &sync_timeline_list_head) { + struct sync_timeline *obj = + container_of(pos, struct sync_timeline, + sync_timeline_list); + + sync_print_obj(s, obj); + seq_puts(s, "\n"); + } + spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + + seq_puts(s, "fences:\n--------------\n"); + + spin_lock_irqsave(&sync_fence_list_lock, flags); + list_for_each(pos, &sync_fence_list_head) { + struct sync_fence *fence = + container_of(pos, struct sync_fence, sync_fence_list); + + sync_print_fence(s, fence); + seq_puts(s, "\n"); + } + spin_unlock_irqrestore(&sync_fence_list_lock, flags); + return 0; +} + +static int sync_debugfs_open(struct inode *inode, struct file *file) +{ + return single_open(file, sync_debugfs_show, inode->i_private); +} + +static const struct file_operations sync_debugfs_fops = { + .open = sync_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static __init int sync_debugfs_init(void) +{ + debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops); + return 0; +} +late_initcall(sync_debugfs_init); + +#define DUMP_CHUNK 256 +static char sync_dump_buf[64 * 1024]; +void sync_dump(void) +{ + struct seq_file s = { + .buf = sync_dump_buf, + .size = sizeof(sync_dump_buf) - 1, + }; + int i; + + sync_debugfs_show(&s, NULL); + + for (i = 0; i < s.count; i += DUMP_CHUNK) { + if ((s.count - i) > DUMP_CHUNK) { + char c = 
s.buf[i + DUMP_CHUNK]; + + s.buf[i + DUMP_CHUNK] = 0; + pr_cont("%s", s.buf + i); + s.buf[i + DUMP_CHUNK] = c; + } else { + s.buf[s.count] = 0; + pr_cont("%s", s.buf + i); + } + } +} + +#endif diff --git a/drivers/staging/android/trace/sync.h b/drivers/staging/android/trace/sync.h index 95462359ba5..77edb977a7b 100644 --- a/drivers/staging/android/trace/sync.h +++ b/drivers/staging/android/trace/sync.h @@ -45,7 +45,7 @@ TRACE_EVENT(sync_wait, TP_fast_assign( __assign_str(name, fence->name); - __entry->status = fence->status; + __entry->status = atomic_read(&fence->status); __entry->begin = begin; ), @@ -54,19 +54,19 @@ TRACE_EVENT(sync_wait, ); TRACE_EVENT(sync_pt, - TP_PROTO(struct sync_pt *pt), + TP_PROTO(struct fence *pt), TP_ARGS(pt), TP_STRUCT__entry( - __string(timeline, pt->parent->name) + __string(timeline, pt->ops->get_timeline_name(pt)) __array(char, value, 32) ), TP_fast_assign( - __assign_str(timeline, pt->parent->name); - if (pt->parent->ops->pt_value_str) { - pt->parent->ops->pt_value_str(pt, __entry->value, + __assign_str(timeline, pt->ops->get_timeline_name(pt)); + if (pt->ops->fence_value_str) { + pt->ops->fence_value_str(pt, __entry->value, sizeof(__entry->value)); } else { __entry->value[0] = '\0'; |
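
The dma_buf_export() hunks above all gain a fifth argument: an optional struct reservation_object for the exported buffer to share. Drivers without their own reservation object keep passing NULL (exynos, i915, omap, tegra, ttm_object, videobuf2, ion), while the TTM-based drivers expose theirs through the new .gem_prime_res_obj hook (nouveau, radeon) so the PRIME core can hand it to dma_buf_export(). A minimal sketch of the driver side follows; every foo_* name is invented for illustration, and the drm_gem_prime_export() plumbing that consumes the hook is assumed rather than shown in this diff.

#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_api.h>
#include <linux/dma-buf.h>
#include <linux/reservation.h>

/* Hypothetical TTM-backed GEM object; foo_* is not a real driver. */
struct foo_bo {
        struct ttm_buffer_object tbo;   /* tbo.resv is the BO's reservation */
        struct drm_gem_object gem;
};

/* The driver's usual dma-buf ops, elided here. */
static const struct dma_buf_ops foo_dmabuf_ops;

static inline struct foo_bo *to_foo_bo(struct drm_gem_object *obj)
{
        return container_of(obj, struct foo_bo, gem);
}

/* .gem_prime_res_obj, following the nouveau/radeon hunks above. */
struct reservation_object *foo_gem_prime_res_obj(struct drm_gem_object *obj)
{
        return to_foo_bo(obj)->tbo.resv;
}

/* A driver exporting directly would pass that object instead of NULL. */
struct dma_buf *foo_gem_prime_export(struct drm_device *dev,
                                     struct drm_gem_object *obj, int flags)
{
        return dma_buf_export(obj, &foo_dmabuf_ops, obj->size, flags,
                              foo_gem_prime_res_obj(obj));
}

Sharing the reservation object this way is what lets fences attached by the exporting driver be observed by importers of the same dma-buf.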
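
sync_fence_merge() above assumes each input fence keeps its points sorted by fence context and, when both fences carry a point on the same context, collapses them to whichever signals later. "Later" is decided by the unsigned test pt_a->seqno - pt_b->seqno <= INT_MAX, which stays correct across a 32-bit wrap of the per-context sequence counter. A standalone illustration of that comparison:

#include <linux/kernel.h>
#include <linux/types.h>

/* True when seqno @a signals at or after seqno @b on the same fence
 * context, even if the 32-bit counter wrapped between the two. */
static bool seqno_after_or_equal(u32 a, u32 b)
{
        return (a - b) <= INT_MAX;
}

/*
 * Worked example: b = 0xfffffffe was issued just before the counter
 * wrapped and a = 3 just after; a - b = 5, so @a is treated as the
 * later point and is the one the merge keeps.
 */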
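
The in-kernel waiter API keeps its previous contract even though it now sits on the fence's wait queue instead of a private waiter list: sync_fence_wait_async() returns a negative error if the fence already carries an error, a positive value if it has already signaled (the callback will not be invoked), and 0 once the waiter has been queued. A usage sketch under those assumptions; the example_* names are invented, and the caller must keep the waiter storage alive until the callback runs or sync_fence_cancel_async() removes it.

#include <linux/kernel.h>
#include "sync.h"               /* drivers/staging/android/sync.h */

static void example_fence_signaled(struct sync_fence *fence,
                                   struct sync_fence_waiter *waiter)
{
        /* Runs from the fence's wait queue once the last point signals. */
        pr_info("fence %p signaled\n", fence);
}

static int example_watch_fence(struct sync_fence *fence,
                               struct sync_fence_waiter *waiter)
{
        int ret;

        sync_fence_waiter_init(waiter, example_fence_signaled);

        ret = sync_fence_wait_async(fence, waiter);
        if (ret < 0)
                return ret;                     /* fence is in an error state */
        if (ret > 0)
                example_fence_signaled(fence, waiter);  /* already signaled */

        return 0;
}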