author		Dave Airlie <airlied@redhat.com>	2013-01-21 07:44:58 +1000
committer	Dave Airlie <airlied@redhat.com>	2013-01-21 07:44:58 +1000
commit		735dc0d1e29329ff34ec97f66e130cce481c9607 (patch)
tree		cf946856ff1defac833e601a3e4a4d8e841ee73e /lib/cpu_rmap.c
parent		bac4b7c3b5c0660c08dc4949fe40e08e20364ee3 (diff)
parent		20c60c35de3285222b3476c3445c66bedf0c449c (diff)
Merge branch 'drm-kms-locking' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
The aim of this locking rework is that ioctls which a compositor might call for every frame (set_cursor, page_flip, addfb, rmfb and getfb/create_handle) should not be able to block on kms background activities like output detection. And since each EDID read takes about 25ms (in the best case), that always means we'll drop at least one frame.

The solution is to add per-crtc locking for these ioctls, and restrict background activities to only use the global lock. Change-the-world type of events (modeset, dpms, ...) need to grab all locks.

Two tricky parts arose in the conversion:

- A lot of current code assumes that a kms fb object can't disappear while holding the global lock, since the current code serializes fb destruction with it. Hence proper lifetime management using the already created refcounting for fbs needs to be instantiated for all ioctls and interfaces/users.

- The rmfb ioctl removes the to-be-deleted fb from all active users. But unconditionally taking the global kms lock to do so introduces an unacceptable potential stall point. And obviously changing the userspace abi isn't on the table, either. Hence this conversion opportunistically checks whether the rmfb ioctl holds the very last reference, which guarantees that the fb isn't in active use on any crtc or plane (thanks to the conversion to the new lifetime rules using proper refcounting). Only if this is not the case will the code go through the slowpath and grab all modeset locks. Sane compositors will never hit this path and so avoid the stall, but userspace relying on these semantics will also not break.

All these cases are exercised by the newly added subtests for i-g-t kms_flip, tested on a machine where a full detect cycle takes around 100 ms. It works, and no frames are dropped any more with these patches applied. kms_flip also contains a special case to exercise the above-described rmfb slowpath.

* 'drm-kms-locking' of git://people.freedesktop.org/~danvet/drm-intel: (335 commits)
  drm/fb_helper: check whether fbcon is bound
  drm/doc: updates for new framebuffer lifetime rules
  drm: don't hold crtc mutexes for connector ->detect callbacks
  drm: only grab the crtc lock for pageflips
  drm: optimize drm_framebuffer_remove
  drm/vmwgfx: add proper framebuffer refcounting
  drm/i915: dump refcount into framebuffer debugfs file
  drm: refcounting for crtc framebuffers
  drm: refcounting for sprite framebuffers
  drm: fb refcounting for dirtyfb_ioctl
  drm: don't take modeset locks in getfb ioctl
  drm: push modeset_lock_all into ->fb_create driver callbacks
  drm: nest modeset locks within fpriv->fbs_lock
  drm: reference framebuffers which are on the idr
  drm: revamp framebuffer cleanup interfaces
  drm: create drm_framebuffer_lookup
  drm: revamp locking around fb creation/destruction
  drm: only take the crtc lock for ->cursor_move
  drm: only take the crtc lock for ->cursor_set
  drm: add per-crtc locks
  ...
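The opportunistic rmfb check described above boils down to a refcount test before taking the heavyweight locks. A minimal sketch of the idea (not the exact drm code; the function name framebuffer_remove_sketch is hypothetical, and the real logic in drm_framebuffer_remove() also detaches the fb from crtcs and planes on the slow path):

/* Sketch only: the fast path skips the global modeset locks when
 * rmfb holds the very last reference, since the fb then cannot be
 * in active use on any crtc or plane. */
static void framebuffer_remove_sketch(struct drm_device *dev,
				      struct drm_framebuffer *fb)
{
	if (atomic_read(&fb->refcount.refcount) == 1) {
		/* Fast path: last reference, nothing to detach. */
		drm_framebuffer_unreference(fb);
		return;
	}

	/* Slow path: fb still referenced somewhere; grab all modeset
	 * locks and detach it from every crtc/plane first. */
	drm_modeset_lock_all(dev);
	/* ... force-disable crtcs/planes using this fb ... */
	drm_modeset_unlock_all(dev);

	drm_framebuffer_unreference(fb);
}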
Diffstat (limited to 'lib/cpu_rmap.c')
-rw-r--r--	lib/cpu_rmap.c	54
1 file changed, 49 insertions(+), 5 deletions(-)
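The lib/cpu_rmap.c change below converts the rmap's lifetime to the kernel's standard kref idiom instead of flushing workqueues at teardown. For reference, a self-contained sketch of that idiom (the struct foo and its helpers are hypothetical, not from the patch):

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref refcount;	/* embedded reference count */
	/* ... payload ... */
};

static void foo_release(struct kref *ref)
{
	/* Called by kref_put() when the count drops to zero. */
	struct foo *foo = container_of(ref, struct foo, refcount);
	kfree(foo);
}

static struct foo *foo_alloc(void)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (foo)
		kref_init(&foo->refcount);	/* count starts at 1 */
	return foo;
}

static int foo_put(struct foo *foo)
{
	/* Returns 1 iff foo_release() ran (last reference dropped). */
	return kref_put(&foo->refcount, foo_release);
}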
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
index 145dec5267c..5fbed5caba6 100644
--- a/lib/cpu_rmap.c
+++ b/lib/cpu_rmap.c
@@ -45,6 +45,7 @@ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
 	if (!rmap)
 		return NULL;
 
+	kref_init(&rmap->refcount);
 	rmap->obj = (void **)((char *)rmap + obj_offset);
 
 	/* Initially assign CPUs to objects on a rota, since we have
@@ -63,6 +64,35 @@ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
 }
 EXPORT_SYMBOL(alloc_cpu_rmap);
 
+/**
+ * cpu_rmap_release - internal reclaiming helper called from kref_put
+ * @ref: kref to struct cpu_rmap
+ */
+static void cpu_rmap_release(struct kref *ref)
+{
+	struct cpu_rmap *rmap = container_of(ref, struct cpu_rmap, refcount);
+	kfree(rmap);
+}
+
+/**
+ * cpu_rmap_get - internal helper to get new ref on a cpu_rmap
+ * @rmap: reverse-map allocated with alloc_cpu_rmap()
+ */
+static inline void cpu_rmap_get(struct cpu_rmap *rmap)
+{
+	kref_get(&rmap->refcount);
+}
+
+/**
+ * cpu_rmap_put - release ref on a cpu_rmap
+ * @rmap: reverse-map allocated with alloc_cpu_rmap()
+ */
+int cpu_rmap_put(struct cpu_rmap *rmap)
+{
+	return kref_put(&rmap->refcount, cpu_rmap_release);
+}
+EXPORT_SYMBOL(cpu_rmap_put);
+
 /* Reevaluate nearest object for given CPU, comparing with the given
  * neighbours at the given distance.
  */
@@ -197,8 +227,7 @@ struct irq_glue {
  * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
  * @rmap: Reverse-map allocated with alloc_irq_cpu_map(), or %NULL
  *
- * Must be called in process context, before freeing the IRQs, and
- * without holding any locks required by global workqueue items.
+ * Must be called in process context, before freeing the IRQs.
  */
 void free_irq_cpu_rmap(struct cpu_rmap *rmap)
 {
@@ -212,12 +241,18 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap)
 		glue = rmap->obj[index];
 		irq_set_affinity_notifier(glue->notify.irq, NULL);
 	}
-	irq_run_affinity_notifiers();
 
-	kfree(rmap);
+	cpu_rmap_put(rmap);
 }
 EXPORT_SYMBOL(free_irq_cpu_rmap);
 
+/**
+ * irq_cpu_rmap_notify - callback for IRQ subsystem when IRQ affinity updated
+ * @notify: struct irq_affinity_notify passed by irq/manage.c
+ * @mask: cpu mask for new SMP affinity
+ *
+ * This is executed in workqueue context.
+ */
 static void
 irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
 {
@@ -230,10 +265,16 @@ irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
 		pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
 }
 
+/**
+ * irq_cpu_rmap_release - reclaiming callback for IRQ subsystem
+ * @ref: kref to struct irq_affinity_notify passed by irq/manage.c
+ */
 static void irq_cpu_rmap_release(struct kref *ref)
 {
 	struct irq_glue *glue =
 	    container_of(ref, struct irq_glue, notify.kref);
+
+	cpu_rmap_put(glue->rmap);
 	kfree(glue);
 }
 
@@ -258,10 +299,13 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
 	glue->notify.notify = irq_cpu_rmap_notify;
 	glue->notify.release = irq_cpu_rmap_release;
 	glue->rmap = rmap;
+	cpu_rmap_get(rmap);
 	glue->index = cpu_rmap_add(rmap, glue);
 	rc = irq_set_affinity_notifier(irq, &glue->notify);
-	if (rc)
+	if (rc) {
+		cpu_rmap_put(glue->rmap);
 		kfree(glue);
+	}
 	return rc;
 }
 EXPORT_SYMBOL(irq_cpu_rmap_add);
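With the refcounting in place, the driver-side lifetime looks roughly like the following sketch (the functions setup_rx_rmap/teardown_rx_rmap are hypothetical; error handling trimmed). Each irq_cpu_rmap_add() takes its own reference through the irq_glue, so the rmap outlives free_irq_cpu_rmap() until the last affinity notifier's release callback has run:

static struct cpu_rmap *setup_rx_rmap(const int *irqs, unsigned int nvec)
{
	struct cpu_rmap *rmap;
	unsigned int i;

	rmap = alloc_cpu_rmap(nvec, GFP_KERNEL);	/* refcount = 1 */
	if (!rmap)
		return NULL;
	for (i = 0; i < nvec; i++)
		irq_cpu_rmap_add(rmap, irqs[i]);	/* +1 ref per glue */
	return rmap;
}

static void teardown_rx_rmap(struct cpu_rmap *rmap)
{
	/* Unregisters the notifiers and drops the allocation's own
	 * reference; no workqueue flush needed any more.  The final
	 * kfree() happens in cpu_rmap_release() once the last glue's
	 * irq_cpu_rmap_release() drops its reference. */
	free_irq_cpu_rmap(rmap);
}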