author    Arnd Bergmann <arnd@arndb.de>    2014-09-05 16:53:56 +0200
committer Arnd Bergmann <arnd@arndb.de>    2014-09-05 16:53:56 +0200
commit    184df9ddaab4a572e61b321abc079ca49155fc12 (patch)
tree      5c99704d8508224b25552f24959b54772d8eec1e /fs/aio.c
parent    647f95fa99b16e7c7854a202e91e6aa22ebeecf4 (diff)
parent    13298fbbdb3f6a0ef55419dc048e064c7a7b0ef8 (diff)
Merge tag 'renesas-kconfig-cleanups-for-v3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas into next/cleanup
Pull "Renesas ARM Based SoC Kconfig Cleanups for v3.18" from Simon Horman: * Update name of "R-Car M2-W" SoC (previously there was no "-W") * Consolidate Legacy SH_CLK_CPG and CPU_V7 Kconfig * Only select PM_RMOBILE for legacy case * Cleanup pm-rcar.o and pm-rmobile.o build using Kconfig Signed-off-by: Arnd Bergmann <arnd@arndb.de> * tag 'renesas-kconfig-cleanups-for-v3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas: ARM: shmobile: r8a7791 is now called "R-Car M2-W" ARM: shmobile: Consolidate Legacy SH_CLK_CPG Kconfig ARM: shmobile: Consolidate Legacy CPU_V7 Kconfig ARM: shmobile: Only select PM_RMOBILE for legacy case ARM: shmobile: Cleanup pm-rmobile.o build using Kconfig ARM: shmobile: Cleanup pm-rcar.o build using Kconfig ARM: shmobile: Introduce a Kconfig entry for R-Car Gen2 ARM: shmobile: Introduce a Kconfig entry for R-Car Gen1 ARM: shmobile: Introduce a Kconfig entry for R-Mobile Includes an update to 3.17-rc2 to avoid a dependency
Diffstat (limited to 'fs/aio.c')
-rw-r--r--    fs/aio.c    77
1 file changed, 73 insertions(+), 4 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index ae635872aff..97bc62cbe2d 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -141,6 +141,7 @@ struct kioctx {
struct {
unsigned tail;
+ unsigned completed_events;
spinlock_t completion_lock;
} ____cacheline_aligned_in_smp;
@@ -857,6 +858,68 @@ out:
return ret;
}
+/* refill_reqs_available
+ * Updates the reqs_available reference counts used for tracking the
+ * number of free slots in the completion ring. This can be called
+ * from aio_complete() (to optimistically update reqs_available) or
+ * from aio_get_req() (the case where we're out of events). It must be
+ * called holding ctx->completion_lock.
+ */
+static void refill_reqs_available(struct kioctx *ctx, unsigned head,
+ unsigned tail)
+{
+ unsigned events_in_ring, completed;
+
+ /* Clamp head since userland can write to it. */
+ head %= ctx->nr_events;
+ if (head <= tail)
+ events_in_ring = tail - head;
+ else
+ events_in_ring = ctx->nr_events - (head - tail);
+
+ completed = ctx->completed_events;
+ if (events_in_ring < completed)
+ completed -= events_in_ring;
+ else
+ completed = 0;
+
+ if (!completed)
+ return;
+
+ ctx->completed_events -= completed;
+ put_reqs_available(ctx, completed);
+}
+
+/* user_refill_reqs_available
+ * Called to refill reqs_available when aio_get_req() runs out of
+ * space in the completion ring.
+ */
+static void user_refill_reqs_available(struct kioctx *ctx)
+{
+ spin_lock_irq(&ctx->completion_lock);
+ if (ctx->completed_events) {
+ struct aio_ring *ring;
+ unsigned head;
+
+ /* Access of ring->head may race with aio_read_events_ring()
+ * here, but that's okay: whether we read the old version
+ * or the new version, either will be valid. The important
+ * part is that head cannot pass tail since we prevent
+ * aio_complete() from updating tail by holding
+ * ctx->completion_lock. Even if head is invalid, the check
+ * against ctx->completed_events below will make sure we do the
+ * safe/right thing.
+ */
+ ring = kmap_atomic(ctx->ring_pages[0]);
+ head = ring->head;
+ kunmap_atomic(ring);
+
+ refill_reqs_available(ctx, head, ctx->tail);
+ }
+
+ spin_unlock_irq(&ctx->completion_lock);
+}
+
/* aio_get_req
* Allocate a slot for an aio request.
* Returns NULL if no requests are free.
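The head/tail arithmetic in refill_reqs_available() above is the subtle part: head comes from a page userspace can scribble on, and the ring wraps. The following standalone sketch shows how that math recovers the number of reclaimable slots. It is illustrative only; reclaimable() is a hypothetical stand-in for the body of refill_reqs_available(), and the ring size and counts are made up.

#include <assert.h>
#include <stdio.h>

static unsigned reclaimable(unsigned nr_events, unsigned head,
			    unsigned tail, unsigned completed)
{
	unsigned events_in_ring;

	head %= nr_events;		/* clamp untrusted head */
	if (head <= tail)
		events_in_ring = tail - head;
	else
		events_in_ring = nr_events - (head - tail);

	/* Only completions userspace has already read free up slots. */
	return completed > events_in_ring ? completed - events_in_ring : 0;
}

int main(void)
{
	/* Wrapped ring: 16 unread events between head=120 and tail=8,
	 * all 16 pending completions still occupy slots. */
	assert(reclaimable(128, 120, 8, 16) == 0);
	/* 8 unread events left; the other 8 slots can be returned. */
	assert(reclaimable(128, 0, 8, 16) == 8);
	printf("ok\n");
	return 0;
}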
@@ -865,8 +928,11 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
{
struct kiocb *req;
- if (!get_reqs_available(ctx))
- return NULL;
+ if (!get_reqs_available(ctx)) {
+ user_refill_reqs_available(ctx);
+ if (!get_reqs_available(ctx))
+ return NULL;
+ }
req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
if (unlikely(!req))
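The shape of this change: keep the lock-free fast path, and fall back to the locked refill only when it fails, retrying exactly once. A minimal userspace sketch of that pattern follows; try_get_slot() and locked_refill() are hypothetical stand-ins for get_reqs_available() and user_refill_reqs_available(), and a plain atomic stands in for the kernel's per-cpu batched counter.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint reqs_available;

static bool try_get_slot(void)		/* ~ get_reqs_available() */
{
	unsigned old = atomic_load(&reqs_available);

	while (old) {
		if (atomic_compare_exchange_weak(&reqs_available, &old,
						 old - 1))
			return true;	/* took one slot, no lock */
	}
	return false;
}

static void locked_refill(void)		/* ~ user_refill_reqs_available() */
{
	/* would take completion_lock, read ring->head, reclaim slots */
}

static bool get_request_slot(void)
{
	if (try_get_slot())
		return true;		/* common case: no lock taken */
	locked_refill();		/* reclaim completed-but-unaccounted slots */
	return try_get_slot();		/* exactly one retry, as in aio_get_req() */
}

int main(void)
{
	atomic_store(&reqs_available, 1);
	return get_request_slot() ? 0 : 1;	/* fast path succeeds */
}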
@@ -925,8 +991,8 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
struct kioctx *ctx = iocb->ki_ctx;
struct aio_ring *ring;
struct io_event *ev_page, *event;
+ unsigned tail, pos, head;
unsigned long flags;
- unsigned tail, pos;
/*
* Special case handling for sync iocbs:
@@ -987,10 +1053,14 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
ctx->tail = tail;
ring = kmap_atomic(ctx->ring_pages[0]);
+ head = ring->head;
ring->tail = tail;
kunmap_atomic(ring);
flush_dcache_page(ctx->ring_pages[0]);
+ ctx->completed_events++;
+ if (ctx->completed_events > 1)
+ refill_reqs_available(ctx, head, tail);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
pr_debug("added to ring %p at [%u]\n", iocb, tail);
@@ -1005,7 +1075,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
/* everything turned out well, dispose of the aiocb. */
kiocb_free(iocb);
- put_reqs_available(ctx, 1);
/*
* We have to order our ring_info tail store above and test