author     David Woodhouse <David.Woodhouse@intel.com>   2008-08-10 12:33:00 +0100
committer  Jens Axboe <jens.axboe@oracle.com>            2008-10-09 08:56:01 +0200
commit     35ba8f7083e87602b695d6eaca38a6464d5b74db (patch)
tree       dd9e515480ec307cf12378f13b01c7922cf3266c /block/blktrace.c
parent     27b29e86bf3d4b3cf6641a0efd78ed11a9b633b2 (diff)
blktrace: simplify flags handling in __blk_add_trace
Let the compiler see what's going on, and it can all get a lot simpler. On PPC64 this reduces the size of the code calculating these bits by about 60%. On x86_64 it's less of a win -- only 40%.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
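For illustration, take the BARRIER case of the new MASK_TC_BIT() macro below. BLK_TC_BARRIER is 1 << 2 and BLK_TC_SHIFT is 16 in blktrace_api.h; assuming BIO_RW_BARRIER is bit 2 in the bio rw flags of this kernel (an assumption made for this worked example, not quoted from the headers), the whole expression folds to a single constant shift:

    /* worked expansion, assuming BIO_RW_BARRIER == 2 */
    MASK_TC_BIT(rw, BARRIER)
        == (rw & (1 << BIO_RW_BARRIER)) << (ilog2(BLK_TC_BARRIER) + BLK_TC_SHIFT - BIO_RW_BARRIER)
        == (rw & (1 << 2)) << (2 + 16 - 2)
        == (rw & (1 << 2)) << 16

That is BLK_TC_ACT(BLK_TC_BARRIER) when the bit is set and 0 otherwise, with no table load and no run-time ilog2() call; the five lookups in the old code become five such single-shift expressions, which is where the size reduction comes from.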
Diffstat (limited to 'block/blktrace.c')
-rw-r--r--  block/blktrace.c  38
1 file changed, 8 insertions(+), 30 deletions(-)
diff --git a/block/blktrace.c b/block/blktrace.c
index 7495a84353e..9e0212c90b2 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -111,31 +111,9 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
*/
static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) };
-/*
- * Bio action bits of interest
- */
-static u32 bio_act[17] __read_mostly = {
- [1] = BLK_TC_ACT(BLK_TC_BARRIER),
- [2] = BLK_TC_ACT(BLK_TC_SYNC),
- [4] = BLK_TC_ACT(BLK_TC_AHEAD),
- [8] = BLK_TC_ACT(BLK_TC_META),
- [16] = BLK_TC_ACT(BLK_TC_DISCARD)
-};
-
-/*
- * More could be added as needed, taking care to increment the decrementer
- * to get correct indexing
- */
-#define trace_barrier_bit(rw) \
- (((rw) & (1 << BIO_RW_BARRIER)) >> (BIO_RW_BARRIER - 0))
-#define trace_sync_bit(rw) \
- (((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
-#define trace_ahead_bit(rw) \
- (((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))
-#define trace_meta_bit(rw) \
- (((rw) & (1 << BIO_RW_META)) >> (BIO_RW_META - 3))
-#define trace_discard_bit(rw) \
- (((rw) & (1 << BIO_RW_DISCARD)) >> (BIO_RW_DISCARD - 4))
+/* The ilog2() calls fall out because they're constant */
+#define MASK_TC_BIT(rw, __name) ( (rw & (1 << BIO_RW_ ## __name)) << \
+ (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name) )
/*
* The worker for the various blk_add_trace*() types. Fills out a
@@ -155,11 +133,11 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
return;
what |= ddir_act[rw & WRITE];
- what |= bio_act[trace_barrier_bit(rw)];
- what |= bio_act[trace_sync_bit(rw)];
- what |= bio_act[trace_ahead_bit(rw)];
- what |= bio_act[trace_meta_bit(rw)];
- what |= bio_act[trace_discard_bit(rw)];
+ what |= MASK_TC_BIT(rw, BARRIER);
+ what |= MASK_TC_BIT(rw, SYNC);
+ what |= MASK_TC_BIT(rw, AHEAD);
+ what |= MASK_TC_BIT(rw, META);
+ what |= MASK_TC_BIT(rw, DISCARD);
pid = tsk->pid;
if (unlikely(act_log_check(bt, what, sector, pid)))
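
As a sanity check of the new flag mapping, here is a minimal user-space sketch (not part of the patch) that mirrors MASK_TC_BIT(), with GCC's __builtin_ctz() standing in for the kernel's ilog2(). The BLK_TC_* values and BLK_TC_SHIFT follow blktrace_api.h, but the BIO_RW_* bit numbers are illustrative stand-ins chosen to be consistent with the old trace_*_bit() macros above, not quoted from the kernel headers of this release.

    /* Minimal user-space sketch of the MASK_TC_BIT() mapping (illustrative). */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed bio rw flag bit numbers, consistent with the removed macros */
    #define BIO_RW_AHEAD    1
    #define BIO_RW_BARRIER  2
    #define BIO_RW_SYNC     4
    #define BIO_RW_META     5
    #define BIO_RW_DISCARD  6

    /* Trace categories and shift, as in include/linux/blktrace_api.h */
    #define BLK_TC_BARRIER  (1 << 2)
    #define BLK_TC_SYNC     (1 << 3)
    #define BLK_TC_AHEAD    (1 << 11)
    #define BLK_TC_META     (1 << 12)
    #define BLK_TC_DISCARD  (1 << 13)
    #define BLK_TC_SHIFT    16
    #define BLK_TC_ACT(act) ((act) << BLK_TC_SHIFT)

    /* Stand-in for the kernel's ilog2(); constant-folded for constant input */
    #define ilog2(x)        __builtin_ctz(x)

    #define MASK_TC_BIT(rw, __name) ( (rw & (1 << BIO_RW_ ## __name)) << \
            (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name) )

    int main(void)
    {
            /* a request flagged SYNC and META */
            uint32_t rw = (1u << BIO_RW_SYNC) | (1u << BIO_RW_META);
            uint32_t what = 0;

            what |= MASK_TC_BIT(rw, BARRIER);
            what |= MASK_TC_BIT(rw, SYNC);
            what |= MASK_TC_BIT(rw, AHEAD);
            what |= MASK_TC_BIT(rw, META);
            what |= MASK_TC_BIT(rw, DISCARD);

            /* exactly the SYNC and META trace-action bits should be set */
            assert(what == (BLK_TC_ACT(BLK_TC_SYNC) | BLK_TC_ACT(BLK_TC_META)));
            printf("what = %#x\n", what);
            return 0;
    }

Built with optimisation, each MASK_TC_BIT() use collapses to a single and/shift pair, which is the reduction the commit message measures on PPC64 and x86_64.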