Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--   include/linux/blkdev.h   55
1 file changed, 38 insertions(+), 17 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 465d6babc84..e03660964e0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -38,6 +38,10 @@ struct request;
typedef void (rq_end_io_fn)(struct request *, int);
struct request_list {
+ /*
+ * count[], starved[], and wait[] are indexed by
+ * BLK_RW_SYNC/BLK_RW_ASYNC
+ */
int count[2];
int starved[2];
int elvpriv;
@@ -66,6 +70,11 @@ enum rq_cmd_type_bits {
REQ_TYPE_ATA_PC,
};
+enum {
+ BLK_RW_ASYNC = 0,
+ BLK_RW_SYNC = 1,
+};
+
/*
* For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
* sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
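The new BLK_RW_ASYNC/BLK_RW_SYNC constants are the indices for the count[], starved[], and wait[] arrays in struct request_list, as the comment added in the first hunk notes. A minimal sketch of selecting the index (blk_rw_index() is a hypothetical helper for illustration, not part of this patch):

static inline int blk_rw_index(bool is_sync)
{
	/* sync requests land in slot 1, async in slot 0 */
	return is_sync ? BLK_RW_SYNC : BLK_RW_ASYNC;
}

/* e.g.: rl->count[blk_rw_index(rq_is_sync(rq))]++; */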
@@ -103,12 +112,13 @@ enum rq_flag_bits {
__REQ_QUIET, /* don't worry about errors */
__REQ_PREEMPT, /* set for "ide_preempt" requests */
__REQ_ORDERED_COLOR, /* is before or after barrier */
- __REQ_RW_SYNC, /* request is sync (O_DIRECT) */
+ __REQ_RW_SYNC, /* request is sync (sync write or read) */
__REQ_ALLOCED, /* request came from our alloc pool */
__REQ_RW_META, /* metadata io request */
__REQ_COPY_USER, /* contains copies of user pages */
__REQ_INTEGRITY, /* integrity metadata has been remapped */
__REQ_UNPLUG, /* unplug queue on submission */
+ __REQ_NOIDLE, /* Don't anticipate more IO after this one */
__REQ_NR_BITS, /* stops here */
};
@@ -136,6 +146,7 @@ enum rq_flag_bits {
#define REQ_COPY_USER (1 << __REQ_COPY_USER)
#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
#define REQ_UNPLUG (1 << __REQ_UNPLUG)
+#define REQ_NOIDLE (1 << __REQ_NOIDLE)
#define BLK_MAX_CDB 16
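REQ_NOIDLE follows the existing convention: each __REQ_* bit number in rq_flag_bits becomes a mask via 1 << bit, and that mask is tested against or ORed into rq->cmd_flags. A hedged sketch of composing the flags for a sync write that should not trigger idling (illustration only, not code from this patch):

/*
 * Mark a write as sync, but tell the IO scheduler not to idle
 * waiting for more IO from this context once it completes.
 */
unsigned int rw_flags = REQ_RW | REQ_RW_SYNC | REQ_NOIDLE;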
@@ -438,8 +449,8 @@ struct request_queue
#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
-#define QUEUE_FLAG_READFULL 3 /* read queue has been filled */
-#define QUEUE_FLAG_WRITEFULL 4 /* write queue has been filled */
+#define QUEUE_FLAG_SYNCFULL 3 /* sync request queue has been filled */
+#define QUEUE_FLAG_ASYNCFULL 4 /* async request queue has been filled */
#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
@@ -611,32 +622,42 @@ enum {
#define rq_data_dir(rq) ((rq)->cmd_flags & 1)
/*
- * We regard a request as sync, if it's a READ or a SYNC write.
+ * We regard a request as sync if it's either a read or a sync write.
*/
-#define rq_is_sync(rq) (rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
+static inline bool rw_is_sync(unsigned int rw_flags)
+{
+ return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+}
+
+static inline bool rq_is_sync(struct request *rq)
+{
+ return rw_is_sync(rq->cmd_flags);
+}
+
#define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META)
+#define rq_noidle(rq) ((rq)->cmd_flags & REQ_NOIDLE)
-static inline int blk_queue_full(struct request_queue *q, int rw)
+static inline int blk_queue_full(struct request_queue *q, int sync)
{
- if (rw == READ)
- return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
- return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+ if (sync)
+ return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
+ return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
}
-static inline void blk_set_queue_full(struct request_queue *q, int rw)
+static inline void blk_set_queue_full(struct request_queue *q, int sync)
{
- if (rw == READ)
- queue_flag_set(QUEUE_FLAG_READFULL, q);
+ if (sync)
+ queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
else
- queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
+ queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
}
-static inline void blk_clear_queue_full(struct request_queue *q, int rw)
+static inline void blk_clear_queue_full(struct request_queue *q, int sync)
{
- if (rw == READ)
- queue_flag_clear(QUEUE_FLAG_READFULL, q);
+ if (sync)
+ queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
else
- queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
+ queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
}
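With this change, the queue-full state is tracked per sync/async class rather than per data direction. A simplified, hypothetical caller (loosely modeled on the block layer's request allocation path; not taken from this patch) would combine the new helpers like this:

static int may_allocate_request(struct request_queue *q,
				unsigned int rw_flags)
{
	const bool is_sync = rw_is_sync(rw_flags);

	/*
	 * Reads and sync writes share the sync class; plain writes
	 * are async, matching the BLK_RW_* indices above.
	 */
	if (blk_queue_full(q, is_sync))
		return 0;	/* this class is congested, back off */

	return 1;
}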