Diffstat (limited to 'include/linux/blkdev.h'):

 include/linux/blkdev.h | 27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1a23722e887..0e67c45b3bc 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -73,7 +73,7 @@ enum rq_cmd_type_bits {
 
 /*
  * try to put the fields that are referenced together in the same cacheline.
- * if you modify this structure, be sure to check block/blk-core.c:rq_init()
+ * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
  * as well!
  */
 struct request {
@@ -260,8 +260,7 @@ struct queue_limits {
         unsigned char           discard_zeroes_data;
 };
 
-struct request_queue
-{
+struct request_queue {
         /*
          * Together with queue_head for cacheline sharing
          */
@@ -304,14 +303,14 @@ struct request_queue
         void                    *queuedata;
 
         /*
-         * queue needs bounce pages for pages above this limit
+         * various queue flags, see QUEUE_* below
          */
-        gfp_t                   bounce_gfp;
+        unsigned long           queue_flags;
 
         /*
-         * various queue flags, see QUEUE_* below
+         * queue needs bounce pages for pages above this limit
          */
-        unsigned long           queue_flags;
+        gfp_t                   bounce_gfp;
 
         /*
          * protects queue structures from reentrancy. ->__queue_lock should
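The swap above is a cacheline-locality tweak: queue_flags is tested on
essentially every request, while bounce_gfp only matters when bounce
buffering is in play, so the hot field moves up next to queuedata. A
minimal userspace sketch of how one can audit such layouts with
offsetof(); struct my_queue and its fields are hypothetical stand-ins,
not kernel code:

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-in for a few request_queue fields. */
    struct my_queue {
            void            *queuedata;
            unsigned long   queue_flags;    /* hot: tested on every I/O */
            unsigned int    bounce_gfp;     /* cold: bounce buffering only */
    };

    int main(void)
    {
            /* Offsets inside the same 64-byte block share a cacheline. */
            printf("queuedata   @ %3zu\n", offsetof(struct my_queue, queuedata));
            printf("queue_flags @ %3zu\n", offsetof(struct my_queue, queue_flags));
            printf("bounce_gfp  @ %3zu\n", offsetof(struct my_queue, bounce_gfp));
            return 0;
    }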
@@ -334,8 +333,8 @@ struct request_queue
         unsigned int            nr_congestion_off;
         unsigned int            nr_batching;
 
-        void                    *dma_drain_buffer;
         unsigned int            dma_drain_size;
+        void                    *dma_drain_buffer;
         unsigned int            dma_pad_mask;
         unsigned int            dma_alignment;
 
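For context on the reorder above: dma_drain_size and dma_drain_buffer are
set together through blk_queue_dma_drain(), so keeping them adjacent reads
naturally. A hedged sketch of a driver wiring them up; my_drain_needed(),
my_setup_drain(), my_drain_buf and MY_DRAIN_SIZE are hypothetical, only
the blk_queue_dma_drain() call itself is the block-layer API:

    #include <linux/blkdev.h>

    #define MY_DRAIN_SIZE 256

    /* Hypothetical callback: request a drain for any odd-sized transfer. */
    static int my_drain_needed(struct request *rq)
    {
            return blk_rq_bytes(rq) & 0x1ff;
    }

    /* During device setup, assuming my_drain_buf was allocated earlier: */
    static void my_setup_drain(struct request_queue *q, void *my_drain_buf)
    {
            blk_queue_dma_drain(q, my_drain_needed, my_drain_buf, MY_DRAIN_SIZE);
    }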
@@ -393,7 +392,7 @@ struct request_queue
 #define QUEUE_FLAG_ELVSWITCH    6       /* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_BIDI         7       /* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES     8       /* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP    9       /* force complete on same CPU */
+#define QUEUE_FLAG_SAME_COMP    9       /* complete on same CPU-group */
 #define QUEUE_FLAG_FAIL_IO     10       /* fake timeout */
 #define QUEUE_FLAG_STACKABLE   11       /* supports request stacking */
 #define QUEUE_FLAG_NONROT      12       /* non-rotational device (SSD) */
@@ -403,6 +402,7 @@ struct request_queue
 #define QUEUE_FLAG_NOXMERGES   15       /* No extended merges */
 #define QUEUE_FLAG_ADD_RANDOM  16       /* Contributes to random pool */
 #define QUEUE_FLAG_SECDISCARD  17       /* supports SECDISCARD */
+#define QUEUE_FLAG_SAME_FORCE  18       /* force complete on same CPU */
 
 #define QUEUE_FLAG_DEFAULT      ((1 << QUEUE_FLAG_IO_STAT) |           \
                                  (1 << QUEUE_FLAG_STACKABLE)   |       \
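Two related notes on the flag changes above: QUEUE_FLAG_SAME_COMP now only
steers a completion to a CPU in the submitter's group, while the new
QUEUE_FLAG_SAME_FORCE pins it to the exact submitting CPU. All of these
bits live in q->queue_flags and are queried with test_bit(); blkdev.h
wraps the common ones in helpers like blk_queue_nonrot(). A minimal sketch
of that pattern, where my_queue_is_ssd() is a hypothetical helper:

    #include <linux/blkdev.h>

    /* Hypothetical helper: true if the queue is flagged non-rotational. */
    static inline bool my_queue_is_ssd(struct request_queue *q)
    {
            return test_bit(QUEUE_FLAG_NONROT, &q->queue_flags);
    }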
@@ -857,12 +857,21 @@ struct request_queue *blk_alloc_queue(gfp_t);
 struct request_queue *blk_alloc_queue_node(gfp_t, int);
 extern void blk_put_queue(struct request_queue *);
 
+/*
+ * Note: code that modifies the blk_plug list/cb_list, or an element of
+ * those lists, may be preempted, but it must not sleep (or must be very
+ * careful if it does), otherwise the lists are corrupted. For details,
+ * see schedule(), where blk_schedule_flush_plug() is called.
+ */
 struct blk_plug {
         unsigned long magic;
         struct list_head list;
         struct list_head cb_list;
         unsigned int should_sort;
+        unsigned int count;
 };
+#define BLK_MAX_REQUEST_COUNT 16
+
 struct blk_plug_cb {
         struct list_head list;
         void (*callback)(struct blk_plug_cb *);
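The new count field lets the block core cap how many requests pile up in
an on-stack plug before it forces a flush (BLK_MAX_REQUEST_COUNT, i.e. 16
here). For reference, a hedged sketch of the plugging pattern these
structs support; submit_my_batch() and its arguments are hypothetical,
while blk_start_plug(), blk_finish_plug() and submit_bio() are the real
API of this era:

    #include <linux/blkdev.h>

    /* Hypothetical submitter batching several bios behind one plug. */
    static void submit_my_batch(struct bio **bios, int nr)
    {
            struct blk_plug plug;
            int i;

            blk_start_plug(&plug);          /* requests gather on plug.list */
            for (i = 0; i < nr; i++)
                    submit_bio(READ, bios[i]);
            blk_finish_plug(&plug);         /* flush the batch to the driver */
    }

If the task blocks while the plug is active, schedule() flushes it through
blk_schedule_flush_plug(), which is exactly the interaction the Note above
the struct warns about.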