Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h  70
1 file changed, 45 insertions(+), 25 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index cbbfd98ad4a..0e67c45b3bc 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -73,7 +73,7 @@ enum rq_cmd_type_bits {
/*
* try to put the fields that are referenced together in the same cacheline.
- * if you modify this structure, be sure to check block/blk-core.c:rq_init()
+ * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
* as well!
*/
struct request {
@@ -257,11 +257,10 @@ struct queue_limits {
unsigned char misaligned;
unsigned char discard_misaligned;
unsigned char cluster;
- signed char discard_zeroes_data;
+ unsigned char discard_zeroes_data;
};
-struct request_queue
-{
+struct request_queue {
/*
* Together with queue_head for cacheline sharing
*/
@@ -304,14 +303,14 @@ struct request_queue
void *queuedata;
/*
- * queue needs bounce pages for pages above this limit
+ * various queue flags, see QUEUE_* below
*/
- gfp_t bounce_gfp;
+ unsigned long queue_flags;
/*
- * various queue flags, see QUEUE_* below
+ * queue needs bounce pages for pages above this limit
*/
- unsigned long queue_flags;
+ gfp_t bounce_gfp;
/*
* protects queue structures from reentrancy. ->__queue_lock should
@@ -334,8 +333,8 @@ struct request_queue
unsigned int nr_congestion_off;
unsigned int nr_batching;
- void *dma_drain_buffer;
unsigned int dma_drain_size;
+ void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
@@ -364,6 +363,8 @@ struct request_queue
* for flush operations
*/
unsigned int flush_flags;
+ unsigned int flush_not_queueable:1;
+ unsigned int flush_queue_delayed:1;
unsigned int flush_pending_idx:1;
unsigned int flush_running_idx:1;
unsigned long flush_pending_since;
@@ -388,20 +389,20 @@ struct request_queue
#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
-#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
-#define QUEUE_FLAG_ELVSWITCH 7 /* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_BIDI 8 /* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES 9 /* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP 10 /* force complete on same CPU */
-#define QUEUE_FLAG_FAIL_IO 11 /* fake timeout */
-#define QUEUE_FLAG_STACKABLE 12 /* supports request stacking */
-#define QUEUE_FLAG_NONROT 13 /* non-rotational device (SSD) */
+#define QUEUE_FLAG_ELVSWITCH 6 /* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */
+#define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */
+#define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */
+#define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */
-#define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM 18 /* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */
+#define QUEUE_FLAG_IO_STAT 13 /* do IO stats */
+#define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
+#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
+#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
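
For reference only, not part of the patch: these QUEUE_FLAG_* values are bit numbers
in q->queue_flags, and drivers normally set them through the queue_flag_*() helpers
defined further down in this header rather than touching the word directly. A minimal
sketch of a hypothetical driver advertising an SSD with discard support at probe time
(the function name is made up for illustration):

	#include <linux/blkdev.h>

	static void example_mark_ssd(struct request_queue *q)	/* hypothetical */
	{
		/* non-rotational device: skip rotational heuristics in the scheduler */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
		/* the device understands DISCARD requests */
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	}
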
@@ -699,6 +700,7 @@ extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
+extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
struct rq_map_data *, void __user *, unsigned long,
gfp_t);
@@ -843,6 +845,7 @@ extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
+extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
@@ -854,12 +857,21 @@ struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
+/*
+ * Note: Code in between changing the blk_plug list/cb_list or element of such
+ * lists is preemptable, but such code can't do sleep (or be very careful),
+ * otherwise data is corrupted. For details, please check schedule() where
+ * blk_schedule_flush_plug() is called.
+ */
struct blk_plug {
unsigned long magic;
struct list_head list;
struct list_head cb_list;
unsigned int should_sort;
+ unsigned int count;
};
+#define BLK_MAX_REQUEST_COUNT 16
+
struct blk_plug_cb {
struct list_head list;
void (*callback)(struct blk_plug_cb *);
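
For context, not part of the patch: the plug above is used by submitters that batch
several bios before the driver gets to see them, via blk_start_plug()/blk_finish_plug();
the new count field is what lets the block layer flush an overly long plug list once
roughly BLK_MAX_REQUEST_COUNT requests have accumulated. A rough sketch of the usual
calling pattern (the caller and its bio array are hypothetical):

	#include <linux/blkdev.h>
	#include <linux/bio.h>
	#include <linux/fs.h>

	static void example_submit_batch(struct bio **bios, int nr)	/* hypothetical */
	{
		struct blk_plug plug;
		int i;

		blk_start_plug(&plug);			/* requests accumulate on plug.list */
		for (i = 0; i < nr; i++)
			submit_bio(READ, bios[i]);	/* no driver activity yet */
		blk_finish_plug(&plug);			/* sort, merge and hand off to the driver */
	}
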
@@ -1066,13 +1078,16 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
{
unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
+ if (!lim->max_discard_sectors)
+ return 0;
+
return (lim->discard_granularity + lim->discard_alignment - alignment)
& (lim->discard_granularity - 1);
}
static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
- if (q->limits.discard_zeroes_data == 1)
+ if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
return 1;
return 0;
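
Not part of the patch, just to illustrate the effect of the new max_discard_sectors
checks above: on a queue with no discard support at all, both helpers now return 0
instead of a misleading value, so callers can combine them with blk_queue_discard().
A hedged sketch (the caller is hypothetical):

	#include <linux/blkdev.h>

	/* hypothetical: decide whether a range can be zeroed cheaply via discard */
	static bool example_can_zero_by_discard(struct block_device *bdev)
	{
		struct request_queue *q = bdev_get_queue(bdev);

		if (!q || !blk_queue_discard(q))	/* no discard support at all */
			return false;

		/* with this patch, also 0 when max_discard_sectors == 0 */
		return queue_discard_zeroes_data(q) == 1;
	}
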
@@ -1111,6 +1126,11 @@ static inline unsigned int block_size(struct block_device *bdev)
return bdev->bd_block_size;
}
+static inline bool queue_flush_queueable(struct request_queue *q)
+{
+ return !q->flush_not_queueable;
+}
+
typedef struct {struct page *v;} Sector;
unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
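
For illustration only, not part of the patch: a driver whose cache-flush command cannot
be queued together with normal I/O would clear the capability when setting up its queue,
and the flush machinery in blk-flush.c can then consult queue_flush_queueable() before
dispatching. A minimal sketch (the driver function name is made up):

	#include <linux/blkdev.h>

	static void example_setup_flush(struct request_queue *q)	/* hypothetical */
	{
		/* the device has a volatile write cache and honours cache flushes... */
		blk_queue_flush(q, REQ_FLUSH);
		/* ...but the flush command must be issued with the queue drained */
		blk_queue_flush_queueable(q, false);
	}
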
@@ -1271,8 +1291,8 @@ queue_max_integrity_segments(struct request_queue *q)
#define blk_get_integrity(a) (0)
#define blk_integrity_compare(a, b) (0)
#define blk_integrity_register(a, b) (0)
-#define blk_integrity_unregister(a) do { } while (0);
-#define blk_queue_max_integrity_segments(a, b) do { } while (0);
+#define blk_integrity_unregister(a) do { } while (0)
+#define blk_queue_max_integrity_segments(a, b) do { } while (0)
#define queue_max_integrity_segments(a) (0)
#define blk_integrity_merge_rq(a, b, c) (0)
#define blk_integrity_merge_bio(a, b, c) (0)
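
A side note on the last hunk, not part of the patch: the old trailing ';' made the stub
macros expand to "do { } while (0);;", i.e. two statements, which breaks as soon as the
stub is the body of an if that has an else. A small illustration (the function and its
arguments are placeholders):

	#include <linux/kernel.h>
	#include <linux/blkdev.h>
	#include <linux/genhd.h>

	static void example_teardown(struct gendisk *disk, int have_integrity)	/* hypothetical */
	{
		if (have_integrity)
			blk_integrity_unregister(disk);	/* now expands to a single statement */
		else
			printk(KERN_DEBUG "no integrity profile registered\n");
		/*
		 * With the old definition this expanded to "do { } while (0);;",
		 * leaving the 'else' without a matching 'if' and failing the build.
		 */
	}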