-rw-r--r--  block/blk-cgroup.c               |    2
-rw-r--r--  block/blk-core.c                 |    2
-rw-r--r--  block/blk-ioc.c                  |    2
-rw-r--r--  block/blk-iopoll.c               |    3
-rw-r--r--  block/blk-mq-cpumap.c            |   10
-rw-r--r--  block/blk-mq-sysfs.c             |   31
-rw-r--r--  block/blk-mq.c                   |   70
-rw-r--r--  block/blk-mq.h                   |    2
-rw-r--r--  block/blk-softirq.c              |   19
-rw-r--r--  block/cfq-iosched.c              |    8
-rw-r--r--  block/deadline-iosched.c         |    8
-rw-r--r--  block/partitions/atari.h         |    4
-rw-r--r--  block/partitions/efi.h           |    9
-rw-r--r--  block/partitions/karma.c         |    3
-rw-r--r--  drivers/block/virtio_blk.c       |    3
-rw-r--r--  drivers/cpuidle/coupled.c        |    2
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c  |  206
-rw-r--r--  drivers/scsi/ipr.c               |   15
-rw-r--r--  fs/bio-integrity.c               |   84
-rw-r--r--  fs/bio.c                         |    1
-rw-r--r--  include/linux/blk-iopoll.h       |    2
-rw-r--r--  include/linux/blk-mq.h           |    5
-rw-r--r--  include/linux/blkdev.h           |    1
-rw-r--r--  include/linux/elevator.h         |   11
-rw-r--r--  include/linux/smp.h              |    8
-rw-r--r--  include/trace/events/block.h     |   33
-rw-r--r--  kernel/sched/core.c              |    2
-rw-r--r--  kernel/smp.c                     |  139
-rw-r--r--  kernel/sysctl.c                  |   12
-rw-r--r--  kernel/trace/blktrace.c          |   20
-rw-r--r--  kernel/up.c                      |    6
-rw-r--r--  kernel/watchdog.c                |    3
-rw-r--r--  net/core/dev.c                   |    4
33 files changed, 352 insertions(+), 378 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 4e491d9b529..b6e95b5e262 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -336,7 +336,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
* under queue_lock. If it's not pointing to @blkg now, it never
* will. Hint assignment itself can race safely.
*/
- if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
+ if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
rcu_assign_pointer(blkcg->blkg_hint, NULL);
/*
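The hunk above (and the matching blk-ioc.c hunk below) swaps rcu_dereference_raw() for rcu_access_pointer(), the right primitive when an RCU-protected pointer is only compared against a known value and never dereferenced. A minimal sketch of the pattern — the two RCU helpers are real; every other name is hypothetical:

/* Sketch: invalidating a cached lookup hint when its target is destroyed.
 * c->hint is RCU-protected but only *compared* here, never followed, so
 * rcu_access_pointer() suffices; no read-side critical section is needed.
 */
static void obj_destroy(struct cache *c, struct obj *obj)
{
	lockdep_assert_held(&c->lock);	/* hint updates serialized by c->lock */

	if (rcu_access_pointer(c->hint) == obj)
		rcu_assign_pointer(c->hint, NULL);
}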
diff --git a/block/blk-core.c b/block/blk-core.c
index bfe16d5af9f..e45b321cf6a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2353,7 +2353,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
if (!req->bio)
return false;
- trace_block_rq_complete(req->q, req);
+ trace_block_rq_complete(req->q, req, nr_bytes);
/*
* For fs requests, rq is just carrier of independent bio's
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 242df01413f..1a27f45ec77 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -68,7 +68,7 @@ static void ioc_destroy_icq(struct io_cq *icq)
* under queue_lock. If it's not pointing to @icq now, it never
* will. Hint assignment itself can race safely.
*/
- if (rcu_dereference_raw(ioc->icq_hint) == icq)
+ if (rcu_access_pointer(ioc->icq_hint) == icq)
rcu_assign_pointer(ioc->icq_hint, NULL);
ioc_exit_icq(icq);
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index 1855bf51edb..c11d24e379e 100644
--- a/block/blk-iopoll.c
+++ b/block/blk-iopoll.c
@@ -14,9 +14,6 @@
#include "blk.h"
-int blk_iopoll_enabled = 1;
-EXPORT_SYMBOL(blk_iopoll_enabled);
-
static unsigned int blk_iopoll_budget __read_mostly = 256;
static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index f8721278601..09792132961 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -9,15 +9,6 @@
#include "blk.h"
#include "blk-mq.h"
-static void show_map(unsigned int *map, unsigned int nr)
-{
- int i;
-
- pr_info("blk-mq: CPU -> queue map\n");
- for_each_online_cpu(i)
- pr_info(" CPU%2u -> Queue %u\n", i, map[i]);
-}
-
static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
const int cpu)
{
@@ -85,7 +76,6 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
map[i] = map[first_sibling];
}
- show_map(map, nr_cpus);
free_cpumask_var(cpus);
return 0;
}
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index b91ce75bd35..b0ba264b052 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -244,6 +244,32 @@ static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
return blk_mq_tag_sysfs_show(hctx->tags, page);
}
+static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
+{
+ unsigned int i, queue_num, first = 1;
+ ssize_t ret = 0;
+
+ blk_mq_disable_hotplug();
+
+ for_each_online_cpu(i) {
+ queue_num = hctx->queue->mq_map[i];
+ if (queue_num != hctx->queue_num)
+ continue;
+
+ if (first)
+ ret += sprintf(ret + page, "%u", i);
+ else
+ ret += sprintf(ret + page, ", %u", i);
+
+ first = 0;
+ }
+
+ blk_mq_enable_hotplug();
+
+ ret += sprintf(ret + page, "\n");
+ return ret;
+}
+
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
.attr = {.name = "dispatched", .mode = S_IRUGO },
.show = blk_mq_sysfs_dispatched_show,
@@ -294,6 +320,10 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
.attr = {.name = "tags", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_tags_show,
};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
+ .attr = {.name = "cpu_list", .mode = S_IRUGO },
+ .show = blk_mq_hw_sysfs_cpus_show,
+};
static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_queued.attr,
@@ -302,6 +332,7 @@ static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_pending.attr,
&blk_mq_hw_sysfs_ipi.attr,
&blk_mq_hw_sysfs_tags.attr,
+ &blk_mq_hw_sysfs_cpus.attr,
NULL,
};
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 883f7208901..b1bcc619d0e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -320,7 +320,7 @@ void __blk_mq_complete_request(struct request *rq)
rq->csd.func = __blk_mq_complete_request_remote;
rq->csd.info = rq;
rq->csd.flags = 0;
- __smp_call_function_single(ctx->cpu, &rq->csd, 0);
+ smp_call_function_single_async(ctx->cpu, &rq->csd);
} else {
rq->q->softirq_done_fn(rq);
}
@@ -514,7 +514,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
LIST_HEAD(rq_list);
int bit, queued;
- if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->flags)))
+ if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
return;
hctx->run++;
@@ -603,7 +603,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
- if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->flags)))
+ if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
return;
if (!async)
@@ -623,7 +623,7 @@ void blk_mq_run_queues(struct request_queue *q, bool async)
queue_for_each_hw_ctx(q, hctx, i) {
if ((!blk_mq_hctx_has_pending(hctx) &&
list_empty_careful(&hctx->dispatch)) ||
- test_bit(BLK_MQ_S_STOPPED, &hctx->flags))
+ test_bit(BLK_MQ_S_STOPPED, &hctx->state))
continue;
blk_mq_run_hw_queue(hctx, async);
@@ -994,8 +994,46 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
blk_mq_put_ctx(ctx);
}
-static void blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
- void (*init)(void *, struct blk_mq_hw_ctx *,
+static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
+ int (*init)(void *, struct blk_mq_hw_ctx *,
+ struct request *, unsigned int),
+ void *data)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < hctx->queue_depth; i++) {
+ struct request *rq = hctx->rqs[i];
+
+ ret = init(data, hctx, rq, i);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+int blk_mq_init_commands(struct request_queue *q,
+ int (*init)(void *, struct blk_mq_hw_ctx *,
+ struct request *, unsigned int),
+ void *data)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i;
+ int ret = 0;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ ret = blk_mq_init_hw_commands(hctx, init, data);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(blk_mq_init_commands);
+
+static void blk_mq_free_hw_commands(struct blk_mq_hw_ctx *hctx,
+ void (*free)(void *, struct blk_mq_hw_ctx *,
struct request *, unsigned int),
void *data)
{
@@ -1004,12 +1042,12 @@ static void blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
for (i = 0; i < hctx->queue_depth; i++) {
struct request *rq = hctx->rqs[i];
- init(data, hctx, rq, i);
+ free(data, hctx, rq, i);
}
}
-void blk_mq_init_commands(struct request_queue *q,
- void (*init)(void *, struct blk_mq_hw_ctx *,
+void blk_mq_free_commands(struct request_queue *q,
+ void (*free)(void *, struct blk_mq_hw_ctx *,
struct request *, unsigned int),
void *data)
{
@@ -1017,9 +1055,9 @@ void blk_mq_init_commands(struct request_queue *q,
unsigned int i;
queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_init_hw_commands(hctx, init, data);
+ blk_mq_free_hw_commands(hctx, free, data);
}
-EXPORT_SYMBOL(blk_mq_init_commands);
+EXPORT_SYMBOL(blk_mq_free_commands);
static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx)
{
@@ -1430,6 +1468,16 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
return NOTIFY_OK;
}
+void blk_mq_disable_hotplug(void)
+{
+ mutex_lock(&all_q_mutex);
+}
+
+void blk_mq_enable_hotplug(void)
+{
+ mutex_unlock(&all_q_mutex);
+}
+
static int __init blk_mq_init(void)
{
blk_mq_cpu_init();
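With the change above, blk_mq_init_commands() takes an int-returning callback, stops at the first failure, and propagates the error; blk_mq_free_commands() is the matching teardown (the virtio_blk hunk further down converts its callback the same way). A hedged driver-side sketch of the new contract — the two exported blk-mq calls and rq->special are real, everything prefixed mydrv_ is hypothetical:

#define MYDRV_BUF_SIZE 4096

/* Hypothetical per-request driver state, reached via rq->special. */
struct mydrv_cmd {
	void *buf;
};

/* Per-request init that can now fail, matching the new signature. */
static int mydrv_init_cmd(void *data, struct blk_mq_hw_ctx *hctx,
			  struct request *rq, unsigned int nr)
{
	struct mydrv_cmd *cmd = rq->special;

	cmd->buf = kmalloc(MYDRV_BUF_SIZE, GFP_KERNEL);
	return cmd->buf ? 0 : -ENOMEM;
}

static void mydrv_free_cmd(void *data, struct blk_mq_hw_ctx *hctx,
			   struct request *rq, unsigned int nr)
{
	struct mydrv_cmd *cmd = rq->special;

	kfree(cmd->buf);
}

	/* in probe: propagate failure instead of assuming success */
	if (blk_mq_init_commands(q, mydrv_init_cmd, mydrv))
		goto out_cleanup_queue;

Note that the loop above bails on the first error without unwinding requests already initialized, so the caller's error path still has to release whatever the callback allocated.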
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 72beba1f9d5..ebbe6bac9d6 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -39,6 +39,8 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_cpu_init(void);
+void blk_mq_enable_hotplug(void);
+void blk_mq_disable_hotplug(void);
/*
* CPU -> queue mappings
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 57790c1a97e..ebd6b6f1bde 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -30,8 +30,8 @@ static void blk_done_softirq(struct softirq_action *h)
while (!list_empty(&local_list)) {
struct request *rq;
- rq = list_entry(local_list.next, struct request, csd.list);
- list_del_init(&rq->csd.list);
+ rq = list_entry(local_list.next, struct request, queuelist);
+ list_del_init(&rq->queuelist);
rq->q->softirq_done_fn(rq);
}
}
@@ -45,9 +45,14 @@ static void trigger_softirq(void *data)
local_irq_save(flags);
list = this_cpu_ptr(&blk_cpu_done);
- list_add_tail(&rq->csd.list, list);
+ /*
+ * We reuse queuelist for a list of requests to process. Since the
+ * queuelist is used by the block layer only for requests waiting to
+ * be submitted to the device, it is unused at this point.
+ */
+ list_add_tail(&rq->queuelist, list);
- if (list->next == &rq->csd.list)
+ if (list->next == &rq->queuelist)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_restore(flags);
@@ -65,7 +70,7 @@ static int raise_blk_irq(int cpu, struct request *rq)
data->info = rq;
data->flags = 0;
- __smp_call_function_single(cpu, data, 0);
+ smp_call_function_single_async(cpu, data);
return 0;
}
@@ -136,7 +141,7 @@ void __blk_complete_request(struct request *req)
struct list_head *list;
do_local:
list = this_cpu_ptr(&blk_cpu_done);
- list_add_tail(&req->csd.list, list);
+ list_add_tail(&req->queuelist, list);
/*
* if the list only contains our just added request,
@@ -144,7 +149,7 @@ do_local:
* entries there, someone already raised the irq but it
* hasn't run yet.
*/
- if (list->next == &req->csd.list)
+ if (list->next == &req->queuelist)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
} else if (raise_blk_irq(ccpu, req))
goto do_local;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 744833b630c..5873e4ada9e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2367,10 +2367,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
* reposition in fifo if next is older than rq
*/
if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
- time_before(rq_fifo_time(next), rq_fifo_time(rq)) &&
+ time_before(next->fifo_time, rq->fifo_time) &&
cfqq == RQ_CFQQ(next)) {
list_move(&rq->queuelist, &next->queuelist);
- rq_set_fifo_time(rq, rq_fifo_time(next));
+ rq->fifo_time = next->fifo_time;
}
if (cfqq->next_rq == next)
@@ -2814,7 +2814,7 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
return NULL;
rq = rq_entry_fifo(cfqq->fifo.next);
- if (time_before(jiffies, rq_fifo_time(rq)))
+ if (time_before(jiffies, rq->fifo_time))
rq = NULL;
cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
@@ -3927,7 +3927,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
cfq_log_cfqq(cfqd, cfqq, "insert_request");
cfq_init_prio_data(cfqq, RQ_CIC(rq));
- rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
+ rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_add_rq_rb(rq);
cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 9ef66406c62..a753df2b3fc 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -106,7 +106,7 @@ deadline_add_request(struct request_queue *q, struct request *rq)
/*
* set expire time and add to fifo list
*/
- rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
+ rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
}
@@ -174,9 +174,9 @@ deadline_merged_requests(struct request_queue *q, struct request *req,
* and move into next position (next will be deleted) in fifo
*/
if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
- if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
+ if (time_before(next->fifo_time, req->fifo_time)) {
list_move(&req->queuelist, &next->queuelist);
- rq_set_fifo_time(req, rq_fifo_time(next));
+ req->fifo_time = next->fifo_time;
}
}
@@ -230,7 +230,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
/*
* rq is expired!
*/
- if (time_after_eq(jiffies, rq_fifo_time(rq)))
+ if (time_after_eq(jiffies, rq->fifo_time))
return 1;
return 0;
diff --git a/block/partitions/atari.h b/block/partitions/atari.h
index fe2d32a89f3..f2ec43bfeec 100644
--- a/block/partitions/atari.h
+++ b/block/partitions/atari.h
@@ -11,6 +11,8 @@
* by Guenther Kelleter (guenther@pool.informatik.rwth-aachen.de)
*/
+#include <linux/compiler.h>
+
struct partition_info
{
u8 flg; /* bit 0: active; bit 7: bootable */
@@ -29,6 +31,6 @@ struct rootsector
u32 bsl_st; /* start of bad sector list */
u32 bsl_cnt; /* length of bad sector list */
u16 checksum; /* checksum for bootable disks */
-} __attribute__((__packed__));
+} __packed;
int atari_partition(struct parsed_partitions *state);
diff --git a/block/partitions/efi.h b/block/partitions/efi.h
index 4efcafba7e6..abd0b19288a 100644
--- a/block/partitions/efi.h
+++ b/block/partitions/efi.h
@@ -32,6 +32,7 @@
#include <linux/major.h>
#include <linux/string.h>
#include <linux/efi.h>
+#include <linux/compiler.h>
#define MSDOS_MBR_SIGNATURE 0xaa55
#define EFI_PMBR_OSTYPE_EFI 0xEF
@@ -87,13 +88,13 @@ typedef struct _gpt_header {
*
* uint8_t reserved2[ BlockSize - 92 ];
*/
-} __attribute__ ((packed)) gpt_header;
+} __packed gpt_header;
typedef struct _gpt_entry_attributes {
u64 required_to_function:1;
u64 reserved:47;
u64 type_guid_specific:16;
-} __attribute__ ((packed)) gpt_entry_attributes;
+} __packed gpt_entry_attributes;
typedef struct _gpt_entry {
efi_guid_t partition_type_guid;
@@ -102,7 +103,7 @@ typedef struct _gpt_entry {
__le64 ending_lba;
gpt_entry_attributes attributes;
efi_char16_t partition_name[72 / sizeof (efi_char16_t)];
-} __attribute__ ((packed)) gpt_entry;
+} __packed gpt_entry;
typedef struct _gpt_mbr_record {
u8 boot_indicator; /* unused by EFI, set to 0x80 for bootable */
@@ -124,7 +125,7 @@ typedef struct _legacy_mbr {
__le16 unknown;
gpt_mbr_record partition_record[4];
__le16 signature;
-} __attribute__ ((packed)) legacy_mbr;
+} __packed legacy_mbr;
/* Functions */
extern int efi_partition(struct parsed_partitions *state);
diff --git a/block/partitions/karma.c b/block/partitions/karma.c
index 0ea19312706..9721fa589bb 100644
--- a/block/partitions/karma.c
+++ b/block/partitions/karma.c
@@ -8,6 +8,7 @@
#include "check.h"
#include "karma.h"
+#include <linux/compiler.h>
int karma_partition(struct parsed_partitions *state)
{
@@ -26,7 +27,7 @@ int karma_partition(struct parsed_partitions *state)
} d_partitions[2];
u8 d_blank[208];
__le16 d_magic;
- } __attribute__((packed)) *label;
+ } __packed *label;
struct d_partition *p;
data = read_part_sector(state, 0, &sect);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index b1cb3f4c4db..0eace43cea1 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -490,13 +490,14 @@ static struct blk_mq_reg virtio_mq_reg = {
.flags = BLK_MQ_F_SHOULD_MERGE,
};
-static void virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx,
+static int virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx,
struct request *rq, unsigned int nr)
{
struct virtio_blk *vblk = data;
struct virtblk_req *vbr = rq->special;
sg_init_table(vbr->sg, vblk->sg_elems);
+ return 0;
}
static int virtblk_probe(struct virtio_device *vdev)
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index e952936418d..cb6654bfad7 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -323,7 +323,7 @@ static void cpuidle_coupled_poke(int cpu)
struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
- __smp_call_function_single(cpu, csd, 0);
+ smp_call_function_single_async(cpu, csd);
}
/**
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 9be818f7b26..0d822297aa8 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -898,7 +898,6 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
struct be_queue_info *cq;
unsigned int num_eq_processed;
struct be_eq_obj *pbe_eq;
- unsigned long flags;
pbe_eq = dev_id;
eq = &pbe_eq->q;
@@ -907,31 +906,15 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
phba = pbe_eq->phba;
num_eq_processed = 0;
- if (blk_iopoll_enabled) {
- while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
- & EQE_VALID_MASK) {
- if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
- blk_iopoll_sched(&pbe_eq->iopoll);
-
- AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
- queue_tail_inc(eq);
- eqe = queue_tail_node(eq);
- num_eq_processed++;
- }
- } else {
- while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
- & EQE_VALID_MASK) {
- spin_lock_irqsave(&phba->isr_lock, flags);
- pbe_eq->todo_cq = true;
- spin_unlock_irqrestore(&phba->isr_lock, flags);
- AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
- queue_tail_inc(eq);
- eqe = queue_tail_node(eq);
- num_eq_processed++;
- }
+ while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+ & EQE_VALID_MASK) {
+ if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
+ blk_iopoll_sched(&pbe_eq->iopoll);
- if (pbe_eq->todo_cq)
- queue_work(phba->wq, &pbe_eq->work_cqs);
+ AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+ queue_tail_inc(eq);
+ eqe = queue_tail_node(eq);
+ num_eq_processed++;
}
if (num_eq_processed)
@@ -952,7 +935,6 @@ static irqreturn_t be_isr(int irq, void *dev_id)
struct hwi_context_memory *phwi_context;
struct be_eq_entry *eqe = NULL;
struct be_queue_info *eq;
- struct be_queue_info *cq;
struct be_queue_info *mcc;
unsigned long flags, index;
unsigned int num_mcceq_processed, num_ioeq_processed;
@@ -978,72 +960,40 @@ static irqreturn_t be_isr(int irq, void *dev_id)
num_ioeq_processed = 0;
num_mcceq_processed = 0;
- if (blk_iopoll_enabled) {
- while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
- & EQE_VALID_MASK) {
- if (((eqe->dw[offsetof(struct amap_eq_entry,
- resource_id) / 32] &
- EQE_RESID_MASK) >> 16) == mcc->id) {
- spin_lock_irqsave(&phba->isr_lock, flags);
- pbe_eq->todo_mcc_cq = true;
- spin_unlock_irqrestore(&phba->isr_lock, flags);
- num_mcceq_processed++;
- } else {
- if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
- blk_iopoll_sched(&pbe_eq->iopoll);
- num_ioeq_processed++;
- }
- AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
- queue_tail_inc(eq);
- eqe = queue_tail_node(eq);
- }
- if (num_ioeq_processed || num_mcceq_processed) {
- if (pbe_eq->todo_mcc_cq)
- queue_work(phba->wq, &pbe_eq->work_cqs);
-
- if ((num_mcceq_processed) && (!num_ioeq_processed))
- hwi_ring_eq_db(phba, eq->id, 0,
- (num_ioeq_processed +
- num_mcceq_processed) , 1, 1);
- else
- hwi_ring_eq_db(phba, eq->id, 0,
- (num_ioeq_processed +
- num_mcceq_processed), 0, 1);
-
- return IRQ_HANDLED;
- } else
- return IRQ_NONE;
- } else {
- cq = &phwi_context->be_cq[0];
- while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
- & EQE_VALID_MASK) {
-
- if (((eqe->dw[offsetof(struct amap_eq_entry,
- resource_id) / 32] &
- EQE_RESID_MASK) >> 16) != cq->id) {
- spin_lock_irqsave(&phba->isr_lock, flags);
- pbe_eq->todo_mcc_cq = true;
- spin_unlock_irqrestore(&phba->isr_lock, flags);
- } else {
- spin_lock_irqsave(&phba->isr_lock, flags);
- pbe_eq->todo_cq = true;
- spin_unlock_irqrestore(&phba->isr_lock, flags);
- }
- AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
- queue_tail_inc(eq);
- eqe = queue_tail_node(eq);
+ while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+ & EQE_VALID_MASK) {
+ if (((eqe->dw[offsetof(struct amap_eq_entry,
+ resource_id) / 32] &
+ EQE_RESID_MASK) >> 16) == mcc->id) {
+ spin_lock_irqsave(&phba->isr_lock, flags);
+ pbe_eq->todo_mcc_cq = true;
+ spin_unlock_irqrestore(&phba->isr_lock, flags);
+ num_mcceq_processed++;
+ } else {
+ if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
+ blk_iopoll_sched(&pbe_eq->iopoll);
num_ioeq_processed++;
}
- if (pbe_eq->todo_cq || pbe_eq->todo_mcc_cq)
+ AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+ queue_tail_inc(eq);
+ eqe = queue_tail_node(eq);
+ }
+ if (num_ioeq_processed || num_mcceq_processed) {
+ if (pbe_eq->todo_mcc_cq)
queue_work(phba->wq, &pbe_eq->work_cqs);
- if (num_ioeq_processed) {
+ if ((num_mcceq_processed) && (!num_ioeq_processed))
hwi_ring_eq_db(phba, eq->id, 0,
- num_ioeq_processed, 1, 1);
- return IRQ_HANDLED;
- } else
- return IRQ_NONE;
- }
+ (num_ioeq_processed +
+ num_mcceq_processed) , 1, 1);
+ else
+ hwi_ring_eq_db(phba, eq->id, 0,
+ (num_ioeq_processed +
+ num_mcceq_processed), 0, 1);
+
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
}
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
@@ -5263,11 +5213,10 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba,
}
pci_disable_msix(phba->pcidev);
- if (blk_iopoll_enabled)
- for (i = 0; i < phba->num_cpus; i++) {
- pbe_eq = &phwi_context->be_eq[i];
- blk_iopoll_disable(&pbe_eq->iopoll);
- }
+ for (i = 0; i < phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ blk_iopoll_disable(&pbe_eq->iopoll);
+ }
if (unload_state == BEISCSI_CLEAN_UNLOAD) {
destroy_workqueue(phba->wq);
@@ -5478,32 +5427,18 @@ static void beiscsi_eeh_resume(struct pci_dev *pdev)
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
- if (blk_iopoll_enabled) {
- for (i = 0; i < phba->num_cpus; i++) {
- pbe_eq = &phwi_context->be_eq[i];
- blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
- be_iopoll);
- blk_iopoll_enable(&pbe_eq->iopoll);
- }
-
- i = (phba->msix_enabled) ? i : 0;
- /* Work item for MCC handling */
+ for (i = 0; i < phba->num_cpus; i++) {
pbe_eq = &phwi_context->be_eq[i];
- INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
- } else {
- if (phba->msix_enabled) {
- for (i = 0; i <= phba->num_cpus; i++) {
- pbe_eq = &phwi_context->be_eq[i];
- INIT_WORK(&pbe_eq->work_cqs,
- beiscsi_process_all_cqs);
- }
- } else {
- pbe_eq = &phwi_context->be_eq[0];
- INIT_WORK(&pbe_eq->work_cqs,
- beiscsi_process_all_cqs);
- }
+ blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
+ be_iopoll);
+ blk_iopoll_enable(&pbe_eq->iopoll);
}
+ i = (phba->msix_enabled) ? i : 0;
+ /* Work item for MCC handling */
+ pbe_eq = &phwi_context->be_eq[i];
+ INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+
ret = beiscsi_init_irqs(phba);
if (ret < 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -5665,32 +5600,18 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
- if (blk_iopoll_enabled) {
- for (i = 0; i < phba->num_cpus; i++) {
- pbe_eq = &phwi_context->be_eq[i];
- blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
- be_iopoll);
- blk_iopoll_enable(&pbe_eq->iopoll);
- }
-
- i = (phba->msix_enabled) ? i : 0;
- /* Work item for MCC handling */
+ for (i = 0; i < phba->num_cpus; i++) {
pbe_eq = &phwi_context->be_eq[i];
- INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
- } else {
- if (phba->msix_enabled) {
- for (i = 0; i <= phba->num_cpus; i++) {
- pbe_eq = &phwi_context->be_eq[i];
- INIT_WORK(&pbe_eq->work_cqs,
- beiscsi_process_all_cqs);
- }
- } else {
- pbe_eq = &phwi_context->be_eq[0];
- INIT_WORK(&pbe_eq->work_cqs,
- beiscsi_process_all_cqs);
- }
+ blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
+ be_iopoll);
+ blk_iopoll_enable(&pbe_eq->iopoll);
}
+ i = (phba->msix_enabled) ? i : 0;
+ /* Work item for MCC handling */
+ pbe_eq = &phwi_context->be_eq[i];
+ INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+
ret = beiscsi_init_irqs(phba);
if (ret < 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -5719,11 +5640,10 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
free_blkenbld:
destroy_workqueue(phba->wq);
- if (blk_iopoll_enabled)
- for (i = 0; i < phba->num_cpus; i++) {
- pbe_eq = &phwi_context->be_eq[i];
- blk_iopoll_disable(&pbe_eq->iopoll);
- }
+ for (i = 0; i < phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ blk_iopoll_disable(&pbe_eq->iopoll);
+ }
free_twq:
beiscsi_clean_port(phba);
beiscsi_free_mem(phba);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2f8dd8e4225..924b0ba74df 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3670,16 +3670,14 @@ static ssize_t ipr_store_iopoll_weight(struct device *dev,
return strlen(buf);
}
- if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
- ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
for (i = 1; i < ioa_cfg->hrrq_num; i++)
blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
}
spin_lock_irqsave(shost->host_lock, lock_flags);
ioa_cfg->iopoll_weight = user_iopoll_weight;
- if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
- ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
for (i = 1; i < ioa_cfg->hrrq_num; i++) {
blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
ioa_cfg->iopoll_weight, ipr_iopoll);
@@ -5525,8 +5523,7 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
return IRQ_NONE;
}
- if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
- ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
hrrq->toggle_bit) {
if (!blk_iopoll_sched_prep(&hrrq->iopoll))
@@ -9975,8 +9972,7 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
ioa_cfg->host->max_channel = IPR_VSET_BUS;
ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
- if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
- ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
for (i = 1; i < ioa_cfg->hrrq_num; i++) {
blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
ioa_cfg->iopoll_weight, ipr_iopoll);
@@ -10005,8 +10001,7 @@ static void ipr_shutdown(struct pci_dev *pdev)
int i;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
- ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
ioa_cfg->iopoll_weight = 0;
for (i = 1; i < ioa_cfg->hrrq_num; i++)
blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 4f70f383132..29696b78d1f 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -301,25 +301,25 @@ int bio_integrity_get_tag(struct bio *bio, void *tag_buf, unsigned int len)
EXPORT_SYMBOL(bio_integrity_get_tag);
/**
- * bio_integrity_generate - Generate integrity metadata for a bio
- * @bio: bio to generate integrity metadata for
- *
- * Description: Generates integrity metadata for a bio by calling the
- * block device's generation callback function. The bio must have a
- * bip attached with enough room to accommodate the generated
- * integrity metadata.
+ * bio_integrity_generate_verify - Generate/verify integrity metadata for a bio
+ * @bio: bio to generate/verify integrity metadata for
+ * @operate: 1 to generate integrity metadata, 0 to verify it
*/
-static void bio_integrity_generate(struct bio *bio)
+static int bio_integrity_generate_verify(struct bio *bio, int operate)
{
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
struct blk_integrity_exchg bix;
struct bio_vec bv;
struct bvec_iter iter;
- sector_t sector = bio->bi_iter.bi_sector;
- unsigned int sectors, total;
+ sector_t sector;
+ unsigned int sectors, ret = 0;
void *prot_buf = bio->bi_integrity->bip_buf;
- total = 0;
+ if (operate)
+ sector = bio->bi_iter.bi_sector;
+ else
+ sector = bio->bi_integrity->bip_iter.bi_sector;
+
bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
bix.sector_size = bi->sector_size;
@@ -330,16 +330,37 @@ static void bio_integrity_generate(struct bio *bio)
bix.prot_buf = prot_buf;
bix.sector = sector;
- bi->generate_fn(&bix);
+ if (operate) {
+ bi->generate_fn(&bix);
+ } else {
+ ret = bi->verify_fn(&bix);
+ if (ret) {
+ kunmap_atomic(kaddr);
+ return ret;
+ }
+ }
sectors = bv.bv_len / bi->sector_size;
sector += sectors;
prot_buf += sectors * bi->tuple_size;
- total += sectors * bi->tuple_size;
- BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);
kunmap_atomic(kaddr);
}
+ return ret;
+}
+
+/**
+ * bio_integrity_generate - Generate integrity metadata for a bio
+ * @bio: bio to generate integrity metadata for
+ *
+ * Description: Generates integrity metadata for a bio by calling the
+ * block device's generation callback function. The bio must have a
+ * bip attached with enough room to accommodate the generated
+ * integrity metadata.
+ */
+static void bio_integrity_generate(struct bio *bio)
+{
+ bio_integrity_generate_verify(bio, 1);
}
static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
@@ -454,40 +475,7 @@ EXPORT_SYMBOL(bio_integrity_prep);
*/
static int bio_integrity_verify(struct bio *bio)
{
- struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
- struct blk_integrity_exchg bix;
- struct bio_vec *bv;
- sector_t sector = bio->bi_integrity->bip_iter.bi_sector;
- unsigned int sectors, ret = 0;
- void *prot_buf = bio->bi_integrity->bip_buf;
- int i;
-
- bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
- bix.sector_size = bi->sector_size;
-
- bio_for_each_segment_all(bv, bio, i) {
- void *kaddr = kmap_atomic(bv->bv_page);
-
- bix.data_buf = kaddr + bv->bv_offset;
- bix.data_size = bv->bv_len;
- bix.prot_buf = prot_buf;
- bix.sector = sector;
-
- ret = bi->verify_fn(&bix);
-
- if (ret) {
- kunmap_atomic(kaddr);
- return ret;
- }
-
- sectors = bv->bv_len / bi->sector_size;
- sector += sectors;
- prot_buf += sectors * bi->tuple_size;
-
- kunmap_atomic(kaddr);
- }
-
- return ret;
+ return bio_integrity_generate_verify(bio, 0);
}
/**
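bio_integrity_generate() and bio_integrity_verify() above now share one walker, bio_integrity_generate_verify(), selected by an operate flag: generation cannot fail, verification returns on the first error. A self-contained sketch of that merged-walker shape — the XOR "metadata" is a stand-in for illustration, not the real integrity format:

#include <stddef.h>

/* Stand-in callbacks: generate fills meta from data, verify re-checks it. */
static void gen_fn(const unsigned char *data, unsigned char *meta)
{
	*meta = *data ^ 0x5a;
}

static int verify_fn(const unsigned char *data, const unsigned char *meta)
{
	return *meta == (*data ^ 0x5a) ? 0 : -1;
}

/* One walk over all segments; operate == 1 generates, 0 verifies,
 * mirroring bio_integrity_generate_verify() above.
 */
static int generate_verify(const unsigned char *data, unsigned char *meta,
			   size_t n, int operate)
{
	size_t i;
	int ret = 0;

	for (i = 0; i < n; i++) {
		if (operate) {
			gen_fn(&data[i], &meta[i]);
		} else {
			ret = verify_fn(&data[i], &meta[i]);
			if (ret)
				return ret;	/* first mismatch wins */
		}
	}
	return ret;
}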
diff --git a/fs/bio.c b/fs/bio.c
index 8754e7b6eb4..b2dd42ed9ed 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -116,7 +116,6 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
if (!slab)
goto out_unlock;
- printk(KERN_INFO "bio: create slab <%s> at %d\n", bslab->name, entry);
bslab->slab = slab;
bslab->slab_ref = 1;
bslab->slab_size = sz;
diff --git a/include/linux/blk-iopoll.h b/include/linux/blk-iopoll.h
index 308734d3d4a..77ae77c0b70 100644
--- a/include/linux/blk-iopoll.h
+++ b/include/linux/blk-iopoll.h
@@ -43,6 +43,4 @@ extern void __blk_iopoll_complete(struct blk_iopoll *);
extern void blk_iopoll_enable(struct blk_iopoll *);
extern void blk_iopoll_disable(struct blk_iopoll *);
-extern int blk_iopoll_enabled;
-
#endif
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 2ff2e8d982b..0120451545d 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -109,7 +109,7 @@ enum {
BLK_MQ_F_SHOULD_SORT = 1 << 1,
BLK_MQ_F_SHOULD_IPI = 1 << 2,
- BLK_MQ_S_STOPPED = 1 << 0,
+ BLK_MQ_S_STOPPED = 0,
BLK_MQ_MAX_DEPTH = 2048,
};
@@ -117,7 +117,8 @@ enum {
struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);
-void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data);
+int blk_mq_init_commands(struct request_queue *, int (*init)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data);
+void blk_mq_free_commands(struct request_queue *, void (*free)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4afa4f8f609..1e1fa3f93d5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -99,6 +99,7 @@ struct request {
union {
struct call_single_data csd;
struct work_struct mq_flush_work;
+ unsigned long fifo_time;
};
struct request_queue *q;
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 306dd8cd0b6..df63bd3a8cf 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -202,17 +202,8 @@ enum {
#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
-/*
- * Hack to reuse the csd.list list_head as the fifo time holder while
- * the request is in the io scheduler. Saves an unsigned long in rq.
- */
-#define rq_fifo_time(rq) ((unsigned long) (rq)->csd.list.next)
-#define rq_set_fifo_time(rq,exp) ((rq)->csd.list.next = (void *) (exp))
#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
-#define rq_fifo_clear(rq) do { \
- list_del_init(&(rq)->queuelist); \
- INIT_LIST_HEAD(&(rq)->csd.list); \
- } while (0)
+#define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist)
#else /* CONFIG_BLOCK */
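With the csd.list hack removed above, the expiry timestamp lives in the new rq->fifo_time member, unioned with csd in the blkdev.h hunk above — safe because the two are never live at the same time. A minimal sketch of how an I/O scheduler stamps and checks it now; my_sched_data and its fields are hypothetical deadline-style names, while fifo_time, queuelist, and the jiffies helpers are real:

struct my_sched_data {
	unsigned long fifo_expire[2];
	struct list_head fifo_list[2];
};

/* Sketch: fifo expiry via the dedicated field instead of csd.list.next. */
static void sched_add_request(struct my_sched_data *dd, struct request *rq,
			      int data_dir)
{
	rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
	list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
}

static int sched_fifo_expired(struct request *rq)
{
	return time_after_eq(jiffies, rq->fifo_time);
}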
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 6ae004e437e..633f5edd747 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -17,10 +17,7 @@ extern void cpu_idle(void);
typedef void (*smp_call_func_t)(void *info);
struct call_single_data {
- union {
- struct list_head list;
- struct llist_node llist;
- };
+ struct llist_node llist;
smp_call_func_t func;
void *info;
u16 flags;
@@ -53,8 +50,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
smp_call_func_t func, void *info, bool wait,
gfp_t gfp_flags);
-void __smp_call_function_single(int cpuid, struct call_single_data *data,
- int wait);
+int smp_call_function_single_async(int cpu, struct call_single_data *csd);
#ifdef CONFIG_SMP
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index e76ae19a8d6..e8a5eca1dbe 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -132,6 +132,7 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
* block_rq_complete - block IO operation completed by device driver
* @q: queue containing the block operation request
* @rq: block operations request
+ * @nr_bytes: number of completed bytes
*
* The block_rq_complete tracepoint event indicates that some portion
* of operation request has been completed by the device driver. If
@@ -139,11 +140,37 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
* do for the request. If @rq->bio is non-NULL then there is
* additional work required to complete the request.
*/
-DEFINE_EVENT(block_rq_with_error, block_rq_complete,
+TRACE_EVENT(block_rq_complete,
- TP_PROTO(struct request_queue *q, struct request *rq),
+ TP_PROTO(struct request_queue *q, struct request *rq,
+ unsigned int nr_bytes),
- TP_ARGS(q, rq)
+ TP_ARGS(q, rq, nr_bytes),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __field( int, errors )
+ __array( char, rwbs, RWBS_LEN )
+ __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->sector = blk_rq_pos(rq);
+ __entry->nr_sector = nr_bytes >> 9;
+ __entry->errors = rq->errors;
+
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
+ blk_dump_cmd(__get_str(cmd), rq);
+ ),
+
+ TP_printk("%d,%d %s (%s) %llu + %u [%d]",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->rwbs, __get_str(cmd),
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->errors)
);
DECLARE_EVENT_CLASS(block_rq,
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3c4d096544c..9cae286824b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -432,7 +432,7 @@ void hrtick_start(struct rq *rq, u64 delay)
if (rq == this_rq()) {
__hrtick_restart(rq);
} else if (!rq->hrtick_csd_pending) {
- __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
+ smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
rq->hrtick_csd_pending = 1;
}
}
diff --git a/kernel/smp.c b/kernel/smp.c
index ffee35bef17..06d574e42c7 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -117,13 +117,43 @@ static void csd_unlock(struct call_single_data *csd)
csd->flags &= ~CSD_FLAG_LOCK;
}
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
+
/*
* Insert a previously allocated call_single_data element
* for execution on the given CPU. data must already have
* ->func, ->info, and ->flags set.
*/
-static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
+static int generic_exec_single(int cpu, struct call_single_data *csd,
+ smp_call_func_t func, void *info, int wait)
{
+ struct call_single_data csd_stack = { .flags = 0 };
+ unsigned long flags;
+
+
+ if (cpu == smp_processor_id()) {
+ local_irq_save(flags);
+ func(info);
+ local_irq_restore(flags);
+ return 0;
+ }
+
+
+ if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu))
+ return -ENXIO;
+
+
+ if (!csd) {
+ csd = &csd_stack;
+ if (!wait)
+ csd = &__get_cpu_var(csd_data);
+ }
+
+ csd_lock(csd);
+
+ csd->func = func;
+ csd->info = info;
+
if (wait)
csd->flags |= CSD_FLAG_WAIT;
@@ -143,6 +173,8 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
if (wait)
csd_lock_wait(csd);
+
+ return 0;
}
/*
@@ -151,7 +183,8 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
*/
void generic_smp_call_function_single_interrupt(void)
{
- struct llist_node *entry, *next;
+ struct llist_node *entry;
+ struct call_single_data *csd, *csd_next;
/*
* Shouldn't receive this interrupt on a cpu that is not yet online.
@@ -161,21 +194,12 @@ void generic_smp_call_function_single_interrupt(void)
entry = llist_del_all(&__get_cpu_var(call_single_queue));
entry = llist_reverse_order(entry);
- while (entry) {
- struct call_single_data *csd;
-
- next = entry->next;
-
- csd = llist_entry(entry, struct call_single_data, llist);
+ llist_for_each_entry_safe(csd, csd_next, entry, llist) {
csd->func(csd->info);
csd_unlock(csd);
-
- entry = next;
}
}
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
-
/*
* smp_call_function_single - Run a function on a specific CPU
* @func: The function to run. This must be fast and non-blocking.
@@ -187,12 +211,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
int wait)
{
- struct call_single_data d = {
- .flags = 0,
- };
- unsigned long flags;
int this_cpu;
- int err = 0;
+ int err;
/*
* prevent preemption and reschedule on another processor,
@@ -209,32 +229,41 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
&& !oops_in_progress);
- if (cpu == this_cpu) {
- local_irq_save(flags);
- func(info);
- local_irq_restore(flags);
- } else {
- if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
- struct call_single_data *csd = &d;
+ err = generic_exec_single(cpu, NULL, func, info, wait);
- if (!wait)
- csd = &__get_cpu_var(csd_data);
+ put_cpu();
- csd_lock(csd);
+ return err;
+}
+EXPORT_SYMBOL(smp_call_function_single);
- csd->func = func;
- csd->info = info;
- generic_exec_single(cpu, csd, wait);
- } else {
- err = -ENXIO; /* CPU not online */
- }
- }
+/**
+ * smp_call_function_single_async(): Run an asynchronous function on a
+ * specific CPU.
+ * @cpu: The CPU to run on.
+ * @csd: Pre-allocated and setup data structure
+ *
+ * Like smp_call_function_single(), but the call is asynchronous and
+ * can thus be done from contexts with disabled interrupts.
+ *
+ * The caller passes its own pre-allocated data structure
+ * (ie: embedded in an object) and is responsible for synchronizing it
+ * such that the IPIs performed on the @csd are strictly serialized.
+ *
+ * NOTE: Be careful, there is unfortunately no current debugging facility to
+ * validate the correctness of this serialization.
+ */
+int smp_call_function_single_async(int cpu, struct call_single_data *csd)
+{
+ int err = 0;
- put_cpu();
+ preempt_disable();
+ err = generic_exec_single(cpu, csd, csd->func, csd->info, 0);
+ preempt_enable();
return err;
}
-EXPORT_SYMBOL(smp_call_function_single);
+EXPORT_SYMBOL_GPL(smp_call_function_single_async);
/*
* smp_call_function_any - Run a function on any of the given cpus
@@ -280,44 +309,6 @@ call:
EXPORT_SYMBOL_GPL(smp_call_function_any);
/**
- * __smp_call_function_single(): Run a function on a specific CPU
- * @cpu: The CPU to run on.
- * @data: Pre-allocated and setup data structure
- * @wait: If true, wait until function has completed on specified CPU.
- *
- * Like smp_call_function_single(), but allow caller to pass in a
- * pre-allocated data structure. Useful for embedding @data inside
- * other structures, for instance.
- */
-void __smp_call_function_single(int cpu, struct call_single_data *csd,
- int wait)
-{
- unsigned int this_cpu;
- unsigned long flags;
-
- this_cpu = get_cpu();
- /*
- * Can deadlock when called with interrupts disabled.
- * We allow cpu's that are not yet online though, as no one else can
- * send smp call function interrupt to this cpu and as such deadlocks
- * can't happen.
- */
- WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
- && !oops_in_progress);
-
- if (cpu == this_cpu) {
- local_irq_save(flags);
- csd->func(csd->info);
- local_irq_restore(flags);
- } else {
- csd_lock(csd);
- generic_exec_single(cpu, csd, wait);
- }
- put_cpu();
-}
-EXPORT_SYMBOL_GPL(__smp_call_function_single);
-
-/**
* smp_call_function_many(): Run a function on a set of other CPUs.
* @mask: The set of cpus to run on (only runs on online subset).
* @func: The function to run. This must be fast and non-blocking.
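As the kernel-doc above stresses, smp_call_function_single_async() hands the caller both the csd and the serialization burden: firing it again on the same csd before the previous handler has run is a bug, and nothing checks for it. A hedged sketch of one common in-tree shape — a self-owned pending flag, as in the cpuidle_coupled_poke() change above; struct call_single_data and the atomics are real, all other names hypothetical:

/* Sketch: csd embedded in a per-object structure, re-armed only after
 * the previous IPI's handler has completed.
 */
struct poke {
	struct call_single_data csd;
	atomic_t pending;	/* our own serialization, per the kernel-doc */
};

static void poke_handler(void *info)
{
	struct poke *p = info;

	/* ... do the remote work ... */
	atomic_set(&p->pending, 0);	/* allow the next async call */
}

static void poke_cpu(struct poke *p, int cpu)
{
	if (atomic_cmpxchg(&p->pending, 0, 1) == 0) {
		p->csd.func = poke_handler;
		p->csd.info = p;
		smp_call_function_single_async(cpu, &p->csd);
	}
}

The flag is cleared inside the handler, so poke_cpu() can rearm the csd only after the previous function has actually executed, which satisfies the "strictly serialized" requirement without any waiting on the submit side.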
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 7754ff16f33..09d2e241360 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -112,9 +112,6 @@ extern int sysctl_nr_open_min, sysctl_nr_open_max;
#ifndef CONFIG_MMU
extern int sysctl_nr_trim_pages;
#endif
-#ifdef CONFIG_BLOCK
-extern int blk_iopoll_enabled;
-#endif
/* Constants used for minimum and maximum */
#ifdef CONFIG_LOCKUP_DETECTOR
@@ -1087,15 +1084,6 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec,
},
#endif
-#ifdef CONFIG_BLOCK
- {
- .procname = "blk_iopoll",
- .data = &blk_iopoll_enabled,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
-#endif
{ }
};
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index b418cb0d724..4f3a3c03ead 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -702,6 +702,7 @@ void blk_trace_shutdown(struct request_queue *q)
* blk_add_trace_rq - Add a trace for a request oriented action
* @q: queue the io is for
* @rq: the source request
+ * @nr_bytes: number of completed bytes
* @what: the action
*
* Description:
@@ -709,7 +710,7 @@ void blk_trace_shutdown(struct request_queue *q)
*
**/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
- u32 what)
+ unsigned int nr_bytes, u32 what)
{
struct blk_trace *bt = q->blk_trace;
@@ -718,11 +719,11 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
what |= BLK_TC_ACT(BLK_TC_PC);
- __blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags,
+ __blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
what, rq->errors, rq->cmd_len, rq->cmd);
} else {
what |= BLK_TC_ACT(BLK_TC_FS);
- __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
+ __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
rq->cmd_flags, what, rq->errors, 0, NULL);
}
}
@@ -730,33 +731,34 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
static void blk_add_trace_rq_abort(void *ignore,
struct request_queue *q, struct request *rq)
{
- blk_add_trace_rq(q, rq, BLK_TA_ABORT);
+ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT);
}
static void blk_add_trace_rq_insert(void *ignore,
struct request_queue *q, struct request *rq)
{
- blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT);
}
static void blk_add_trace_rq_issue(void *ignore,
struct request_queue *q, struct request *rq)
{
- blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE);
}
static void blk_add_trace_rq_requeue(void *ignore,
struct request_queue *q,
struct request *rq)
{
- blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE);
}
static void blk_add_trace_rq_complete(void *ignore,
struct request_queue *q,
- struct request *rq)
+ struct request *rq,
+ unsigned int nr_bytes)
{
- blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
+ blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE);
}
/**
diff --git a/kernel/up.c b/kernel/up.c
index 509403e3fbc..1760bf3d146 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -22,16 +22,16 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
}
EXPORT_SYMBOL(smp_call_function_single);
-void __smp_call_function_single(int cpu, struct call_single_data *csd,
- int wait)
+int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
unsigned long flags;
local_irq_save(flags);
csd->func(csd->info);
local_irq_restore(flags);
+ return 0;
}
-EXPORT_SYMBOL(__smp_call_function_single);
+EXPORT_SYMBOL(smp_call_function_single_async);
int on_each_cpu(smp_call_func_t func, void *info, int wait)
{
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 4431610f049..01c6f979486 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -505,7 +505,6 @@ static void restart_watchdog_hrtimer(void *info)
static void update_timers(int cpu)
{
- struct call_single_data data = {.func = restart_watchdog_hrtimer};
/*
* Make sure that perf event counter will adopt to a new
* sampling period. Updating the sampling period directly would
@@ -515,7 +514,7 @@ static void update_timers(int cpu)
* might be late already so we have to restart the timer as well.
*/
watchdog_nmi_disable(cpu);
- __smp_call_function_single(cpu, &data, 1);
+ smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
watchdog_nmi_enable(cpu);
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 45fa2f11f84..bc3c89792b0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4135,8 +4135,8 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
struct softnet_data *next = remsd->rps_ipi_next;
if (cpu_online(remsd->cpu))
- __smp_call_function_single(remsd->cpu,
- &remsd->csd, 0);
+ smp_call_function_single_async(remsd->cpu,
+ &remsd->csd);
remsd = next;
}
} else