Diffstat (limited to 'block')
-rw-r--r--  block/bsg.c          |  2
-rw-r--r--  block/cfq-iosched.c  | 67
-rw-r--r--  block/ll_rw_blk.c    |  6
-rw-r--r--  block/scsi_ioctl.c   |  3
4 files changed, 49 insertions(+), 29 deletions(-)
diff --git a/block/bsg.c b/block/bsg.c
index 1ba9bc6d9a4..b571869928a 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -1035,7 +1035,7 @@ static int __init bsg_init(void)
 	dev_t devid;
 
 	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
-				sizeof(struct bsg_command), 0, 0, NULL, NULL);
+				sizeof(struct bsg_command), 0, 0, NULL);
 	if (!bsg_cmd_cachep) {
 		printk(KERN_ERR "bsg: failed creating slab cache\n");
 		return -ENOMEM;
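
This hunk tracks the slab allocator API change that removed cache destructors: kmem_cache_create() now takes five arguments (name, object size, alignment, flags, constructor), so callers drop the trailing destructor NULL. The three ll_rw_blk.c call sites below get the same mechanical conversion. A minimal sketch of the new call pattern follows, using a made-up "example_cmd" cache that is not part of this patch:

	#include <linux/errno.h>
	#include <linux/slab.h>

	/* "example_cmd" is illustrative only; the call shows the new
	 * five-argument signature: name, size, align, flags, ctor
	 * (the destructor argument no longer exists) */
	struct example_cmd {
		int id;
	};

	static struct kmem_cache *example_cachep;

	static int example_init(void)
	{
		example_cachep = kmem_cache_create("example_cmd",
				sizeof(struct example_cmd), 0, 0, NULL);
		if (!example_cachep)
			return -ENOMEM;
		return 0;
	}
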
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9755a3cfad2..d148ccbc36d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -92,7 +92,11 @@ struct cfq_data {
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;
 
-	struct cfq_queue *async_cfqq[IOPRIO_BE_NR];
+	/*
+	 * async queue for each priority case
+	 */
+	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
+	struct cfq_queue *async_idle_cfqq;
 
 	struct timer_list idle_class_timer;
@@ -111,9 +115,6 @@ struct cfq_data {
 	unsigned int cfq_slice_idle;
 
 	struct list_head cic_list;
-
-	sector_t new_seek_mean;
-	u64 new_seek_total;
 };
 
 /*
@@ -153,8 +154,6 @@ struct cfq_queue {
 
 	/* various state flags, see below */
 	unsigned int flags;
-
-	sector_t last_request_pos;
 };
 
 enum cfqq_state_flags {
@@ -1414,24 +1413,44 @@ out:
 	return cfqq;
 }
 
+static struct cfq_queue **
+cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
+{
+	switch(ioprio_class) {
+	case IOPRIO_CLASS_RT:
+		return &cfqd->async_cfqq[0][ioprio];
+	case IOPRIO_CLASS_BE:
+		return &cfqd->async_cfqq[1][ioprio];
+	case IOPRIO_CLASS_IDLE:
+		return &cfqd->async_idle_cfqq;
+	default:
+		BUG();
+	}
+}
+
 static struct cfq_queue *
 cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
 	      gfp_t gfp_mask)
 {
 	const int ioprio = task_ioprio(tsk);
+	const int ioprio_class = task_ioprio_class(tsk);
+	struct cfq_queue **async_cfqq = NULL;
 	struct cfq_queue *cfqq = NULL;
 
-	if (!is_sync)
-		cfqq = cfqd->async_cfqq[ioprio];
+	if (!is_sync) {
+		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
+		cfqq = *async_cfqq;
+	}
+
 	if (!cfqq)
 		cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
 
 	/*
	 * pin the queue now that it's allocated, scheduler exit will prune it
	 */
-	if (!is_sync && !cfqd->async_cfqq[ioprio]) {
+	if (!is_sync && !(*async_cfqq)) {
 		atomic_inc(&cfqq->ref);
-		cfqd->async_cfqq[ioprio] = cfqq;
+		*async_cfqq = cfqq;
 	}
 
 	atomic_inc(&cfqq->ref);
@@ -1597,11 +1616,6 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
 	else
 		sdist = cic->last_request_pos - rq->sector;
 
-	if (!cic->seek_samples) {
-		cfqd->new_seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
-		cfqd->new_seek_mean = cfqd->new_seek_total / 256;
-	}
-
 	/*
 	 * Don't allow the seek distance to get too large from the
 	 * odd fragment, pagein, etc
@@ -1737,7 +1751,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfq_update_idle_window(cfqd, cfqq, cic);
 
 	cic->last_request_pos = rq->sector + rq->nr_sectors;
-	cfqq->last_request_pos = cic->last_request_pos;
 
 	if (cfqq == cfqd->active_queue) {
 		/*
@@ -2042,11 +2055,24 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 	blk_sync_queue(cfqd->queue);
 }
 
+static void cfq_put_async_queues(struct cfq_data *cfqd)
+{
+	int i;
+
+	for (i = 0; i < IOPRIO_BE_NR; i++) {
+		if (cfqd->async_cfqq[0][i])
+			cfq_put_queue(cfqd->async_cfqq[0][i]);
+		if (cfqd->async_cfqq[1][i])
+			cfq_put_queue(cfqd->async_cfqq[1][i]);
+	}
+	if (cfqd->async_idle_cfqq)
+		cfq_put_queue(cfqd->async_idle_cfqq);
+}
+
 static void cfq_exit_queue(elevator_t *e)
 {
 	struct cfq_data *cfqd = e->elevator_data;
 	request_queue_t *q = cfqd->queue;
-	int i;
 
 	cfq_shutdown_timer_wq(cfqd);
@@ -2063,12 +2089,7 @@ static void cfq_exit_queue(elevator_t *e)
 		__cfq_exit_single_io_context(cfqd, cic);
 	}
 
-	/*
-	 * Put the async queues
-	 */
-	for (i = 0; i < IOPRIO_BE_NR; i++)
-		if (cfqd->async_cfqq[i])
-			cfq_put_queue(cfqd->async_cfqq[i]);
+	cfq_put_async_queues(cfqd);
 
 	spin_unlock_irq(q->queue_lock);
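
The cfq-iosched.c changes widen the cache of async (non-sync) queues from one slot per best-effort priority to one slot per (scheduling class, priority) pair: IOPRIO_CLASS_RT and IOPRIO_CLASS_BE each get IOPRIO_BE_NR slots in the new two-dimensional async_cfqq array, while IOPRIO_CLASS_IDLE shares a single async_idle_cfqq, since priority levels carry no meaning in the idle class. cfq_async_queue_prio() returns a pointer to the slot itself, so cfq_get_queue() can both read a cached queue and install a freshly allocated one through the same pointer; the extra reference taken when installing is what cfq_put_async_queues() drops at scheduler exit. A self-contained sketch of the slot lookup, using simplified stand-in types rather than the kernel structures:

	#define IOPRIO_BE_NR 8	/* priority levels in the RT and BE classes */

	enum { CLASS_RT, CLASS_BE, CLASS_IDLE };	/* stand-ins for IOPRIO_CLASS_* */

	struct queue;

	struct sched_data {
		struct queue *async_q[2][IOPRIO_BE_NR];	/* [0]: RT, [1]: BE */
		struct queue *async_idle_q;		/* one shared idle queue */
	};

	/* return the cache slot for a (class, priority) pair; the caller
	 * dereferences it to reuse a queue or stores a new one through it */
	static struct queue **async_queue_slot(struct sched_data *sd,
					       int class, int prio)
	{
		switch (class) {
		case CLASS_RT:
			return &sd->async_q[0][prio];
		case CLASS_BE:
			return &sd->async_q[1][prio];
		default:	/* CLASS_IDLE: prio is ignored */
			return &sd->async_idle_q;
		}
	}
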
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index d7cadf30416..66056ca5e63 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3698,13 +3698,13 @@ int __init blk_dev_init(void)
 		panic("Failed to create kblockd\n");
 
 	request_cachep = kmem_cache_create("blkdev_requests",
-			sizeof(struct request), 0, SLAB_PANIC, NULL, NULL);
+			sizeof(struct request), 0, SLAB_PANIC, NULL);
 
 	requestq_cachep = kmem_cache_create("blkdev_queue",
-			sizeof(request_queue_t), 0, SLAB_PANIC, NULL, NULL);
+			sizeof(request_queue_t), 0, SLAB_PANIC, NULL);
 
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
-			sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
+			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
 
 	for_each_possible_cpu(i)
 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
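
These three call sites take the same one-argument trim as bsg.c above. Because they pass SLAB_PANIC, a failed cache creation panics inside kmem_cache_create() itself, which is why no NULL checks follow the calls.
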
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 7bfebd574e5..d359a715bbc 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -433,11 +433,10 @@ int sg_scsi_ioctl(struct file *file, struct request_queue *q,
 	bytes = max(in_len, out_len);
 	if (bytes) {
-		buffer = kmalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN);
+		buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN);
 		if (!buffer)
 			return -ENOMEM;
-		memset(buffer, 0, bytes);
 	}
 
 	rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
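
The scsi_ioctl.c hunk folds the kmalloc() plus memset() pair into kzalloc(), which allocates and zero-fills in one call. A sketch of the equivalence, using a hypothetical helper that is not part of this patch:

	#include <linux/slab.h>
	#include <linux/string.h>

	/* zeroed_alloc() is illustrative only: it spells out what
	 * kzalloc(bytes, gfp) does for the caller in a single call */
	static void *zeroed_alloc(size_t bytes, gfp_t gfp)
	{
		void *p = kmalloc(bytes, gfp);

		if (p)
			memset(p, 0, bytes);
		return p;	/* same result as kzalloc(bytes, gfp) */
	}
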