author     Linus Torvalds <torvalds@linux-foundation.org>    2009-06-11 10:52:27 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2009-06-11 11:10:35 -0700
commit     c9059598ea8981d02356eead3188bf7fa4d717b8 (patch)
tree       03e73b20a30e988da7c6a3e0ad93b2dc5843274d /arch
parent     0a33f80a8373eca7f4bea3961d1346c3815fa5ed (diff)
parent     b0fd271d5fba0b2d00888363f3869e3f9b26caa9 (diff)
Merge branch 'for-2.6.31' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.31' of git://git.kernel.dk/linux-2.6-block: (153 commits)
  block: add request clone interface (v2)
  floppy: fix hibernation
  ramdisk: remove long-deprecated "ramdisk=" boot-time parameter
  fs/bio.c: add missing __user annotation
  block: prevent possible io_context->refcount overflow
  Add serial number support for virtio_blk, V4a
  block: Add missing bounce_pfn stacking and fix comments
  Revert "block: Fix bounce limit setting in DM"
  cciss: decode unit attention in SCSI error handling code
  cciss: Remove no longer needed sendcmd reject processing code
  cciss: change SCSI error handling routines to work with interrupts enabled.
  cciss: separate error processing and command retrying code in sendcmd_withirq_core()
  cciss: factor out fix target status processing code from sendcmd functions
  cciss: simplify interface of sendcmd() and sendcmd_withirq()
  cciss: factor out core of sendcmd_withirq() for use by SCSI error handling code
  cciss: Use schedule_timeout_uninterruptible in SCSI error handling code
  block: needs to set the residual length of a bidi request
  Revert "block: implement blkdev_readpages"
  block: Fix bounce limit setting in DM
  Removed reference to non-existing file Documentation/PCI/PCI-DMA-mapping.txt
  ...

Manually fix conflicts with tracing updates in:
	block/blk-sysfs.c
	drivers/ide/ide-atapi.c
	drivers/ide/ide-cd.c
	drivers/ide/ide-floppy.c
	drivers/ide/ide-tape.c
	include/trace/events/block.h
	kernel/trace/blktrace.c
Diffstat (limited to 'arch')
-rw-r--r--    arch/arm/plat-omap/mailbox.c     63
-rw-r--r--    arch/powerpc/sysdev/axonram.c     2
-rw-r--r--    arch/um/drivers/ubd_kern.c       36
3 files changed, 48 insertions, 53 deletions
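All three arch/ drivers below are converted to the block-layer API introduced for 2.6.31 in this merge: blk_fetch_request() replaces the elv_next_request()/blkdev_dequeue_request() pair, the blk_rq_pos()/blk_rq_bytes() accessors replace direct field access, and blk_end_request_all() replaces the blk_end_request(rq, 0, 0) + BUG() idiom. A minimal sketch of that request_fn shape, not part of this patch (demo_* names are invented for illustration):

#include <linux/blkdev.h>
#include <linux/kernel.h>

/* Called by the block layer with q->queue_lock held. */
static void demo_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* blk_fetch_request() peeks at and dequeues the next request in one
	 * step, replacing elv_next_request() + blkdev_dequeue_request(). */
	while ((rq = blk_fetch_request(q)) != NULL) {
		pr_debug("demo: %u bytes at sector %llu\n",
			 blk_rq_bytes(rq),
			 (unsigned long long)blk_rq_pos(rq));

		/* Complete the whole request while still under the queue
		 * lock; the old "if (__blk_end_request(rq, 0, 0)) BUG();"
		 * idiom is gone. */
		__blk_end_request_all(rq, 0);
	}
}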
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 0abfbaa5987..40424edae93 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -147,24 +147,40 @@ static int __mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg, void *arg)
return ret;
}
+struct omap_msg_tx_data {
+ mbox_msg_t msg;
+ void *arg;
+};
+
+static void omap_msg_tx_end_io(struct request *rq, int error)
+{
+ kfree(rq->special);
+ __blk_put_request(rq->q, rq);
+}
+
int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg, void* arg)
{
+ struct omap_msg_tx_data *tx_data;
struct request *rq;
struct request_queue *q = mbox->txq->queue;
- int ret = 0;
+
+ tx_data = kmalloc(sizeof(*tx_data), GFP_ATOMIC);
+ if (unlikely(!tx_data))
+ return -ENOMEM;
rq = blk_get_request(q, WRITE, GFP_ATOMIC);
if (unlikely(!rq)) {
- ret = -ENOMEM;
- goto fail;
+ kfree(tx_data);
+ return -ENOMEM;
}
- rq->data = (void *)msg;
- blk_insert_request(q, rq, 0, arg);
+ tx_data->msg = msg;
+ tx_data->arg = arg;
+ rq->end_io = omap_msg_tx_end_io;
+ blk_insert_request(q, rq, 0, tx_data);
schedule_work(&mbox->txq->work);
- fail:
- return ret;
+ return 0;
}
EXPORT_SYMBOL(omap_mbox_msg_send);
@@ -178,22 +194,28 @@ static void mbox_tx_work(struct work_struct *work)
struct request_queue *q = mbox->txq->queue;
while (1) {
+ struct omap_msg_tx_data *tx_data;
+
spin_lock(q->queue_lock);
- rq = elv_next_request(q);
+ rq = blk_fetch_request(q);
spin_unlock(q->queue_lock);
if (!rq)
break;
- ret = __mbox_msg_send(mbox, (mbox_msg_t) rq->data, rq->special);
+ tx_data = rq->special;
+
+ ret = __mbox_msg_send(mbox, tx_data->msg, tx_data->arg);
if (ret) {
enable_mbox_irq(mbox, IRQ_TX);
+ spin_lock(q->queue_lock);
+ blk_requeue_request(q, rq);
+ spin_unlock(q->queue_lock);
return;
}
spin_lock(q->queue_lock);
- if (__blk_end_request(rq, 0, 0))
- BUG();
+ __blk_end_request_all(rq, 0);
spin_unlock(q->queue_lock);
}
}
@@ -218,16 +240,13 @@ static void mbox_rx_work(struct work_struct *work)
while (1) {
spin_lock_irqsave(q->queue_lock, flags);
- rq = elv_next_request(q);
+ rq = blk_fetch_request(q);
spin_unlock_irqrestore(q->queue_lock, flags);
if (!rq)
break;
- msg = (mbox_msg_t) rq->data;
-
- if (blk_end_request(rq, 0, 0))
- BUG();
-
+ msg = (mbox_msg_t)rq->special;
+ blk_end_request_all(rq, 0);
mbox->rxq->callback((void *)msg);
}
}
@@ -264,7 +283,6 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
goto nomem;
msg = mbox_fifo_read(mbox);
- rq->data = (void *)msg;
if (unlikely(mbox_seq_test(mbox, msg))) {
pr_info("mbox: Illegal seq bit!(%08x)\n", msg);
@@ -272,7 +290,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
mbox->err_notify();
}
- blk_insert_request(q, rq, 0, NULL);
+ blk_insert_request(q, rq, 0, (void *)msg);
if (mbox->ops->type == OMAP_MBOX_TYPE1)
break;
}
@@ -329,16 +347,15 @@ omap_mbox_read(struct device *dev, struct device_attribute *attr, char *buf)
while (1) {
spin_lock_irqsave(q->queue_lock, flags);
- rq = elv_next_request(q);
+ rq = blk_fetch_request(q);
spin_unlock_irqrestore(q->queue_lock, flags);
if (!rq)
break;
- *p = (mbox_msg_t) rq->data;
+ *p = (mbox_msg_t)rq->special;
- if (blk_end_request(rq, 0, 0))
- BUG();
+ blk_end_request_all(rq, 0);
if (unlikely(mbox_seq_test(mbox, *p))) {
pr_info("mbox: Illegal seq bit!(%08x) ignored\n", *p);
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 9e105cbc5e5..a4779912a5c 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -250,7 +250,7 @@ axon_ram_probe(struct of_device *device, const struct of_device_id *device_id)
set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT);
blk_queue_make_request(bank->disk->queue, axon_ram_make_request);
- blk_queue_hardsect_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
+ blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
add_disk(bank->disk);
bank->irq_id = irq_of_parse_and_map(device->node, 0);
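The axonram.c hunk above is only the helper rename: blk_queue_hardsect_size() becomes blk_queue_logical_block_size(), with the same meaning (the smallest addressable unit of the device). A hypothetical setup snippet, not taken from this patch:

#include <linux/blkdev.h>
#include <linux/genhd.h>

static void demo_init_queue(struct gendisk *disk)
{
	/* 512-byte logical blocks; formerly blk_queue_hardsect_size(). */
	blk_queue_logical_block_size(disk->queue, 512);
}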
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index f934225fd8e..aa9e926e13d 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -451,23 +451,6 @@ static void do_ubd_request(struct request_queue * q);
/* Only changed by ubd_init, which is an initcall. */
static int thread_fd = -1;
-
-static void ubd_end_request(struct request *req, int bytes, int error)
-{
- blk_end_request(req, error, bytes);
-}
-
-/* Callable only from interrupt context - otherwise you need to do
- * spin_lock_irq()/spin_lock_irqsave() */
-static inline void ubd_finish(struct request *req, int bytes)
-{
- if(bytes < 0){
- ubd_end_request(req, 0, -EIO);
- return;
- }
- ubd_end_request(req, bytes, 0);
-}
-
static LIST_HEAD(restart);
/* XXX - move this inside ubd_intr. */
@@ -475,7 +458,6 @@ static LIST_HEAD(restart);
static void ubd_handler(void)
{
struct io_thread_req *req;
- struct request *rq;
struct ubd *ubd;
struct list_head *list, *next_ele;
unsigned long flags;
@@ -492,10 +474,7 @@ static void ubd_handler(void)
return;
}
- rq = req->req;
- rq->nr_sectors -= req->length >> 9;
- if(rq->nr_sectors == 0)
- ubd_finish(rq, rq->hard_nr_sectors << 9);
+ blk_end_request(req->req, 0, req->length);
kfree(req);
}
reactivate_fd(thread_fd, UBD_IRQ);
@@ -1243,27 +1222,26 @@ static void do_ubd_request(struct request_queue *q)
{
struct io_thread_req *io_req;
struct request *req;
- int n, last_sectors;
+ sector_t sector;
+ int n;
while(1){
struct ubd *dev = q->queuedata;
if(dev->end_sg == 0){
- struct request *req = elv_next_request(q);
+ struct request *req = blk_fetch_request(q);
if(req == NULL)
return;
dev->request = req;
- blkdev_dequeue_request(req);
dev->start_sg = 0;
dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
}
req = dev->request;
- last_sectors = 0;
+ sector = blk_rq_pos(req);
while(dev->start_sg < dev->end_sg){
struct scatterlist *sg = &dev->sg[dev->start_sg];
- req->sector += last_sectors;
io_req = kmalloc(sizeof(struct io_thread_req),
GFP_ATOMIC);
if(io_req == NULL){
@@ -1272,10 +1250,10 @@ static void do_ubd_request(struct request_queue *q)
return;
}
prepare_request(req, io_req,
- (unsigned long long) req->sector << 9,
+ (unsigned long long)sector << 9,
sg->offset, sg->length, sg_page(sg));
- last_sectors = sg->length >> 9;
+ sector += sg->length >> 9;
n = os_write_file(thread_fd, &io_req,
sizeof(struct io_thread_req *));
if(n != sizeof(struct io_thread_req *)){
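The ubd_kern.c changes above stop writing back into req->sector and req->nr_sectors: the driver keeps its own running sector initialised from blk_rq_pos(), and retires completed byte counts with blk_end_request(), which handles partial completion itself. A minimal sketch of that accounting, illustrative only (demo_* names invented):

#include <linux/blkdev.h>

/* Report one completed chunk back to the block layer. Returns true while
 * the request still has pending bytes. */
static bool demo_complete_chunk(struct request *req, unsigned int bytes)
{
	/* blk_end_request() takes the queue lock itself, so call it
	 * without q->queue_lock held. */
	return blk_end_request(req, 0, bytes);
}

/* Walk a request in fixed-size chunks without touching req->sector. */
static void demo_walk_request(struct request *req)
{
	sector_t sector = blk_rq_pos(req);	/* start sector, read once */
	unsigned int remaining = blk_rq_bytes(req);

	while (remaining) {
		unsigned int chunk = min(remaining, 4096u);

		/* submit [sector, sector + (chunk >> 9)) to the backend here */
		sector += chunk >> 9;
		remaining -= chunk;
	}
}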