author    Cong Wang <amwang@redhat.com>          2011-11-25 23:14:24 +0800
committer Cong Wang <xiyou.wangcong@gmail.com>   2012-03-20 21:48:20 +0800
commit    86cbce4de23f26f1aee8df789d30b0f1ba78c967 (patch)
tree      d2465378339d2000ca8588cefe7d0025242cde0a
parent    77dfce076cbd76c04e90abff188d058cdbff78dd (diff)
hv: remove the second argument of k[un]map_atomic()
Acked-by: Greg Kroah-Hartman <gregkh@suse.de>
Signed-off-by: Cong Wang <amwang@redhat.com>
-rw-r--r-- drivers/staging/hv/storvsc_drv.c | 31
1 file changed, 14 insertions(+), 17 deletions(-)
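
The patch below is a mechanical conversion: kmap_atomic() and kunmap_atomic() no longer take a KM_* slot argument (the slot is managed internally), so the KM_IRQ0 argument is simply dropped at every call site in storvsc_drv.c. As a minimal sketch of the call-site change (the helper copy_one_sg_page() is illustrative and not part of this driver):

	#include <linux/highmem.h>
	#include <linux/scatterlist.h>
	#include <linux/string.h>

	/*
	 * Illustrative helper, not from the patch: copy one scatterlist
	 * entry into a caller-supplied buffer using the slot-less API.
	 * Before this series the mapping call was
	 *     kmap_atomic(sg_page(sg), KM_IRQ0);
	 * and the matching unmap was
	 *     kunmap_atomic(vaddr, KM_IRQ0);
	 */
	static void copy_one_sg_page(struct scatterlist *sg, void *dst)
	{
		void *vaddr = kmap_atomic(sg_page(sg));

		memcpy(dst, vaddr + sg->offset, sg->length);
		kunmap_atomic(vaddr);
	}
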
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
index eb853f71089..ddbdec8f57f 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@ -941,15 +941,14 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
local_irq_save(flags);
for (i = 0; i < orig_sgl_count; i++) {
- dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
- KM_IRQ0) + orig_sgl[i].offset;
+ dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])))
+ + orig_sgl[i].offset;
dest = dest_addr;
destlen = orig_sgl[i].length;
if (bounce_addr == 0)
bounce_addr =
- (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
- KM_IRQ0);
+ (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])));
while (destlen) {
src = bounce_addr + bounce_sgl[j].offset;
@@ -965,7 +964,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
if (bounce_sgl[j].offset == bounce_sgl[j].length) {
/* full */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+ kunmap_atomic((void *)bounce_addr);
j++;
/*
@@ -990,15 +989,14 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
if (destlen || i != orig_sgl_count - 1)
bounce_addr =
(unsigned long)kmap_atomic(
- sg_page((&bounce_sgl[j])), KM_IRQ0);
+ sg_page((&bounce_sgl[j])));
} else if (destlen == 0 && i == orig_sgl_count - 1) {
/* unmap the last bounce that is < PAGE_SIZE */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+ kunmap_atomic((void *)bounce_addr);
}
}
- kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
- KM_IRQ0);
+ kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset));
}
local_irq_restore(flags);
@@ -1024,15 +1022,14 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
local_irq_save(flags);
for (i = 0; i < orig_sgl_count; i++) {
- src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
- KM_IRQ0) + orig_sgl[i].offset;
+ src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])))
+ + orig_sgl[i].offset;
src = src_addr;
srclen = orig_sgl[i].length;
if (bounce_addr == 0)
bounce_addr =
- (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
- KM_IRQ0);
+ (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])));
while (srclen) {
/* assume bounce offset always == 0 */
@@ -1049,22 +1046,22 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
if (bounce_sgl[j].length == PAGE_SIZE) {
/* full..move to next entry */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+ kunmap_atomic((void *)bounce_addr);
j++;
/* if we need to use another bounce buffer */
if (srclen || i != orig_sgl_count - 1)
bounce_addr =
(unsigned long)kmap_atomic(
- sg_page((&bounce_sgl[j])), KM_IRQ0);
+ sg_page((&bounce_sgl[j])));
} else if (srclen == 0 && i == orig_sgl_count - 1) {
/* unmap the last bounce that is < PAGE_SIZE */
- kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+ kunmap_atomic((void *)bounce_addr);
}
}
- kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
+ kunmap_atomic((void *)(src_addr - orig_sgl[i].offset));
}
local_irq_restore(flags);
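
Note that the converted code still unmaps the bounce page before the original scatterlist page. With the slot-less kmap_atomic() the mappings are stack-based, so nested mappings should be released in reverse order; a minimal sketch of that ordering (function and parameter names are illustrative, not from the driver):

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Illustrative only: nested atomic mappings are released LIFO. */
	static void copy_page_to_page(struct page *dst_page, struct page *src_page)
	{
		void *dst = kmap_atomic(dst_page);	/* mapped first  */
		void *src = kmap_atomic(src_page);	/* mapped second */

		memcpy(dst, src, PAGE_SIZE);

		kunmap_atomic(src);			/* unmapped first */
		kunmap_atomic(dst);			/* unmapped last  */
	}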