| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-21 09:40:26 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-21 09:40:26 -0700 |
| commit | 9f3938346a5c1fa504647670edb5fea5756cfb00 (patch) | |
| tree | 7cf6d24d6b076c8db8571494984924cac03703a2 /mm | |
| parent | 69a7aebcf019ab3ff5764525ad6858fbe23bb86d (diff) | |
| parent | 317b6e128247f75976b0fc2b9fd8d2c20ef13b3a (diff) | |
Merge branch 'kmap_atomic' of git://github.com/congwang/linux
Pull kmap_atomic cleanup from Cong Wang.
It's been in -next for a long time, and it gets rid of the (no longer
used) second argument to k[un]map_atomic().
Fix up a few trivial conflicts in various drivers, and do an "evil
merge" to catch some new uses that have come in since Cong's tree.
* 'kmap_atomic' of git://github.com/congwang/linux: (59 commits)
feature-removal-schedule.txt: schedule the deprecated form of kmap_atomic() for removal
highmem: kill all __kmap_atomic()
[swarren@nvidia.com: highmem: Fix ARM build break due to __kmap_atomic rename]
drbd: remove the second argument of k[un]map_atomic()
zcache: remove the second argument of k[un]map_atomic()
gma500: remove the second argument of k[un]map_atomic()
dm: remove the second argument of k[un]map_atomic()
tomoyo: remove the second argument of k[un]map_atomic()
sunrpc: remove the second argument of k[un]map_atomic()
rds: remove the second argument of k[un]map_atomic()
net: remove the second argument of k[un]map_atomic()
mm: remove the second argument of k[un]map_atomic()
lib: remove the second argument of k[un]map_atomic()
power: remove the second argument of k[un]map_atomic()
kdb: remove the second argument of k[un]map_atomic()
udf: remove the second argument of k[un]map_atomic()
ubifs: remove the second argument of k[un]map_atomic()
squashfs: remove the second argument of k[un]map_atomic()
reiserfs: remove the second argument of k[un]map_atomic()
ocfs2: remove the second argument of k[un]map_atomic()
ntfs: remove the second argument of k[un]map_atomic()
...
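For context, the change this series applies across every subsystem listed above is the same mechanical one: drop the km_type slot argument from kmap_atomic()/kunmap_atomic(). A minimal before/after sketch follows; the helper and its name are illustrative, not taken from the merge, and it assumes the post-series <linux/highmem.h> API in which the mapping slot is managed per-CPU rather than named by the caller.

```c
#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative helper, not part of the merge. */
static void fill_page_sketch(struct page *page, const void *src, size_t len)
{
	void *kaddr;

	/* Old, now-deprecated form:
	 *	kaddr = kmap_atomic(page, KM_USER0);
	 */
	kaddr = kmap_atomic(page);	/* slot chosen per-CPU, stack-like */
	memcpy(kaddr, src, len);
	kunmap_atomic(kaddr);		/* no KM_USER0 here either */
}
```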
Diffstat (limited to 'mm')
-rw-r--r-- | mm/bounce.c | 4
-rw-r--r-- | mm/filemap.c | 8
-rw-r--r-- | mm/ksm.c | 12
-rw-r--r-- | mm/memory.c | 4
-rw-r--r-- | mm/shmem.c | 4
-rw-r--r-- | mm/swapfile.c | 30
-rw-r--r-- | mm/vmalloc.c | 8
7 files changed, 35 insertions(+), 35 deletions(-)
diff --git a/mm/bounce.c b/mm/bounce.c
index 4e9ae722af8..d1be02ca188 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -50,9 +50,9 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
 	unsigned char *vto;
 
 	local_irq_save(flags);
-	vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
+	vto = kmap_atomic(to->bv_page);
 	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
-	kunmap_atomic(vto, KM_BOUNCE_READ);
+	kunmap_atomic(vto);
 	local_irq_restore(flags);
 }
diff --git a/mm/filemap.c b/mm/filemap.c
index b66275757c2..2f8165075a5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1318,10 +1318,10 @@ int file_read_actor(read_descriptor_t *desc, struct page *page,
 	 * taking the kmap.
 	 */
 	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		left = __copy_to_user_inatomic(desc->arg.buf,
 						kaddr + offset, size);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		if (left == 0)
 			goto success;
 	}
@@ -2045,7 +2045,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 	size_t copied;
 
 	BUG_ON(!in_atomic());
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	if (likely(i->nr_segs == 1)) {
 		int left;
 		char __user *buf = i->iov->iov_base + i->iov_offset;
@@ -2055,7 +2055,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
 						i->iov, i->iov_offset, bytes);
 	}
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	return copied;
 }
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -672,9 +672,9 @@ error:
 static u32 calc_checksum(struct page *page)
 {
 	u32 checksum;
-	void *addr = kmap_atomic(page, KM_USER0);
+	void *addr = kmap_atomic(page);
 	checksum = jhash2(addr, PAGE_SIZE / 4, 17);
-	kunmap_atomic(addr, KM_USER0);
+	kunmap_atomic(addr);
 	return checksum;
 }
@@ -683,11 +683,11 @@ static int memcmp_pages(struct page *page1, struct page *page2)
 	char *addr1, *addr2;
 	int ret;
 
-	addr1 = kmap_atomic(page1, KM_USER0);
-	addr2 = kmap_atomic(page2, KM_USER1);
+	addr1 = kmap_atomic(page1);
+	addr2 = kmap_atomic(page2);
 	ret = memcmp(addr1, addr2, PAGE_SIZE);
-	kunmap_atomic(addr2, KM_USER1);
-	kunmap_atomic(addr1, KM_USER0);
+	kunmap_atomic(addr2);
+	kunmap_atomic(addr1);
 	return ret;
 }
diff --git a/mm/memory.c b/mm/memory.c
index fa2f04e0337..347e5fad1cf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2447,7 +2447,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
 	 * fails, we just zero-fill it. Live with it.
 	 */
 	if (unlikely(!src)) {
-		void *kaddr = kmap_atomic(dst, KM_USER0);
+		void *kaddr = kmap_atomic(dst);
 		void __user *uaddr = (void __user *)(va & PAGE_MASK);
 
 		/*
@@ -2458,7 +2458,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
 		 */
 		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
 			clear_page(kaddr);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		flush_dcache_page(dst);
 	} else
 		copy_user_highpage(dst, src, va, vma);
diff --git a/mm/shmem.c b/mm/shmem.c
index 269d049294a..b7e19557186 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1656,9 +1656,9 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
 	}
 	inode->i_mapping->a_ops = &shmem_aops;
 	inode->i_op = &shmem_symlink_inode_operations;
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	memcpy(kaddr, symname, len);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	set_page_dirty(page);
 	unlock_page(page);
 	page_cache_release(page);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index d999f090dfd..00a962caab1 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2427,9 +2427,9 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
 		if (!(count & COUNT_CONTINUED))
 			goto out;
 
-		map = kmap_atomic(list_page, KM_USER0) + offset;
+		map = kmap_atomic(list_page) + offset;
 		count = *map;
-		kunmap_atomic(map, KM_USER0);
+		kunmap_atomic(map);
 
 		/*
 		 * If this continuation count now has some space in it,
@@ -2472,7 +2472,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
 	offset &= ~PAGE_MASK;
 	page = list_entry(head->lru.next, struct page, lru);
-	map = kmap_atomic(page, KM_USER0) + offset;
+	map = kmap_atomic(page) + offset;
 
 	if (count == SWAP_MAP_MAX)	/* initial increment from swap_map */
 		goto init_map;	/* jump over SWAP_CONT_MAX checks */
@@ -2482,26 +2482,26 @@ static bool swap_count_continued(struct swap_info_struct *si,
 		 * Think of how you add 1 to 999
 		 */
 		while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 			page = list_entry(page->lru.next, struct page, lru);
 			BUG_ON(page == head);
-			map = kmap_atomic(page, KM_USER0) + offset;
+			map = kmap_atomic(page) + offset;
 		}
 		if (*map == SWAP_CONT_MAX) {
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 			page = list_entry(page->lru.next, struct page, lru);
 			if (page == head)
 				return false;	/* add count continuation */
-			map = kmap_atomic(page, KM_USER0) + offset;
+			map = kmap_atomic(page) + offset;
 init_map:		*map = 0;		/* we didn't zero the page */
 		}
 		*map += 1;
-		kunmap_atomic(map, KM_USER0);
+		kunmap_atomic(map);
 		page = list_entry(page->lru.prev, struct page, lru);
 		while (page != head) {
-			map = kmap_atomic(page, KM_USER0) + offset;
+			map = kmap_atomic(page) + offset;
 			*map = COUNT_CONTINUED;
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 			page = list_entry(page->lru.prev, struct page, lru);
 		}
 		return true;			/* incremented */
@@ -2512,22 +2512,22 @@ init_map:	*map = 0;		/* we didn't zero the page */
 		 */
 		BUG_ON(count != COUNT_CONTINUED);
 		while (*map == COUNT_CONTINUED) {
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 			page = list_entry(page->lru.next, struct page, lru);
 			BUG_ON(page == head);
-			map = kmap_atomic(page, KM_USER0) + offset;
+			map = kmap_atomic(page) + offset;
 		}
 		BUG_ON(*map == 0);
 		*map -= 1;
 		if (*map == 0)
 			count = 0;
-		kunmap_atomic(map, KM_USER0);
+		kunmap_atomic(map);
 		page = list_entry(page->lru.prev, struct page, lru);
 		while (page != head) {
-			map = kmap_atomic(page, KM_USER0) + offset;
+			map = kmap_atomic(page) + offset;
 			*map = SWAP_CONT_MAX | count;
 			count = COUNT_CONTINUED;
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 			page = list_entry(page->lru.prev, struct page, lru);
 		}
 		return count == COUNT_CONTINUED;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 86ce9a526c1..94dff883b44 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1906,9 +1906,9 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
 			 * we can expect USER0 is not used (see vread/vwrite's
 			 * function description)
 			 */
-			void *map = kmap_atomic(p, KM_USER0);
+			void *map = kmap_atomic(p);
 			memcpy(buf, map + offset, length);
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 		} else
 			memset(buf, 0, length);
@@ -1945,9 +1945,9 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
 			 * we can expect USER0 is not used (see vread/vwrite's
 			 * function description)
 			 */
-			void *map = kmap_atomic(p, KM_USER0);
+			void *map = kmap_atomic(p);
 			memcpy(map + offset, buf, length);
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 		}
 		addr += length;
 		buf += length;
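One consequence worth noting, visible in the memcmp_pages() hunks in mm/ksm.c above: once the slot is handed out stack-fashion per CPU, nested mappings should be released in reverse (LIFO) order, which is why addr2 is unmapped before addr1. A minimal sketch of that pattern follows; the function name is illustrative, and it assumes the post-series API.

```c
#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative sketch mirroring memcmp_pages() above: LIFO unmap order. */
static int pages_differ_sketch(struct page *p1, struct page *p2)
{
	char *addr1 = kmap_atomic(p1);	/* mapped first  */
	char *addr2 = kmap_atomic(p2);	/* mapped second */
	int ret = memcmp(addr1, addr2, PAGE_SIZE);

	kunmap_atomic(addr2);	/* unmapped first: last in, first out */
	kunmap_atomic(addr1);
	return ret;
}
```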