author	Harvey Harrison <harvey.harrison@gmail.com>	2008-02-04 22:29:26 -0800
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-05 09:44:18 -0800
commit	920c7a5d0c94b8ce740f1d76fa06422f2a95a757 (patch)
tree	74ab4b9b5a6f4279b9b9d2a463c6700546ba0011 /mm/filemap.c
parent	1e548deb5d1630ca14ba04da04e3b6b3766178c7 (diff)
mm: remove fastcall from mm/
fastcall is always defined to be empty, remove it

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
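For context, a minimal stand-alone sketch (not kernel code; the helper names are made up for illustration) of why the removal is a no-op: with fastcall defined to an empty macro, as the commit message states, an annotated and an unannotated definition preprocess to the same thing.

/*
 * Illustrative sketch only, not kernel code: "fastcall" expands to
 * nothing here, matching the commit message, so the two helpers below
 * are identical after preprocessing and dropping the annotation does
 * not change the generated code.
 */
#define fastcall

static int fastcall add_annotated(int a, int b)
{
	return a + b;
}

static int add_plain(int a, int b)
{
	return a + b;
}

int main(void)
{
	/* Both calls use the same default calling convention. */
	return add_annotated(1, 2) - add_plain(1, 2);	/* exits 0 */
}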
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	10
1 files changed, 5 insertions, 5 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 96920f84056..81fb9bff0d4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -527,7 +527,7 @@ static inline void wake_up_page(struct page *page, int bit)
 	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
 }
-void fastcall wait_on_page_bit(struct page *page, int bit_nr)
+void wait_on_page_bit(struct page *page, int bit_nr)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
@@ -551,7 +551,7 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * the clear_bit and the read of the waitqueue (to avoid SMP races with a
  * parallel wait_on_page_locked()).
  */
-void fastcall unlock_page(struct page *page)
+void unlock_page(struct page *page)
 {
 	smp_mb__before_clear_bit();
 	if (!TestClearPageLocked(page))
@@ -585,7 +585,7 @@ EXPORT_SYMBOL(end_page_writeback);
  * chances are that on the second loop, the block layer's plug list is empty,
  * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
-void fastcall __lock_page(struct page *page)
+void __lock_page(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
@@ -606,7 +606,7 @@ int fastcall __lock_page_killable(struct page *page)
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
  */
-void fastcall __lock_page_nosync(struct page *page)
+void __lock_page_nosync(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 	__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
@@ -1276,7 +1276,7 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
  * This adds the requested page to the page cache if it isn't already there,
  * and schedules an I/O to read in its contents from disk.
  */
-static int fastcall page_cache_read(struct file * file, pgoff_t offset)
+static int page_cache_read(struct file *file, pgoff_t offset)
 {
 	struct address_space *mapping = file->f_mapping;
 	struct page *page;