author    Adrian Bunk <bunk@stusta.de>  2007-07-31 00:38:19 -0700
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-31 15:39:39 -0700
commit    99eb8a550dbccc0e1f6c7e866fe421810e0585f6 (patch)
tree      130c6e3338a0655ba74355eba83afab9261e1ed0 /arch/arm26/mm/small_page.c
parent    0d0ed42e5ca2e22465c591341839c18025748fe8 (diff)
Remove the arm26 port
The arm26 port has been in a state where it was far from even compiling
for quite some time.

Ian Molton agreed with the removal.

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Cc: Ian Molton <spyro@f2s.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/arm26/mm/small_page.c')
-rw-r--r--  arch/arm26/mm/small_page.c | 192
1 files changed, 0 insertions, 192 deletions
diff --git a/arch/arm26/mm/small_page.c b/arch/arm26/mm/small_page.c
deleted file mode 100644
index 30447106c25..00000000000
--- a/arch/arm26/mm/small_page.c
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * linux/arch/arm26/mm/small_page.c
- *
- * Copyright (C) 1996 Russell King
- * Copyright (C) 2003 Ian Molton
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Changelog:
- * 26/01/1996 RMK Cleaned up various areas to make little more generic
- * 07/02/1999 RMK Support added for 16K and 32K page sizes
- * containing 8K blocks
- * 23/05/2004 IM Fixed to use struct page->lru (thanks wli)
- *
- */
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/smp.h>
-#include <linux/bitops.h>
-
-#include <asm/pgtable.h>
-
-#define PEDANTIC
-
-/*
- * Requirement:
- * We need to be able to allocate naturally aligned memory of finer
- * granularity than the page size. This is typically used for the
- * second level page tables on 32-bit ARMs.
- *
- * FIXME - this comment is *out of date*
- * Theory:
- * We "misuse" the Linux memory management system. We use alloc_page
- * to allocate a page and then mark it as reserved. The Linux memory
- * management system will then ignore the "offset", "next_hash" and
- * "pprev_hash" entries in the mem_map for this page.
- *
- * We then use a bitstring in the "offset" field to mark which segments
- * of the page are in use, and manipulate this as required during the
- * allocation and freeing of these small pages.
- *
- * We also maintain a queue of pages being used for this purpose using
- * the "next_hash" and "pprev_hash" entries of mem_map;
- */
-
-struct order {
- struct list_head queue;
- unsigned int mask; /* (1 << shift) - 1 */
- unsigned int shift; /* (1 << shift) size of page */
- unsigned int block_mask; /* nr_blocks - 1 */
- unsigned int all_used; /* (1 << nr_blocks) - 1 */
-};
-
-
-static struct order orders[] = {
-#if PAGE_SIZE == 32768
- { LIST_HEAD_INIT(orders[0].queue), 2047, 11, 15, 0x0000ffff },
- { LIST_HEAD_INIT(orders[1].queue), 8191, 13, 3, 0x0000000f }
-#else
-#error unsupported page size (ARGH!)
-#endif
-};
-
-#define USED_MAP(pg) ((pg)->index)
-#define TEST_AND_CLEAR_USED(pg,off) (test_and_clear_bit(off, &USED_MAP(pg)))
-#define SET_USED(pg,off) (set_bit(off, &USED_MAP(pg)))
-
-static DEFINE_SPINLOCK(small_page_lock);
-
-static unsigned long __get_small_page(int priority, struct order *order)
-{
- unsigned long flags;
- struct page *page;
- int offset;
-
- do {
- spin_lock_irqsave(&small_page_lock, flags);
-
- if (list_empty(&order->queue))
- goto need_new_page;
-
- page = list_entry(order->queue.next, struct page, lru);
-again:
-#ifdef PEDANTIC
- BUG_ON(USED_MAP(page) & ~order->all_used);
-#endif
- offset = ffz(USED_MAP(page));
- SET_USED(page, offset);
- if (USED_MAP(page) == order->all_used)
- list_del_init(&page->lru);
- spin_unlock_irqrestore(&small_page_lock, flags);
-
- return (unsigned long) page_address(page) + (offset << order->shift);
-
-need_new_page:
- spin_unlock_irqrestore(&small_page_lock, flags);
- page = alloc_page(priority);
- spin_lock_irqsave(&small_page_lock, flags);
-
- if (list_empty(&order->queue)) {
- if (!page)
- goto no_page;
- SetPageReserved(page);
- USED_MAP(page) = 0;
- list_add(&page->lru, &order->queue);
- goto again;
- }
-
- spin_unlock_irqrestore(&small_page_lock, flags);
- __free_page(page);
- } while (1);
-
-no_page:
- spin_unlock_irqrestore(&small_page_lock, flags);
- return 0;
-}
-
-static void __free_small_page(unsigned long spage, struct order *order)
-{
- unsigned long flags;
- struct page *page;
-
- if (virt_addr_valid(spage)) {
- page = virt_to_page(spage);
-
- /*
- * The container-page must be marked Reserved
- */
- if (!PageReserved(page) || spage & order->mask)
- goto non_small;
-
-#ifdef PEDANTIC
- BUG_ON(USED_MAP(page) & ~order->all_used);
-#endif
-
- spage = spage >> order->shift;
- spage &= order->block_mask;
-
- /*
- * the following must be atomic wrt get_page
- */
- spin_lock_irqsave(&small_page_lock, flags);
-
- if (USED_MAP(page) == order->all_used)
- list_add(&page->lru, &order->queue);
-
- if (!TEST_AND_CLEAR_USED(page, spage))
- goto already_free;
-
- if (USED_MAP(page) == 0)
- goto free_page;
-
- spin_unlock_irqrestore(&small_page_lock, flags);
- }
- return;
-
-free_page:
- /*
- * unlink the page from the small page queue and free it
- */
- list_del_init(&page->lru);
- spin_unlock_irqrestore(&small_page_lock, flags);
- ClearPageReserved(page);
- __free_page(page);
- return;
-
-non_small:
- printk("Trying to free non-small page from %p\n", __builtin_return_address(0));
- return;
-already_free:
- printk("Trying to free free small page from %p\n", __builtin_return_address(0));
-}
-
-unsigned long get_page_8k(int priority)
-{
- return __get_small_page(priority, orders+1);
-}
-
-void free_page_8k(unsigned long spage)
-{
- __free_small_page(spage, orders+1);
-}
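
For readers who want the gist of what was removed: the file implemented a sub-page allocator that carves one naturally aligned page into equal blocks and tracks which blocks are handed out with a per-page bitmask (kept in page->index), keeping partially used pages on a queue. Below is a minimal userspace sketch of that bitmap scheme only, not the kernel code: small_pool, pool_alloc, pool_free and the use of posix_memalign() are invented here for illustration, whereas the real allocator used alloc_page(), page->index, the page lru list and the small_page_lock spinlock.

/*
 * Minimal userspace sketch of the bitmap scheme used by the removed
 * allocator: one naturally aligned "page" is carved into equal blocks,
 * and a per-pool bitmask records which blocks are handed out.  All
 * names here are illustrative, not the kernel API.
 */
#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define POOL_SIZE   32768u              /* "page" size, as in PAGE_SIZE == 32768   */
#define BLOCK_SHIFT 13                  /* 8K blocks, like the orders[1] entry      */
#define BLOCK_SIZE  (1u << BLOCK_SHIFT)
#define NR_BLOCKS   (POOL_SIZE / BLOCK_SIZE)
#define ALL_USED    ((1u << NR_BLOCKS) - 1)   /* 0x0000000f for four blocks         */

struct small_pool {
	void     *base;                 /* naturally aligned backing memory         */
	uint32_t  used;                 /* bit n set => block n is allocated        */
};

static int pool_init(struct small_pool *p)
{
	p->used = 0;
	/* posix_memalign stands in for the natural alignment of alloc_page() */
	return posix_memalign(&p->base, POOL_SIZE, POOL_SIZE);
}

static void *pool_alloc(struct small_pool *p)
{
	unsigned int bit;

	if (p->used == ALL_USED)
		return NULL;            /* kernel version would queue a new page    */

	/* find the first zero bit: the equivalent of ffz(USED_MAP(page)) */
	for (bit = 0; bit < NR_BLOCKS; bit++)
		if (!(p->used & (1u << bit)))
			break;

	p->used |= 1u << bit;           /* SET_USED(page, offset)                   */
	return (char *)p->base + (bit << BLOCK_SHIFT);
}

static void pool_free(struct small_pool *p, void *ptr)
{
	/* recover the block index from the sub-page offset, as __free_small_page() did */
	unsigned int bit = (unsigned int)(((uintptr_t)ptr - (uintptr_t)p->base) >> BLOCK_SHIFT);

	if (bit >= NR_BLOCKS || !(p->used & (1u << bit))) {
		fprintf(stderr, "bogus or double free of %p\n", ptr);
		return;
	}
	p->used &= ~(1u << bit);
}

int main(void)
{
	struct small_pool pool;
	void *a, *b;

	if (pool_init(&pool))
		return 1;

	a = pool_alloc(&pool);          /* block 0 */
	b = pool_alloc(&pool);          /* block 1 */
	printf("a=%p b=%p used=0x%x\n", a, b, (unsigned)pool.used);

	pool_free(&pool, a);
	pool_free(&pool, a);            /* reported, like the "free free small page" check */
	printf("used=0x%x\n", (unsigned)pool.used);

	free(pool.base);
	return 0;
}

Expected behaviour: a and b come back 8K apart, used goes 0x3 after the two allocations and 0x2 after the first free, and the second free of the same block is reported, mirroring the "Trying to free free small page" warning in the deleted code.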