Diffstat (limited to 'include/linux')
28 files changed, 587 insertions, 56 deletions
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index dd27b1c7227..5c6e12853a9 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -165,7 +165,7 @@
 int sync_mapping_buffers(struct address_space *mapping);
 void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
 void mark_buffer_async_write(struct buffer_head *bh);
-void invalidate_bdev(struct block_device *, int);
+void invalidate_bdev(struct block_device *);
 int sync_blockdev(struct block_device *bdev);
 void __wait_on_buffer(struct buffer_head *);
 wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
@@ -182,6 +182,7 @@
 void __brelse(struct buffer_head *);
 void __bforget(struct buffer_head *);
 void __breadahead(struct block_device *, sector_t block, unsigned int size);
 struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size);
+void invalidate_bh_lrus(void);
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
 void FASTCALL(unlock_buffer(struct buffer_head *bh));
@@ -319,7 +320,7 @@
 static inline int inode_has_buffers(struct inode *inode) { return 0; }
 static inline void invalidate_inode_buffers(struct inode *inode) {}
 static inline int remove_inode_buffers(struct inode *inode) { return 1; }
 static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
-static inline void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers) {}
+static inline void invalidate_bdev(struct block_device *bdev) {}

 #endif /* CONFIG_BLOCK */
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 9008eabb9c3..a9f794716a8 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -22,6 +22,9 @@
     __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
     (typeof(ptr)) (__ptr + (off)); })

+/* &a[0] degrades to a pointer: a different type from an array */
+#define __must_be_array(a) \
+    BUILD_BUG_ON_ZERO(__builtin_types_compatible_p(typeof(a), typeof(&a[0])))

 #define inline inline __attribute__((always_inline))
 #define __inline__ __inline__ __attribute__((always_inline))
diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h
index 1698b845761..ecd621fd27d 100644
--- a/include/linux/compiler-gcc3.h
+++ b/include/linux/compiler-gcc3.h
@@ -13,4 +13,10 @@
 #define __must_check __attribute__((warn_unused_result))
 #endif

+/*
+ * A trick to suppress uninitialized variable warning without generating any
+ * code
+ */
+#define uninitialized_var(x) x = x
+
 #define __always_inline inline __attribute__((always_inline))
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 6f5cc6f0e7a..fd0cc7c4a63 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -16,3 +16,9 @@
 #define __must_check __attribute__((warn_unused_result))
 #define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
 #define __always_inline inline __attribute__((always_inline))
+
+/*
+ * A trick to suppress uninitialized variable warning without generating any
+ * code
+ */
+#define uninitialized_var(x) x = x
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index 1d1c3ceaff4..b769961e6f2 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -21,4 +21,9 @@
     __ptr = (unsigned long) (ptr); \
     (typeof(ptr)) (__ptr + (off)); })

+/* Intel ECC compiler doesn't support __builtin_types_compatible_p() */
+#define __must_be_array(a) 0
+
 #endif
+
+#define uninitialized_var(x) x
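The gcc variants expand uninitialized_var(x) into a self-assignment, which satisfies the compiler's flow analysis without emitting any instructions; icc needs no trick at all. A minimal sketch of the intended use — the lookup code around it is invented for illustration, not taken from the patch:

static int lookup_value(const int *keys, const int *vals, int n, int key)
{
    int uninitialized_var(ret);  /* gcc: expands to "int ret = ret;" */
    int found = 0;
    int i;

    for (i = 0; i < n; i++) {
        if (keys[i] == key) {
            ret = vals[i];
            found = 1;
            break;
        }
    }
    /*
     * 'ret' is only read when 'found' is set, but gcc cannot prove
     * that and would otherwise warn about a possibly uninitialized use.
     */
    return found ? ret : -1;
}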
diff --git a/include/linux/elf-em.h b/include/linux/elf-em.h
index 666e0a5f00f..0311bad838b 100644
--- a/include/linux/elf-em.h
+++ b/include/linux/elf-em.h
@@ -30,6 +30,7 @@
 #define EM_V850 87 /* NEC v850 */
 #define EM_M32R 88 /* Renesas M32R */
 #define EM_H8_300 46 /* Renesas H8/300,300H,H8S */
+#define EM_BLACKFIN 106 /* ADI Blackfin Processor */
 #define EM_FRV 0x5441 /* Fujitsu FR-V */
 #define EM_AVR32 0x18ad /* Atmel AVR32 */

diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 745c988359c..071c67abed8 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -71,6 +71,18 @@
 }

 /**
+ * is_local_ether_addr - Determine if the Ethernet address is a locally-assigned one (IEEE 802).
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Return true if the address is a local address.
+ */
+static inline int is_local_ether_addr(const u8 *addr)
+{
+    return (0x02 & addr[0]);
+}
+
+/**
 * is_broadcast_ether_addr - Determine if the Ethernet address is broadcast
 * @addr: Pointer to a six-byte array containing the Ethernet address
 *
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7c0077f06e2..55a74ffa7e3 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -696,12 +696,13 @@ struct file_ra_state {
     unsigned long size;
     unsigned long flags;       /* ra flags RA_FLAG_xxx*/
     unsigned long cache_hit;   /* cache hit count*/
-    unsigned long prev_page;   /* Cache last read() position */
+    unsigned long prev_index;  /* Cache last read() position */
     unsigned long ahead_start; /* Ahead window */
     unsigned long ahead_size;
     unsigned long ra_pages;    /* Maximum readahead window */
     unsigned long mmap_hit;    /* Cache hit stat for mmap accesses */
     unsigned long mmap_miss;   /* Cache miss stat for mmap accesses */
+    unsigned int prev_offset;  /* Offset where last read() ended in a page */
 };
 #define RA_FLAG_MISS 0x01 /* a cache miss occurred against this file */
 #define RA_FLAG_INCACHE 0x02 /* file is already in cache */
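The new helper just tests the locally-administered bit, bit 1 of the first octet (IEEE 802). A quick illustration, not taken from the patch:

static void ether_addr_example(void)
{
    u8 local[ETH_ALEN]  = { 0x02, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    u8 global[ETH_ALEN] = { 0x00, 0x16, 0x76, 0x00, 0x00, 0x01 };

    /* first octet 0x02: locally-administered bit set -> nonzero */
    BUG_ON(!is_local_ether_addr(local));
    /* first octet 0x00: globally unique (OUI-derived) -> zero */
    BUG_ON(is_local_ether_addr(global));
}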
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 2a7d15bcde4..97a36c3d96e 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -40,7 +40,6 @@ struct vm_area_struct;
 #define __GFP_REPEAT ((__force gfp_t)0x400u) /* Retry the allocation. Might fail */
 #define __GFP_NOFAIL ((__force gfp_t)0x800u) /* Retry for ever. Cannot fail */
 #define __GFP_NORETRY ((__force gfp_t)0x1000u)/* Do not retry. Might fail */
-#define __GFP_NO_GROW ((__force gfp_t)0x2000u)/* Slab internal usage */
 #define __GFP_COMP ((__force gfp_t)0x4000u)/* Add compound page metadata */
 #define __GFP_ZERO ((__force gfp_t)0x8000u)/* Return zeroed page on success */
 #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
@@ -53,7 +52,7 @@ struct vm_area_struct;
 /* if you forget to add the bitmask here kernel will crash, period */
 #define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
             __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
-            __GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
+            __GFP_NOFAIL|__GFP_NORETRY|__GFP_COMP| \
             __GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE)

 /* This equals 0, but use constants in case they ever change */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 3f3e7a648da..b4570b62ab8 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -189,4 +189,10 @@ static inline void set_file_hugepages(struct file *file)

 #endif /* !CONFIG_HUGETLBFS */

+#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+                    unsigned long len, unsigned long pgoff,
+                    unsigned long flags);
+#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
+
 #endif /* _LINUX_HUGETLB_H */
diff --git a/include/linux/init.h b/include/linux/init.h
index 9abf120ec9f..dbbdbd1bec7 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -233,7 +233,7 @@ void __init parse_early_param(void);
 #define __obsolete_setup(str) /* nothing */
 #endif

-/* Data marked not to be saved by software_suspend() */
+/* Data marked not to be saved by software suspend */
 #define __nosavedata __attribute__ ((__section__ (".data.nosave")))

 /* This means "can be init if no module support, otherwise module load
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index e2f41b051b1..144b615f3a8 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -35,7 +35,8 @@ extern const char linux_proc_banner[];
 #define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
 #define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))

-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+
 #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
 #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
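Together with __must_be_array() above, the rewritten ARRAY_SIZE() rejects pointers at compile time instead of silently computing sizeof(ptr)/sizeof(elem). An illustrative sketch:

static void array_size_example(void)
{
    int ints[8];
    int *p = ints;
    size_t n = ARRAY_SIZE(ints);    /* still 8, as before */

    /*
     * ARRAY_SIZE(p) used to compile and yield sizeof(int *)/sizeof(int)
     * (1 or 2 on common targets) -- a classic bug. For a pointer,
     * __builtin_types_compatible_p(typeof(p), typeof(&p[0])) is 1, so
     * BUILD_BUG_ON_ZERO() now breaks the build instead:
     *
     *    size_t bad = ARRAY_SIZE(p);    // compile error
     */
    (void)n;
    (void)p;
}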
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 75e55dcdeb1..e10a90a93b5 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -2,18 +2,29 @@
 #define _LINUX_MIGRATE_H

 #include <linux/mm.h>
+#include <linux/mempolicy.h>
+#include <linux/pagemap.h>

 typedef struct page *new_page_t(struct page *, unsigned long private, int **);

+#ifdef CONFIG_MIGRATION
 /* Check if a vma is migratable */
 static inline int vma_migratable(struct vm_area_struct *vma)
 {
     if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
         return 0;
+    /*
+     * Migration allocates pages in the highest zone. If we cannot
+     * do so then migration (at least from node to node) is not
+     * possible.
+     */
+    if (vma->vm_file &&
+        gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
+                        < policy_zone)
+        return 0;
     return 1;
 }

-#ifdef CONFIG_MIGRATION
 extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
 extern int putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
@@ -28,6 +39,8 @@ extern int migrate_vmas(struct mm_struct *mm,
         const nodemask_t *from, const nodemask_t *to,
         unsigned long flags);
 #else
+static inline int vma_migratable(struct vm_area_struct *vma)
+                    { return 0; }
 static inline int isolate_lru_page(struct page *p, struct list_head *list)
                     { return -ENOSYS; }
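With this change vma_migratable() only exists meaningfully under CONFIG_MIGRATION; the new stub makes every vma non-migratable otherwise. A hedged sketch of a caller in the spirit of the mempolicy code — the function is invented, and the caller is assumed to hold mmap_sem for reading:

static unsigned long count_migratable_vmas(struct mm_struct *mm)
{
    struct vm_area_struct *vma;
    unsigned long n = 0;

    /* caller must hold down_read(&mm->mmap_sem) */
    for (vma = mm->mmap; vma; vma = vma->vm_next)
        if (vma_migratable(vma))
            n++;
    return n;
}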
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 60e0e4a592d..4670ebd1f62 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -267,21 +267,31 @@ static inline int get_page_unless_zero(struct page *page)
     return atomic_inc_not_zero(&page->_count);
 }

+static inline struct page *compound_head(struct page *page)
+{
+    if (unlikely(PageTail(page)))
+        return page->first_page;
+    return page;
+}
+
 static inline int page_count(struct page *page)
 {
-    if (unlikely(PageCompound(page)))
-        page = (struct page *)page_private(page);
-    return atomic_read(&page->_count);
+    return atomic_read(&compound_head(page)->_count);
 }

 static inline void get_page(struct page *page)
 {
-    if (unlikely(PageCompound(page)))
-        page = (struct page *)page_private(page);
+    page = compound_head(page);
     VM_BUG_ON(atomic_read(&page->_count) == 0);
     atomic_inc(&page->_count);
 }

+static inline struct page *virt_to_head_page(const void *x)
+{
+    struct page *page = virt_to_page(x);
+    return compound_head(page);
+}
+
 /*
  * Setup the page count before being freed into the page allocator for
  * the first time (boot or memory hotplug)
@@ -314,6 +324,18 @@ static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
     return (compound_page_dtor *)page[1].lru.next;
 }

+static inline int compound_order(struct page *page)
+{
+    if (!PageHead(page))
+        return 0;
+    return (unsigned long)page[1].lru.prev;
+}
+
+static inline void set_compound_order(struct page *page, unsigned long order)
+{
+    page[1].lru.prev = (void *)order;
+}
+
 /*
  * Multiple processes may "see" the same page. E.g. for untouched
  * mappings of /dev/null, all processes see the same page full of
@@ -850,8 +872,26 @@ static inline int vma_wants_writenotify(struct vm_area_struct *vma)
 extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr,
             spinlock_t **ptl));

+#ifdef __PAGETABLE_PUD_FOLDED
+static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
+                        unsigned long address)
+{
+    return 0;
+}
+#else
 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
+#endif
+
+#ifdef __PAGETABLE_PMD_FOLDED
+static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
+                        unsigned long address)
+{
+    return 0;
+}
+#else
 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
+#endif
+
 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
@@ -1130,6 +1170,11 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_GET 0x04 /* do get_page on page */
 #define FOLL_ANON 0x08 /* give ZERO_PAGE if no pgtable */

+typedef int (*pte_fn_t)(pte_t *pte, struct page *pmd_page, unsigned long addr,
+            void *data);
+extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
+                   unsigned long size, pte_fn_t fn, void *data);
+
 #ifdef CONFIG_PROC_FS
 void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
 #else
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index c3852fd4a1c..e30687bad07 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -19,10 +19,16 @@ struct page {
     unsigned long flags;       /* Atomic flags, some possibly
                     * updated asynchronously */
     atomic_t _count;           /* Usage count, see below. */
-    atomic_t _mapcount;        /* Count of ptes mapped in mms,
+    union {
+        atomic_t _mapcount;    /* Count of ptes mapped in mms,
                     * to show when page is mapped
                     * & limit reverse map searches.
                     */
+        struct {    /* SLUB uses */
+            short unsigned int inuse;
+            short unsigned int offset;
+        };
+    };
     union {
         struct {
         unsigned long private;     /* Mapping-private opaque data:
@@ -43,8 +49,15 @@ struct page {
 #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
         spinlock_t ptl;
 #endif
+        struct {            /* SLUB uses */
+            struct page *first_page;    /* Compound pages */
+            struct kmem_cache *slab;    /* Pointer to slab */
+        };
+    };
+    union {
+        pgoff_t index;      /* Our offset within mapping. */
+        void *freelist;     /* SLUB: pointer to free object */
     };
-    pgoff_t index;         /* Our offset within mapping. */
     struct list_head lru;      /* Pageout list, eg. active_list
                     * protected by zone->lru_lock !
                     */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ee9e3143df4..2f1544e8304 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -784,6 +784,18 @@ void sparse_init(void);
 void memory_present(int nid, unsigned long start, unsigned long end);
 unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

+/*
+ * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
+ * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
+ * pfn_valid_within() should be used in this case; we optimise this away
+ * when we have no holes within a MAX_ORDER_NR_PAGES block.
+ */
+#ifdef CONFIG_HOLES_IN_ZONE
+#define pfn_valid_within(pfn) pfn_valid(pfn)
+#else
+#define pfn_valid_within(pfn) (1)
+#endif
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MMZONE_H */
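The net effect of the mm.h and mm_types.h changes: tail pages now point at their head through first_page instead of page_private(), and the compound order is stashed in page[1].lru.prev. An illustrative sketch of what the new helpers yield for an order-2 allocation (not from the patch):

static void compound_example(void)
{
    /* four contiguous pages with head/tail metadata */
    struct page *head = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);
    struct page *tail;

    if (!head)
        return;
    tail = head + 3;

    BUG_ON(compound_head(tail) != head);    /* via tail->first_page */
    BUG_ON(compound_order(head) != 2);      /* from page[1].lru.prev */

    get_page(tail);     /* now bumps the head page's _count */
    BUG_ON(page_count(tail) != page_count(head));
    put_page(tail);

    __free_pages(head, 2);
}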
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 96326594e55..ae2d79f2107 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -6,6 +6,7 @@
 #define PAGE_FLAGS_H

 #include <linux/types.h>
+#include <linux/mm_types.h>

 /*
  * Various page->flags bits:
@@ -82,13 +83,11 @@
 #define PG_private 11 /* If pagecache, has fs-private data */
 #define PG_writeback 12 /* Page is under writeback */
-#define PG_nosave 13 /* Used for system suspend/resume */
 #define PG_compound 14 /* Part of a compound page */
 #define PG_swapcache 15 /* Swap page: swp_entry_t in private */

 #define PG_mappedtodisk 16 /* Has blocks allocated on-disk */
 #define PG_reclaim 17 /* To be reclaimed asap */
-#define PG_nosave_free 18 /* Used for system suspend/resume */
 #define PG_buddy 19 /* Page is free, on buddy lists */

 /* PG_owner_priv_1 users should have descriptive aliases */
@@ -214,16 +213,6 @@ static inline void SetPageUptodate(struct page *page)
     ret;                            \
 })

-#define PageNosave(page) test_bit(PG_nosave, &(page)->flags)
-#define SetPageNosave(page) set_bit(PG_nosave, &(page)->flags)
-#define TestSetPageNosave(page) test_and_set_bit(PG_nosave, &(page)->flags)
-#define ClearPageNosave(page) clear_bit(PG_nosave, &(page)->flags)
-#define TestClearPageNosave(page) test_and_clear_bit(PG_nosave, &(page)->flags)
-
-#define PageNosaveFree(page) test_bit(PG_nosave_free, &(page)->flags)
-#define SetPageNosaveFree(page) set_bit(PG_nosave_free, &(page)->flags)
-#define ClearPageNosaveFree(page) clear_bit(PG_nosave_free, &(page)->flags)
-
 #define PageBuddy(page) test_bit(PG_buddy, &(page)->flags)
 #define __SetPageBuddy(page) __set_bit(PG_buddy, &(page)->flags)
 #define __ClearPageBuddy(page) __clear_bit(PG_buddy, &(page)->flags)
@@ -241,6 +230,34 @@
 #define __SetPageCompound(page) __set_bit(PG_compound, &(page)->flags)
 #define __ClearPageCompound(page) __clear_bit(PG_compound, &(page)->flags)

+/*
+ * PG_reclaim is used in combination with PG_compound to mark the
+ * head and tail of a compound page
+ *
+ * PG_compound & PG_reclaim    => Tail page
+ * PG_compound & ~PG_reclaim   => Head page
+ */
+
+#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
+
+#define PageTail(page) ((page->flags & PG_head_tail_mask) \
+                == PG_head_tail_mask)
+
+static inline void __SetPageTail(struct page *page)
+{
+    page->flags |= PG_head_tail_mask;
+}
+
+static inline void __ClearPageTail(struct page *page)
+{
+    page->flags &= ~PG_head_tail_mask;
+}
+
+#define PageHead(page) ((page->flags & PG_head_tail_mask) \
+                == (1L << PG_compound))
+#define __SetPageHead(page) __SetPageCompound(page)
+#define __ClearPageHead(page) __ClearPageCompound(page)
+
 #ifdef CONFIG_SWAP
 #define PageSwapCache(page) test_bit(PG_swapcache, &(page)->flags)
 #define SetPageSwapCache(page) set_bit(PG_swapcache, &(page)->flags)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7a8dcb82a69..b4def5e083e 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -95,12 +95,23 @@ static inline struct page *grab_cache_page(struct address_space *mapping, unsigned long index)

 extern struct page * grab_cache_page_nowait(struct address_space *mapping,
                 unsigned long index);
+extern struct page * read_cache_page_async(struct address_space *mapping,
+                unsigned long index, filler_t *filler,
+                void *data);
 extern struct page * read_cache_page(struct address_space *mapping,
                 unsigned long index, filler_t *filler,
                 void *data);
 extern int read_cache_pages(struct address_space *mapping,
         struct list_head *pages, filler_t *filler, void *data);

+static inline struct page *read_mapping_page_async(
+                        struct address_space *mapping,
+                        unsigned long index, void *data)
+{
+    filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+    return read_cache_page_async(mapping, index, filler, data);
+}
+
 static inline struct page *read_mapping_page(struct address_space *mapping,
                         unsigned long index, void *data)
 {
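read_cache_page_async() starts the read but, unlike read_cache_page(), does not wait for the page to become up to date. A hedged usage sketch of the new wrapper — the synchronization shown mirrors what the synchronous variant does, and the function name is invented:

static struct page *fetch_page(struct address_space *mapping,
                   unsigned long index)
{
    struct page *page;

    /* kicks off ->readpage() if needed, does not wait for completion */
    page = read_mapping_page_async(mapping, index, NULL);
    if (IS_ERR(page))
        return page;

    /* do other work here, then synchronize before touching the data */
    wait_on_page_locked(page);
    if (!PageUptodate(page)) {
        page_cache_release(page);
        return ERR_PTR(-EIO);
    }
    return page;
}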
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 89580b76495..95f518b1768 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -18,6 +18,9 @@
 #define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */
 #define RED_ACTIVE 0x170FC2A5UL /* when obj is active */

+#define SLUB_RED_INACTIVE 0xbb
+#define SLUB_RED_ACTIVE 0xcc
+
 /* ...and for poisoning */
 #define POISON_INUSE 0x5a /* for use-uninitialised poisoning */
 #define POISON_FREE 0x6b /* for use-after-free poisoning */
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index be4652a0545..f4f7a63cae1 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -104,6 +104,7 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir);
 unsigned long task_vsize(struct mm_struct *);
 int task_statm(struct mm_struct *, int *, int *, int *, int *);
 char *task_mem(struct mm_struct *, char *);
+void clear_refs_smap(struct mm_struct *mm);

 extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
                         struct proc_dir_entry *parent);
diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h
new file mode 100644
index 00000000000..9371c6116df
--- /dev/null
+++ b/include/linux/quicklist.h
@@ -0,0 +1,94 @@
+#ifndef LINUX_QUICKLIST_H
+#define LINUX_QUICKLIST_H
+/*
+ * Fast allocations and disposal of pages. Pages must be in the condition
+ * as needed after allocation when they are freed. Per cpu lists of pages
+ * are kept that only contain node local pages.
+ *
+ * (C) 2007, SGI. Christoph Lameter <clameter@sgi.com>
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/percpu.h>
+
+#ifdef CONFIG_QUICKLIST
+
+struct quicklist {
+    void *page;
+    int nr_pages;
+};
+
+DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
+
+/*
+ * The two key functions quicklist_alloc and quicklist_free are inline so
+ * that they may be custom compiled for the platform.
+ * Specifying a NULL ctor can remove constructor support. Specifying
+ * a constant quicklist allows the determination of the exact address
+ * in the per cpu area.
+ *
+ * The fast path in quicklist_alloc touches only a per cpu cacheline and
+ * the first cacheline of the page itself. There is minimal overhead involved.
+ */
+static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *))
+{
+    struct quicklist *q;
+    void **p = NULL;
+
+    q = &get_cpu_var(quicklist)[nr];
+    p = q->page;
+    if (likely(p)) {
+        q->page = p[0];
+        p[0] = NULL;
+        q->nr_pages--;
+    }
+    put_cpu_var(quicklist);
+    if (likely(p))
+        return p;
+
+    p = (void *)__get_free_page(flags | __GFP_ZERO);
+    if (ctor && p)
+        ctor(p);
+    return p;
+}
+
+static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p,
+    struct page *page)
+{
+    struct quicklist *q;
+    int nid = page_to_nid(page);
+
+    if (unlikely(nid != numa_node_id())) {
+        if (dtor)
+            dtor(p);
+        __free_page(page);
+        return;
+    }
+
+    q = &get_cpu_var(quicklist)[nr];
+    *(void **)p = q->page;
+    q->page = p;
+    q->nr_pages++;
+    put_cpu_var(quicklist);
+}
+
+static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp)
+{
+    __quicklist_free(nr, dtor, pp, virt_to_page(pp));
+}
+
+static inline void quicklist_free_page(int nr, void (*dtor)(void *),
+                        struct page *page)
+{
+    __quicklist_free(nr, dtor, page_address(page), page);
+}
+
+void quicklist_trim(int nr, void (*dtor)(void *),
+    unsigned long min_pages, unsigned long max_free);
+
+unsigned long quicklist_total_size(void);
+
+#endif
+
+#endif /* LINUX_QUICKLIST_H */
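Quicklists let an architecture recycle page-table pages that are already in a known-good (zeroed) state. A sketch of the intended per-arch usage with list 0 holding pgds; the wiring follows the usual pgalloc conventions but is illustrative here:

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
    /* reuses a node-local, already-zeroed page when one is cached */
    return (pgd_t *)quicklist_alloc(0, GFP_KERNEL, NULL);
}

static inline void pgd_free(pgd_t *pgd)
{
    /* the page goes back on this CPU's list still in "ready" state */
    quicklist_free(0, NULL, pgd);
}

static inline void check_pgt_cache(void)
{
    /* periodically return surplus pages to the page allocator */
    quicklist_trim(0, NULL, 25, 16);
}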
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 586aaba9172..aa2653a159f 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -39,7 +39,8 @@
 #define PORT_RSA 13
 #define PORT_NS16550A 14
 #define PORT_XSCALE 15
-#define PORT_MAX_8250 15 /* max port ID */
+#define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */
+#define PORT_MAX_8250 16 /* max port ID */

 /*
  * ARM specific type numbers.  These are not currently guaranteed
@@ -135,6 +136,9 @@
 /* Xilinx uartlite */
 #define PORT_UARTLITE 74

+/* Blackfin bf5xx */
+#define PORT_BFIN 75
+
 #ifdef __KERNEL__
 #include <linux/compiler.h>
@@ -230,6 +234,8 @@ struct uart_port {
 #define UPIO_MEM32 (3)
 #define UPIO_AU (4) /* Au1x00 type IO */
 #define UPIO_TSI (5) /* Tsi108/109 type IO */
+#define UPIO_DWAPB (6) /* DesignWare APB UART */
+#define UPIO_RM9000 (7) /* RM9000 type IO */

     unsigned int read_status_mask;   /* driver specific */
     unsigned int ignore_status_mask; /* driver specific */
@@ -260,6 +266,7 @@ struct uart_port {
 #define UPF_CONS_FLOW ((__force upf_t) (1 << 23))
 #define UPF_SHARE_IRQ ((__force upf_t) (1 << 24))
 #define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28))
+#define UPF_FIXED_PORT ((__force upf_t) (1 << 29))
 #define UPF_DEAD ((__force upf_t) (1 << 30))
 #define UPF_IOREMAP ((__force upf_t) (1 << 31))
@@ -276,6 +283,7 @@ struct uart_port {
     struct device *dev;     /* parent device */
     unsigned char hub6;     /* this should be in the 8250 driver */
     unsigned char unused[3];
+    void *private_data;     /* generic platform data pointer */
 };

 /*
diff --git a/include/linux/serial_reg.h b/include/linux/serial_reg.h
index 3c8a6aa7741..1c5ed7d92b0 100644
--- a/include/linux/serial_reg.h
+++ b/include/linux/serial_reg.h
@@ -38,6 +38,8 @@
 #define UART_IIR_RDI 0x04 /* Receiver data interrupt */
 #define UART_IIR_RLSI 0x06 /* Receiver line status interrupt */

+#define UART_IIR_BUSY 0x07 /* DesignWare APB Busy Detect */
+
 #define UART_FCR 2 /* Out: FIFO Control Register */
 #define UART_FCR_ENABLE_FIFO 0x01 /* Enable the FIFO */
 #define UART_FCR_CLEAR_RCVR 0x02 /* Clear the RCVR FIFO */
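Taken together, the serial additions would be used by new bus glue roughly as below: UPIO_DWAPB selects DesignWare-aware I/O (whose busy-detect interrupt is the new UART_IIR_BUSY), UPF_FIXED_PORT pins the probed configuration, and private_data carries driver state. A hedged sketch — the base address, IRQ, clock, and state struct are made-up values:

#define MY_UART_BASE 0xf0001000 /* hypothetical */
#define MY_UART_IRQ  14         /* hypothetical */

static struct my_dwapb_state {
    void __iomem *usr;  /* e.g. a UART Status Register mapping */
} my_state;

static void my_uart_setup(struct uart_port *port)
{
    port->mapbase = MY_UART_BASE;
    port->membase = ioremap(MY_UART_BASE, 0x100);
    port->irq = MY_UART_IRQ;
    port->uartclk = 75000000;
    port->iotype = UPIO_DWAPB;  /* handle UART_IIR_BUSY on this port */
    port->flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT;
    port->private_data = &my_state;
}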
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1ef822e31c7..71829efc40b 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -21,28 +21,25 @@ typedef struct kmem_cache kmem_cache_t __deprecated;
 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
 */
 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
-#define SLAB_DEBUG_INITIAL 0x00000200UL /* DEBUG: Call constructor (as verifier) */
 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
-#define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* Force alignment even if debuggin is active */
 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
 #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
 #define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
 #define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
+#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */

 /* Flags passed to a constructor functions */
 #define SLAB_CTOR_CONSTRUCTOR 0x001UL /* If not set, then deconstructor */
-#define SLAB_CTOR_ATOMIC 0x002UL /* Tell constructor it can't sleep */
-#define SLAB_CTOR_VERIFY 0x004UL /* Tell constructor it's a verify call */

 /*
 * struct kmem_cache related prototypes
 */
 void __init kmem_cache_init(void);
-extern int slab_is_available(void);
+int slab_is_available(void);

 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
                      unsigned long,
@@ -57,6 +54,18 @@ unsigned int kmem_cache_size(struct kmem_cache *);
 const char *kmem_cache_name(struct kmem_cache *);
 int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);

+/*
+ * Please use this macro to create slab caches. Simply specify the
+ * name of the structure and maybe some flags that are listed above.
+ *
+ * The alignment of the struct determines object alignment. If you
+ * f.e. add ____cacheline_aligned_in_smp to the struct declaration
+ * then the objects will be properly aligned in SMP configurations.
+ */
+#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
+        sizeof(struct __struct), __alignof__(struct __struct),\
+        (__flags), NULL, NULL)
+
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 #else
@@ -72,8 +81,9 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
 */
 void *__kmalloc(size_t, gfp_t);
 void *__kzalloc(size_t, gfp_t);
+void * __must_check krealloc(const void *, size_t, gfp_t);
 void kfree(const void *);
-unsigned int ksize(const void *);
+size_t ksize(const void *);

 /**
 * kcalloc - allocate memory for an array. The memory is set to zero.
@@ -94,9 +104,14 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 * the appropriate general cache at compile time.
 */

-#ifdef CONFIG_SLAB
+#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
+#ifdef CONFIG_SLUB
+#include <linux/slub_def.h>
+#else
 #include <linux/slab_def.h>
+#endif /* !CONFIG_SLUB */
 #else
+
 /*
 * Fallback definitions for an allocator not wanting to provide
 * its own optimized kmalloc definitions (like SLOB).
@@ -183,7 +198,7 @@ static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
 extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
 #define kmalloc_track_caller(size, flags) \
     __kmalloc_track_caller(size, flags, __builtin_return_address(0))
@@ -201,7 +216,7 @@
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
 #define kmalloc_node_track_caller(size, flags, node) \
     __kmalloc_node_track_caller(size, flags, node, \
@@ -218,6 +233,9 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);

 #endif /* DEBUG_SLAB */

+extern const struct seq_operations slabinfo_op;
+ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SLAB_H */
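Two caller-visible additions here: the KMEM_CACHE() convenience macro and a __must_check krealloc(). A sketch of both; the struct and helper are invented:

struct widget {
    struct list_head node;
    int id;
};

static struct kmem_cache *widget_cache;

static int widget_cache_init(void)
{
    /* same as kmem_cache_create("widget", sizeof(struct widget),
     *      __alignof__(struct widget), SLAB_PANIC, NULL, NULL) */
    widget_cache = KMEM_CACHE(widget, SLAB_PANIC);
    return 0;
}

static char *grow_buffer(char *buf, size_t new_len)
{
    /* krealloc() is __must_check: keep the old pointer until the
     * resize is known to have succeeded (it is untouched on failure) */
    char *tmp = krealloc(buf, new_len, GFP_KERNEL);

    if (!tmp)
        kfree(buf);
    return tmp;
}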
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
new file mode 100644
index 00000000000..ea27065e80e
--- /dev/null
+++ b/include/linux/slub_def.h
@@ -0,0 +1,206 @@
+#ifndef _LINUX_SLUB_DEF_H
+#define _LINUX_SLUB_DEF_H
+
+/*
+ * SLUB : A Slab allocator without object queues.
+ *
+ * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
+ */
+#include <linux/types.h>
+#include <linux/gfp.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+
+struct kmem_cache_node {
+    spinlock_t list_lock;   /* Protect partial list and nr_partial */
+    unsigned long nr_partial;
+    atomic_long_t nr_slabs;
+    struct list_head partial;
+    struct list_head full;
+};
+
+/*
+ * Slab cache management.
+ */
+struct kmem_cache {
+    /* Used for retrieving partial slabs etc */
+    unsigned long flags;
+    int size;       /* The size of an object including meta data */
+    int objsize;    /* The size of an object without meta data */
+    int offset;     /* Free pointer offset. */
+    unsigned int order;
+
+    /*
+     * Avoid an extra cache line for UP, SMP and for the node local to
+     * struct kmem_cache.
+     */
+    struct kmem_cache_node local_node;
+
+    /* Allocation and freeing of slabs */
+    int objects;    /* Number of objects in slab */
+    int refcount;   /* Refcount for slab cache destroy */
+    void (*ctor)(void *, struct kmem_cache *, unsigned long);
+    void (*dtor)(void *, struct kmem_cache *, unsigned long);
+    int inuse;      /* Offset to metadata */
+    int align;      /* Alignment */
+    const char *name;       /* Name (only for display!) */
+    struct list_head list;  /* List of slab caches */
+    struct kobject kobj;    /* For sysfs */
+
+#ifdef CONFIG_NUMA
+    int defrag_ratio;
+    struct kmem_cache_node *node[MAX_NUMNODES];
+#endif
+    struct page *cpu_slab[NR_CPUS];
+};
+
+/*
+ * Kmalloc subsystem.
+ */
+#define KMALLOC_SHIFT_LOW 3
+
+#ifdef CONFIG_LARGE_ALLOCS
+#define KMALLOC_SHIFT_HIGH 25
+#else
+#if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256
+#define KMALLOC_SHIFT_HIGH 20
+#else
+#define KMALLOC_SHIFT_HIGH 18
+#endif
+#endif
+
+/*
+ * We keep the general caches in an array of slab caches that are used for
+ * 2^x bytes of allocations.
+ */
+extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+
+/*
+ * Sorry that the following has to be that ugly but some versions of GCC
+ * have trouble with constant propagation and loops.
+ */
+static inline int kmalloc_index(int size)
+{
+    /*
+     * We should return 0 if size == 0 but we use the smallest object
+     * here for SLAB legacy reasons.
+     */
+    WARN_ON_ONCE(size == 0);
+
+    if (size > 64 && size <= 96)
+        return 1;
+    if (size > 128 && size <= 192)
+        return 2;
+    if (size <= 8) return 3;
+    if (size <= 16) return 4;
+    if (size <= 32) return 5;
+    if (size <= 64) return 6;
+    if (size <= 128) return 7;
+    if (size <= 256) return 8;
+    if (size <= 512) return 9;
+    if (size <= 1024) return 10;
+    if (size <= 2 * 1024) return 11;
+    if (size <= 4 * 1024) return 12;
+    if (size <= 8 * 1024) return 13;
+    if (size <= 16 * 1024) return 14;
+    if (size <= 32 * 1024) return 15;
+    if (size <= 64 * 1024) return 16;
+    if (size <= 128 * 1024) return 17;
+    if (size <= 256 * 1024) return 18;
+#if KMALLOC_SHIFT_HIGH > 18
+    if (size <= 512 * 1024) return 19;
+    if (size <= 1024 * 1024) return 20;
+#endif
+#if KMALLOC_SHIFT_HIGH > 20
+    if (size <= 2 * 1024 * 1024) return 21;
+    if (size <= 4 * 1024 * 1024) return 22;
+    if (size <= 8 * 1024 * 1024) return 23;
+    if (size <= 16 * 1024 * 1024) return 24;
+    if (size <= 32 * 1024 * 1024) return 25;
+#endif
+    return -1;
+
+/*
+ * What we really wanted to do and cannot do because of compiler issues is:
+ *  int i;
+ *  for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
+ *      if (size <= (1 << i))
+ *          return i;
+ */
+}
+
+/*
+ * Find the slab cache for a given combination of allocation flags and size.
+ *
+ * This ought to end up with a global pointer to the right cache
+ * in kmalloc_caches.
+ */
+static inline struct kmem_cache *kmalloc_slab(size_t size)
+{
+    int index = kmalloc_index(size);
+
+    if (index == 0)
+        return NULL;
+
+    if (index < 0) {
+        /*
+         * Generate a link failure. Would be great if we could
+         * do something to stop the compile here.
+         */
+        extern void __kmalloc_size_too_large(void);
+        __kmalloc_size_too_large();
+    }
+    return &kmalloc_caches[index];
+}
+
+#ifdef CONFIG_ZONE_DMA
+#define SLUB_DMA __GFP_DMA
+#else
+/* Disable DMA functionality */
+#define SLUB_DMA 0
+#endif
+
+static inline void *kmalloc(size_t size, gfp_t flags)
+{
+    if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
+        struct kmem_cache *s = kmalloc_slab(size);
+
+        if (!s)
+            return NULL;
+
+        return kmem_cache_alloc(s, flags);
+    } else
+        return __kmalloc(size, flags);
+}
+
+static inline void *kzalloc(size_t size, gfp_t flags)
+{
+    if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
+        struct kmem_cache *s = kmalloc_slab(size);
+
+        if (!s)
+            return NULL;
+
+        return kmem_cache_zalloc(s, flags);
+    } else
+        return __kzalloc(size, flags);
+}
+
+#ifdef CONFIG_NUMA
+extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
+
+static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+    if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
+        struct kmem_cache *s = kmalloc_slab(size);
+
+        if (!s)
+            return NULL;
+
+        return kmem_cache_alloc_node(s, flags, node);
+    } else
+        return __kmalloc_node(size, flags, node);
+}
+#endif
+
+#endif /* _LINUX_SLUB_DEF_H */
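The point of the kmalloc_index()/kmalloc_slab() ladder is that for a compile-time-constant size the whole thing folds away and kmalloc() becomes a direct kmem_cache_alloc() on one general cache. The three cases, illustratively:

static void *slub_kmalloc_cases(size_t runtime_len)
{
    /* constant size: kmalloc_index(100) folds to 7 (the 128-byte
     * cache), so this compiles to a plain kmem_cache_alloc() */
    void *a = kmalloc(100, GFP_KERNEL);

    /* non-constant size: falls through to __kmalloc() */
    void *b = kmalloc(runtime_len, GFP_KERNEL);

    /* a constant size beyond KMALLOC_SHIFT_HIGH would leave the call
     * to __kmalloc_size_too_large() standing and fail at link time
     * rather than at runtime */
    kfree(a);
    return b;
}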
diff --git a/include/linux/spi/ad7877.h b/include/linux/spi/ad7877.h
new file mode 100644
index 00000000000..cdbed816f25
--- /dev/null
+++ b/include/linux/spi/ad7877.h
@@ -0,0 +1,24 @@
+/* linux/spi/ad7877.h */
+
+/* Touchscreen characteristics vary between boards and models. The
+ * platform_data for the device's "struct device" holds this information.
+ *
+ * It's OK if the min/max values are zero.
+ */
+struct ad7877_platform_data {
+    u16 model;              /* 7877 */
+    u16 vref_delay_usecs;   /* 0 for external vref; etc */
+    u16 x_plate_ohms;
+    u16 y_plate_ohms;
+
+    u16 x_min, x_max;
+    u16 y_min, y_max;
+    u16 pressure_min, pressure_max;
+
+    u8 stopacq_polarity;        /* 1 = Active HIGH, 0 = Active LOW */
+    u8 first_conversion_delay;  /* 0 = 0.5us, 1 = 128us, 2 = 1ms, 3 = 8ms */
+    u8 acquisition_time;        /* 0 = 2us, 1 = 4us, 2 = 8us, 3 = 16us */
+    u8 averaging;               /* 0 = 1, 1 = 4, 2 = 8, 3 = 16 */
+    u8 pen_down_acc_interval;   /* 0 = convert once, 1 = every 0.5 ms,
+                                   2 = every 1 ms, 3 = every 8 ms */
+};
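A board would feed this to the driver through its SPI board info, roughly as below; every value is a made-up example for one hypothetical panel:

static struct ad7877_platform_data board_ad7877_data = {
    .model                  = 7877,
    .vref_delay_usecs       = 50,
    .x_plate_ohms           = 419,
    .y_plate_ohms           = 486,
    .pressure_max           = 1000,
    .stopacq_polarity       = 1,
    .first_conversion_delay = 3,
    .acquisition_time       = 1,
    .averaging              = 1,
    .pen_down_acc_interval  = 1,    /* convert every 0.5 ms while down */
};

static struct spi_board_info board_spi_devices[] __initdata = {
    {
        .modalias      = "ad7877",
        .platform_data = &board_ad7877_data,
        .max_speed_hz  = 12500000,
        .bus_num       = 0,
        .chip_select   = 1,
    },
};

/* registered from board init code:
 * spi_register_board_info(board_spi_devices, ARRAY_SIZE(board_spi_devices));
 */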
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index bf99bd49f8e..96868be9c21 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -8,6 +8,7 @@
 #include <linux/notifier.h>
 #include <linux/init.h>
 #include <linux/pm.h>
+#include <linux/mm.h>

 /* struct pbe is used for creating lists of pages that should be restored
 * atomically during the resume from disk, because the page frames they have
@@ -23,36 +24,32 @@ struct pbe {
 extern void drain_local_pages(void);
 extern void mark_free_pages(struct zone *zone);

-#ifdef CONFIG_PM
-/* kernel/power/swsusp.c */
-extern int software_suspend(void);
-
-#if defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
+#if defined(CONFIG_PM) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
 extern int pm_prepare_console(void);
 extern void pm_restore_console(void);
 #else
 static inline int pm_prepare_console(void) { return 0; }
 static inline void pm_restore_console(void) {}
-#endif /* defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE) */
+#endif
+
+#if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND)
+/* kernel/power/snapshot.c */
+extern void __init register_nosave_region(unsigned long, unsigned long);
+extern int swsusp_page_is_forbidden(struct page *);
+extern void swsusp_set_page_free(struct page *);
+extern void swsusp_unset_page_free(struct page *);
+extern unsigned long get_safe_page(gfp_t gfp_mask);
 #else
-static inline int software_suspend(void)
-{
-    printk("Warning: fake suspend called\n");
-    return -ENOSYS;
-}
-#endif /* CONFIG_PM */
+static inline void register_nosave_region(unsigned long b, unsigned long e) {}
+static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
+static inline void swsusp_set_page_free(struct page *p) {}
+static inline void swsusp_unset_page_free(struct page *p) {}
+#endif /* defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) */

 void save_processor_state(void);
 void restore_processor_state(void);
 struct saved_context;
 void __save_processor_state(struct saved_context *ctxt);
 void __restore_processor_state(struct saved_context *ctxt);
-unsigned long get_safe_page(gfp_t gfp_mask);
-
-/*
- * XXX: We try to keep some more pages free so that I/O operations succeed
- * without paging. Might this be more?
- */
-#define PAGES_FOR_IO 1024

 #endif /* _LINUX_SWSUSP_H */
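The no-op stubs mean platform code can call register_nosave_region() unconditionally. A hedged sketch, assuming (as the snapshot code suggests) that the two arguments are start and end page frame numbers; the address range is invented:

static int __init board_mark_nosave(void)
{
    /* hypothetical firmware-owned range at 0x9f000-0x9ffff that the
     * hibernation snapshot must skip; arguments assumed to be pfns */
    register_nosave_region(PFN_DOWN(0x0009f000), PFN_UP(0x000a0000));
    return 0;
}
core_initcall(board_mark_nosave);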
diff --git a/include/linux/usb_sl811.h b/include/linux/usb_sl811.h
new file mode 100644
index 00000000000..4f2d012d730
--- /dev/null
+++ b/include/linux/usb_sl811.h
@@ -0,0 +1,26 @@
+
+/*
+ * board initialization should put one of these into dev->platform_data
+ * and place the sl811hs onto platform_bus named "sl811-hcd".
+ */
+
+struct sl811_platform_data {
+    unsigned can_wakeup:1;
+
+    /* given port_power, msec/2 after power on till power good */
+    u8 potpg;
+
+    /* mA/2 power supplied on this port (max = default = 250) */
+    u8 power;
+
+    /* sl811 relies on an external source of VBUS current */
+    void (*port_power)(struct device *dev, int is_on);
+
+    /* pulse sl811 nRST (probably with a GPIO) */
+    void (*reset)(struct device *dev);
+
+    // some boards need something like these:
+    // int (*check_overcurrent)(struct device *dev);
+    // void (*clock_enable)(struct device *dev, int is_on);
+};
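Board code would instantiate this roughly as follows; the GPIO helper and numbers are invented, and potpg/power use the half-unit encodings the comments describe (20 ms until power good, 250 mA supplied):

static void board_sl811_port_power(struct device *dev, int is_on)
{
    /* hypothetical board hook: switch the VBUS supply */
    gpio_set_value(GPIO_USB_VBUS_EN, is_on);
}

static struct sl811_platform_data board_sl811_data = {
    .potpg      = 10,   /* 10 * 2 ms = 20 ms until power good */
    .power      = 125,  /* 125 * 2 mA = 250 mA available */
    .port_power = board_sl811_port_power,
};

static struct platform_device board_sl811_device = {
    .name = "sl811-hcd",    /* must match the hcd driver name */
    .id   = -1,
    .dev  = {
        .platform_data = &board_sl811_data,
    },
};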