author     Russell King <rmk@dyn-67.arm.linux.org.uk>    2007-02-20 19:13:30 +0000
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2007-02-20 19:13:30 +0000
commit     5a84d159061d914c8dd4aa372ac6e9529c2be453 (patch)
tree       9b08af78085334af44414adafe0096276f8fe0ff /include/linux/mmzone.h
parent     e80a0e6e7ccdf64575d4384cb4172860422f5b81 (diff)
parent     7d477a04a619e90ee08724e8f2d8803c6bdfcef8 (diff)
Merge ARM fixes
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--  include/linux/mmzone.h  42
1 file changed, 32 insertions, 10 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index b262f47961f..ee9e3143df4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -47,15 +47,20 @@ struct zone_padding {
 #endif
 
 enum zone_stat_item {
+	/* First 128 byte cacheline (assuming 64 bit words) */
+	NR_FREE_PAGES,
+	NR_INACTIVE,
+	NR_ACTIVE,
 	NR_ANON_PAGES,	/* Mapped anonymous pages */
 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
 			   only modified from process context */
 	NR_FILE_PAGES,
-	NR_SLAB_RECLAIMABLE,
-	NR_SLAB_UNRECLAIMABLE,
-	NR_PAGETABLE,	/* used for pagetables */
 	NR_FILE_DIRTY,
 	NR_WRITEBACK,
+	/* Second 128 byte cacheline */
+	NR_SLAB_RECLAIMABLE,
+	NR_SLAB_UNRECLAIMABLE,
+	NR_PAGETABLE,	/* used for pagetables */
 	NR_UNSTABLE_NFS,	/* NFS unstable pages */
 	NR_BOUNCE,
 	NR_VMSCAN_WRITE,
@@ -91,6 +96,7 @@ struct per_cpu_pageset {
 #endif
 
 enum zone_type {
+#ifdef CONFIG_ZONE_DMA
 	/*
 	 * ZONE_DMA is used when there are devices that are not able
 	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
@@ -111,6 +117,7 @@ enum zone_type {
 	 * <16M.
 	 */
 	ZONE_DMA,
+#endif
 #ifdef CONFIG_ZONE_DMA32
 	/*
 	 * x86_64 needs two ZONE_DMAs because it supports devices that are
@@ -147,15 +154,30 @@ enum zone_type {
 	 * match the requested limits. See gfp_zone() in include/linux/gfp.h
 	 */
 
-#if !defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_HIGHMEM)
+/*
+ * Count the active zones. Note that the use of defined(X) outside
+ * #if and family is not necessarily defined so ensure we cannot use
+ * it later. Use __ZONE_COUNT to work out how many shift bits we need.
+ */
+#define __ZONE_COUNT (			\
+	  defined(CONFIG_ZONE_DMA)	\
+	+ defined(CONFIG_ZONE_DMA32)	\
+	+ 1				\
+	+ defined(CONFIG_HIGHMEM)	\
+)
+#if __ZONE_COUNT < 2
+#define ZONES_SHIFT 0
+#elif __ZONE_COUNT <= 2
 #define ZONES_SHIFT 1
-#else
+#elif __ZONE_COUNT <= 4
 #define ZONES_SHIFT 2
+#else
+#error ZONES_SHIFT -- too many zones configured adjust calculation
 #endif
+#undef __ZONE_COUNT
 
 struct zone {
 	/* Fields commonly accessed by the page allocator */
-	unsigned long		free_pages;
 	unsigned long		pages_min, pages_low, pages_high;
 	/*
 	 * We don't know if the memory that we're going to allocate will be freeable
@@ -197,8 +219,6 @@ struct zone {
 	struct list_head	inactive_list;
 	unsigned long		nr_scan_active;
 	unsigned long		nr_scan_inactive;
-	unsigned long		nr_active;
-	unsigned long		nr_inactive;
 	unsigned long		pages_scanned;	   /* since last reclaim */
 	int			all_unreclaimable; /* All pages pinned */
 
@@ -442,8 +462,6 @@ typedef struct pglist_data {
 
 #include <linux/memory_hotplug.h>
 
-void __get_zone_counts(unsigned long *active, unsigned long *inactive,
-			unsigned long *free, struct pglist_data *pgdat);
 void get_zone_counts(unsigned long *active, unsigned long *inactive,
 			unsigned long *free);
 void build_all_zonelists(void);
@@ -523,7 +541,11 @@ static inline int is_dma32(struct zone *zone)
 
 static inline int is_dma(struct zone *zone)
 {
+#ifdef CONFIG_ZONE_DMA
 	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
+#else
+	return 0;
+#endif
 }
 
 /* These two functions are used to setup the per zone pages min values */
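
One side of this merge stops tracking free, active and inactive pages in dedicated struct zone fields and instead adds NR_FREE_PAGES, NR_INACTIVE and NR_ACTIVE to enum zone_stat_item, so they live in the per-zone counter array alongside the other statistics. The following is a minimal user-space sketch of that layout, not kernel code; struct zone_model and zone_stat() are illustrative stand-ins for struct zone and its counter accessor.

/*
 * Minimal user-space model (NOT kernel code) of the layout this change
 * moves to: free/active/inactive counts become entries in one per-zone
 * counter array indexed by enum zone_stat_item rather than separate
 * struct fields.  Names here are illustrative stand-ins.
 */
#include <stdio.h>

enum zone_stat_item {
	NR_FREE_PAGES,		/* hottest counters grouped first */
	NR_INACTIVE,
	NR_ACTIVE,
	NR_VM_ZONE_STAT_ITEMS	/* sentinel: number of items */
};

struct zone_model {			/* stand-in for struct zone */
	unsigned long vm_stat[NR_VM_ZONE_STAT_ITEMS];
};

/* Rough analogue of the kernel's per-zone counter read accessor. */
static unsigned long zone_stat(const struct zone_model *z,
			       enum zone_stat_item item)
{
	return z->vm_stat[item];
}

int main(void)
{
	struct zone_model z = { .vm_stat = {
		[NR_FREE_PAGES] = 1024,
		[NR_INACTIVE]   = 300,
		[NR_ACTIVE]     = 200,
	} };

	printf("free=%lu active=%lu inactive=%lu\n",
	       zone_stat(&z, NR_FREE_PAGES),
	       zone_stat(&z, NR_ACTIVE),
	       zone_stat(&z, NR_INACTIVE));
	return 0;
}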
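
The new ZONES_SHIFT logic counts configured zones by summing defined(CONFIG_*) terms: each term becomes 1 or 0 when __ZONE_COUNT is expanded inside an #if/#elif, and the helper is #undef'd straight away because defined() reached through macro expansion is not guaranteed by the C standard (the comment in the hunk alludes to this). A standalone sketch of the same pattern, using made-up CONFIG_A/CONFIG_B options rather than real kconfig symbols:

/*
 * Standalone sketch (CONFIG_A/CONFIG_B are made-up options, not real
 * kconfig symbols) of the counting trick behind __ZONE_COUNT: each
 * defined(X) term evaluates to 1 or 0 when OPTION_COUNT is expanded
 * inside #if/#elif, so the sum is the number of enabled options.
 * GCC accepts this; strictly, defined() via macro expansion is not
 * guaranteed, which is why the helper is #undef'd right after use.
 */
#include <stdio.h>

#define CONFIG_A			/* pretend this option is enabled */
/* CONFIG_B deliberately not defined */

#define OPTION_COUNT (			\
	  defined(CONFIG_A)		\
	+ defined(CONFIG_B)		\
	+ 1				\
)
#if OPTION_COUNT < 2
#define OPTION_SHIFT 0
#elif OPTION_COUNT <= 2
#define OPTION_SHIFT 1
#elif OPTION_COUNT <= 4
#define OPTION_SHIFT 2
#else
#error too many options configured
#endif
#undef OPTION_COUNT

int main(void)
{
	/* CONFIG_A + the always-present option = 2 -> one shift bit */
	printf("OPTION_SHIFT = %d\n", OPTION_SHIFT);
	return 0;
}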
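
The other theme is making ZONE_DMA optional: the enum member only exists under CONFIG_ZONE_DMA, so is_dma() grows an #ifdef branch that returns 0 when the zone is compiled out. A small stand-alone illustration of that pattern, with a made-up HAVE_SPECIAL_ZONE option in place of the real kconfig symbol:

/*
 * Stand-alone illustration (HAVE_SPECIAL_ZONE is a made-up option, not
 * a real kconfig symbol) of the optional-zone pattern: the enum member
 * only exists when the option is enabled, and the predicate compiles
 * to a constant 0 otherwise.
 */
#include <stdio.h>

#define HAVE_SPECIAL_ZONE 1		/* comment out to build without it */

enum zone_type {
#ifdef HAVE_SPECIAL_ZONE
	ZONE_SPECIAL,			/* analogous to ZONE_DMA */
#endif
	ZONE_NORMAL,
	MAX_NR_ZONES
};

static int is_special(enum zone_type z)
{
#ifdef HAVE_SPECIAL_ZONE
	return z == ZONE_SPECIAL;	/* zone exists: real comparison */
#else
	return 0;			/* zone compiled out: always false */
#endif
}

int main(void)
{
	printf("MAX_NR_ZONES=%d, is_special(ZONE_NORMAL)=%d\n",
	       (int)MAX_NR_ZONES, is_special(ZONE_NORMAL));
	return 0;
}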