Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig            |   9
-rw-r--r--  mm/Kconfig.debug      |  17
-rw-r--r--  mm/Makefile           |   1
-rw-r--r--  mm/allocpercpu.c      |   4
-rw-r--r--  mm/backing-dev.c      |  26
-rw-r--r--  mm/debug-pagealloc.c  | 129
-rw-r--r--  mm/highmem.c          | 110
-rw-r--r--  mm/hugetlb.c          |   6
-rw-r--r--  mm/internal.h         |   8
-rw-r--r--  mm/memory.c           |  39
-rw-r--r--  mm/mmap.c             |   4
-rw-r--r--  mm/oom_kill.c         |  12
-rw-r--r--  mm/page-writeback.c   |  46
-rw-r--r--  mm/page_alloc.c       |  29
-rw-r--r--  mm/pdflush.c          |   2
-rw-r--r--  mm/percpu.c           | 130
-rw-r--r--  mm/readahead.c        |  25
-rw-r--r--  mm/shmem.c            |   5
-rw-r--r--  mm/slob.c             |  45
-rw-r--r--  mm/slub.c             |  66
-rw-r--r--  mm/sparse.c           |   4
-rw-r--r--  mm/swap.c             |  23
-rw-r--r--  mm/util.c             |  30
-rw-r--r--  mm/vmalloc.c          |  19
-rw-r--r--  mm/vmscan.c           | 101
-rw-r--r--  mm/vmstat.c           |  13
26 files changed, 648 insertions, 255 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index a5b77811fdf..b53427ad30a 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -206,7 +206,6 @@ config VIRT_TO_BUS
config UNEVICTABLE_LRU
bool "Add LRU list to track non-evictable pages"
default y
- depends on MMU
help
Keeps unevictable pages off of the active and inactive pageout
lists, so kswapd will not waste CPU time or have its balancing
@@ -214,5 +213,13 @@ config UNEVICTABLE_LRU
will use one page flag and increase the code size a little,
say Y unless you know what you are doing.
+config HAVE_MLOCK
+ bool
+ default y if MMU=y
+
+config HAVE_MLOCKED_PAGE_BIT
+ bool
+ default y if HAVE_MLOCK=y && UNEVICTABLE_LRU=y
+
config MMU_NOTIFIER
bool
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
new file mode 100644
index 00000000000..c8d62d49a44
--- /dev/null
+++ b/mm/Kconfig.debug
@@ -0,0 +1,17 @@
+config WANT_PAGE_DEBUG_FLAGS
+ bool
+
+config PAGE_POISONING
+ bool "Debug page memory allocations"
+ depends on DEBUG_KERNEL && !ARCH_SUPPORTS_DEBUG_PAGEALLOC
+ depends on !HIBERNATION
+ select DEBUG_PAGEALLOC
+ select WANT_PAGE_DEBUG_FLAGS
+ help
+ Fill the pages with poison patterns after free_pages() and verify
+ the patterns before alloc_pages(). This results in a large slowdown,
+ but helps to find certain types of memory corruption.
+
+ This option cannot be enabled together with hibernation. Otherwise, it
+ will report false memory corruption because the free pages are not
+ saved to the suspend image.
diff --git a/mm/Makefile b/mm/Makefile
index 818569b68f4..ec73c68b601 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
obj-$(CONFIG_TMPFS_POSIX_ACL) += shmem_acl.o
obj-$(CONFIG_SLOB) += slob.o
obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
+obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
obj-$(CONFIG_SLAB) += slab.o
obj-$(CONFIG_SLUB) += slub.o
obj-$(CONFIG_FAILSLAB) += failslab.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index 3653c570232..139d5b7b662 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -120,7 +120,7 @@ void *__alloc_percpu(size_t size, size_t align)
* on it. Larger alignment should only be used for module
* percpu sections on SMP for which this path isn't used.
*/
- WARN_ON_ONCE(align > __alignof__(unsigned long long));
+ WARN_ON_ONCE(align > SMP_CACHE_BYTES);
if (unlikely(!pdata))
return NULL;
@@ -143,7 +143,7 @@ void free_percpu(void *__pdata)
{
if (unlikely(!__pdata))
return;
- __percpu_depopulate_mask(__pdata, &cpu_possible_map);
+ __percpu_depopulate_mask(__pdata, cpu_possible_mask);
kfree(__percpu_disguise(__pdata));
}
EXPORT_SYMBOL_GPL(free_percpu);
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 8e858744413..be68c956a66 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -2,11 +2,24 @@
#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
+#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
+void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
+{
+}
+EXPORT_SYMBOL(default_unplug_io_fn);
+
+struct backing_dev_info default_backing_dev_info = {
+ .ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
+ .state = 0,
+ .capabilities = BDI_CAP_MAP_COPY,
+ .unplug_io_fn = default_unplug_io_fn,
+};
+EXPORT_SYMBOL_GPL(default_backing_dev_info);
static struct class *bdi_class;
@@ -166,9 +179,20 @@ static __init int bdi_class_init(void)
bdi_debug_init();
return 0;
}
-
postcore_initcall(bdi_class_init);
+static int __init default_bdi_init(void)
+{
+ int err;
+
+ err = bdi_init(&default_backing_dev_info);
+ if (!err)
+ bdi_register(&default_backing_dev_info, NULL, "default");
+
+ return err;
+}
+subsys_initcall(default_bdi_init);
+
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...)
{
diff --git a/mm/debug-pagealloc.c b/mm/debug-pagealloc.c
new file mode 100644
index 00000000000..a1e3324de2b
--- /dev/null
+++ b/mm/debug-pagealloc.c
@@ -0,0 +1,129 @@
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/page-debug-flags.h>
+#include <linux/poison.h>
+
+static inline void set_page_poison(struct page *page)
+{
+ __set_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
+}
+
+static inline void clear_page_poison(struct page *page)
+{
+ __clear_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
+}
+
+static inline bool page_poison(struct page *page)
+{
+ return test_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
+}
+
+static void poison_highpage(struct page *page)
+{
+ /*
+ * Page poisoning for highmem pages is not implemented.
+ *
+ * This can be called from interrupt contexts.
+ * So we need to create a new kmap_atomic slot for this
+ * application and it will need interrupt protection.
+ */
+}
+
+static void poison_page(struct page *page)
+{
+ void *addr;
+
+ if (PageHighMem(page)) {
+ poison_highpage(page);
+ return;
+ }
+ set_page_poison(page);
+ addr = page_address(page);
+ memset(addr, PAGE_POISON, PAGE_SIZE);
+}
+
+static void poison_pages(struct page *page, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ poison_page(page + i);
+}
+
+static bool single_bit_flip(unsigned char a, unsigned char b)
+{
+ unsigned char error = a ^ b;
+
+ return error && !(error & (error - 1));
+}
+
+static void check_poison_mem(unsigned char *mem, size_t bytes)
+{
+ unsigned char *start;
+ unsigned char *end;
+
+ for (start = mem; start < mem + bytes; start++) {
+ if (*start != PAGE_POISON)
+ break;
+ }
+ if (start == mem + bytes)
+ return;
+
+ for (end = mem + bytes - 1; end > start; end--) {
+ if (*end != PAGE_POISON)
+ break;
+ }
+
+ if (!printk_ratelimit())
+ return;
+ else if (start == end && single_bit_flip(*start, PAGE_POISON))
+ printk(KERN_ERR "pagealloc: single bit error\n");
+ else
+ printk(KERN_ERR "pagealloc: memory corruption\n");
+
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
+ end - start + 1, 1);
+ dump_stack();
+}
+
+static void unpoison_highpage(struct page *page)
+{
+ /*
+ * See comment in poison_highpage().
+ * Highmem pages should not be poisoned for now
+ */
+ BUG_ON(page_poison(page));
+}
+
+static void unpoison_page(struct page *page)
+{
+ if (PageHighMem(page)) {
+ unpoison_highpage(page);
+ return;
+ }
+ if (page_poison(page)) {
+ void *addr = page_address(page);
+
+ check_poison_mem(addr, PAGE_SIZE);
+ clear_page_poison(page);
+ }
+}
+
+static void unpoison_pages(struct page *page, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ unpoison_page(page + i);
+}
+
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ if (!debug_pagealloc_enabled)
+ return;
+
+ if (enable)
+ unpoison_pages(page, numpages);
+ else
+ poison_pages(page, numpages);
+}
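With CONFIG_PAGE_POISONING, kernel_map_pages() above becomes the hook through which the page allocator poisons pages on free and verifies them on the next allocation. A simplified, hedged sketch of the call sites (illustrative only, not the exact page_alloc.c code; the function names are made up):

static void example_free_path(struct page *page, int order)
{
	kernel_map_pages(page, 1 << order, 0);	/* poison_pages() fills with PAGE_POISON */
	/* ... page goes onto the buddy free lists ... */
}

static void example_alloc_path(struct page *page, int order)
{
	/* ... page just came off the buddy free lists ... */
	kernel_map_pages(page, 1 << order, 1);	/* unpoison_pages() verifies the pattern */
}

Any byte that no longer matches PAGE_POISON at allocation time is reported by check_poison_mem() as either a single bit flip or a general memory corruption.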
diff --git a/mm/highmem.c b/mm/highmem.c
index b36b83b920f..68eb1d9b63f 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -67,6 +67,25 @@ pte_t * pkmap_page_table;
static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
+/*
+ * Most architectures have no use for kmap_high_get(), so let's abstract
+ * the disabling of IRQs out of the locking in that case to avoid
+ * otherwise useless overhead.
+ */
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+#define lock_kmap() spin_lock_irq(&kmap_lock)
+#define unlock_kmap() spin_unlock_irq(&kmap_lock)
+#define lock_kmap_any(flags) spin_lock_irqsave(&kmap_lock, flags)
+#define unlock_kmap_any(flags) spin_unlock_irqrestore(&kmap_lock, flags)
+#else
+#define lock_kmap() spin_lock(&kmap_lock)
+#define unlock_kmap() spin_unlock(&kmap_lock)
+#define lock_kmap_any(flags) \
+ do { spin_lock(&kmap_lock); (void)(flags); } while (0)
+#define unlock_kmap_any(flags) \
+ do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
+#endif
+
static void flush_all_zero_pkmaps(void)
{
int i;
@@ -113,9 +132,9 @@ static void flush_all_zero_pkmaps(void)
*/
void kmap_flush_unused(void)
{
- spin_lock(&kmap_lock);
+ lock_kmap();
flush_all_zero_pkmaps();
- spin_unlock(&kmap_lock);
+ unlock_kmap();
}
static inline unsigned long map_new_virtual(struct page *page)
@@ -145,10 +164,10 @@ start:
__set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&pkmap_map_wait, &wait);
- spin_unlock(&kmap_lock);
+ unlock_kmap();
schedule();
remove_wait_queue(&pkmap_map_wait, &wait);
- spin_lock(&kmap_lock);
+ lock_kmap();
/* Somebody else might have mapped it while we slept */
if (page_address(page))
@@ -184,29 +203,59 @@ void *kmap_high(struct page *page)
* For highmem pages, we can't trust "virtual" until
* after we have the lock.
*/
- spin_lock(&kmap_lock);
+ lock_kmap();
vaddr = (unsigned long)page_address(page);
if (!vaddr)
vaddr = map_new_virtual(page);
pkmap_count[PKMAP_NR(vaddr)]++;
BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
- spin_unlock(&kmap_lock);
+ unlock_kmap();
return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_high);
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+/**
+ * kmap_high_get - pin a highmem page into memory
+ * @page: &struct page to pin
+ *
+ * Returns the page's current virtual memory address, or NULL if no mapping
+ * exists. If and only if a non-NULL address is returned, a matching
+ * call to kunmap_high() is necessary.
+ *
+ * This can be called from any context.
+ */
+void *kmap_high_get(struct page *page)
+{
+ unsigned long vaddr, flags;
+
+ lock_kmap_any(flags);
+ vaddr = (unsigned long)page_address(page);
+ if (vaddr) {
+ BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
+ pkmap_count[PKMAP_NR(vaddr)]++;
+ }
+ unlock_kmap_any(flags);
+ return (void*) vaddr;
+}
+#endif
+
/**
* kunmap_high - unmap a highmem page
* @page: &struct page to unmap
+ *
+ * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
+ * only from user context.
*/
void kunmap_high(struct page *page)
{
unsigned long vaddr;
unsigned long nr;
+ unsigned long flags;
int need_wakeup;
- spin_lock(&kmap_lock);
+ lock_kmap_any(flags);
vaddr = (unsigned long)page_address(page);
BUG_ON(!vaddr);
nr = PKMAP_NR(vaddr);
@@ -232,7 +281,7 @@ void kunmap_high(struct page *page)
*/
need_wakeup = waitqueue_active(&pkmap_map_wait);
}
- spin_unlock(&kmap_lock);
+ unlock_kmap_any(flags);
/* do wake-up, if needed, race-free outside of the spin lock */
if (need_wakeup)
@@ -373,3 +422,48 @@ void __init page_address_init(void)
}
#endif /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */
+
+#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT)
+
+void debug_kmap_atomic(enum km_type type)
+{
+ static unsigned warn_count = 10;
+
+ if (unlikely(warn_count == 0))
+ return;
+
+ if (unlikely(in_interrupt())) {
+ if (in_irq()) {
+ if (type != KM_IRQ0 && type != KM_IRQ1 &&
+ type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
+ type != KM_BOUNCE_READ) {
+ WARN_ON(1);
+ warn_count--;
+ }
+ } else if (!irqs_disabled()) { /* softirq */
+ if (type != KM_IRQ0 && type != KM_IRQ1 &&
+ type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
+ type != KM_SKB_SUNRPC_DATA &&
+ type != KM_SKB_DATA_SOFTIRQ &&
+ type != KM_BOUNCE_READ) {
+ WARN_ON(1);
+ warn_count--;
+ }
+ }
+ }
+
+ if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
+ type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
+ if (!irqs_disabled()) {
+ WARN_ON(1);
+ warn_count--;
+ }
+ } else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
+ if (irq_count() == 0 && !irqs_disabled()) {
+ WARN_ON(1);
+ warn_count--;
+ }
+ }
+}
+
+#endif
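kmap_high_get() only ever returns an address that is already pinned in the pkmap area, which is what makes it safe from atomic context. A hedged usage sketch of the pattern the helper was added for (the flush function below is hypothetical, not part of this patch):

	void *vaddr = kmap_high_get(page);	/* takes a pkmap reference iff already mapped */

	if (vaddr) {
		/* the mapping cannot go away while this reference is held */
		example_flush_dcache_range(vaddr, vaddr + PAGE_SIZE);	/* hypothetical helper */
		kunmap_high(page);		/* drop the reference taken above */
	}

If NULL is returned the page is not currently kmapped, and the caller has to fall back to a context where a regular kmap() is allowed.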
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 107da3d809a..28c655ba935 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -918,7 +918,7 @@ static void return_unused_surplus_pages(struct hstate *h,
* an instantiated the change should be committed via vma_commit_reservation.
* No action is required on failure.
*/
-static int vma_needs_reservation(struct hstate *h,
+static long vma_needs_reservation(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{
struct address_space *mapping = vma->vm_file->f_mapping;
@@ -933,7 +933,7 @@ static int vma_needs_reservation(struct hstate *h,
return 1;
} else {
- int err;
+ long err;
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
struct resv_map *reservations = vma_resv_map(vma);
@@ -969,7 +969,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
struct page *page;
struct address_space *mapping = vma->vm_file->f_mapping;
struct inode *inode = mapping->host;
- unsigned int chg;
+ long chg;
/*
* Processes that did not create the mapping will have no reserves and
diff --git a/mm/internal.h b/mm/internal.h
index 478223b73a2..987bb03fbdd 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -63,6 +63,7 @@ static inline unsigned long page_order(struct page *page)
return page_private(page);
}
+#ifdef CONFIG_HAVE_MLOCK
extern long mlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
@@ -71,6 +72,7 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}
+#endif
#ifdef CONFIG_UNEVICTABLE_LRU
/*
@@ -90,7 +92,7 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
}
#endif
-#ifdef CONFIG_UNEVICTABLE_LRU
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
/*
* Called only in fault path via page_evictable() for a new page
* to determine if it's being mapped into a LOCKED vma.
@@ -165,7 +167,7 @@ static inline void free_page_mlock(struct page *page)
}
}
-#else /* CONFIG_UNEVICTABLE_LRU */
+#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
{
return 0;
@@ -175,7 +177,7 @@ static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }
static inline void free_page_mlock(struct page *page) { }
-#endif /* CONFIG_UNEVICTABLE_LRU */
+#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
/*
* Return the mem_map entry representing the 'offset' subpage within
diff --git a/mm/memory.c b/mm/memory.c
index baa999e87cd..cf6873e91c6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1151,6 +1151,11 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
if ((flags & FOLL_WRITE) &&
!pte_dirty(pte) && !PageDirty(page))
set_page_dirty(page);
+ /*
+ * pte_mkyoung() would be more correct here, but atomic care
+ * is needed to avoid losing the dirty bit: it is easier to use
+ * mark_page_accessed().
+ */
mark_page_accessed(page);
}
unlock:
@@ -1665,9 +1670,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
* behaviour that some programs depend on. We mark the "original"
* un-COW'ed pages by matching them up with "vma->vm_pgoff".
*/
- if (addr == vma->vm_start && end == vma->vm_end)
+ if (addr == vma->vm_start && end == vma->vm_end) {
vma->vm_pgoff = pfn;
- else if (is_cow_mapping(vma->vm_flags))
+ vma->vm_flags |= VM_PFN_AT_MMAP;
+ } else if (is_cow_mapping(vma->vm_flags))
return -EINVAL;
vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
@@ -1679,6 +1685,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
* needed from higher level routine calling unmap_vmas
*/
vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
+ vma->vm_flags &= ~VM_PFN_AT_MMAP;
return -EINVAL;
}
@@ -1938,6 +1945,15 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
* get_user_pages(.write=1, .force=1).
*/
if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
+ struct vm_fault vmf;
+ int tmp;
+
+ vmf.virtual_address = (void __user *)(address &
+ PAGE_MASK);
+ vmf.pgoff = old_page->index;
+ vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
+ vmf.page = old_page;
+
/*
* Notify the address space that the page is about to
* become writable so that it can prohibit this or wait
@@ -1949,8 +1965,12 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
page_cache_get(old_page);
pte_unmap_unlock(page_table, ptl);
- if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
+ tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
+ if (unlikely(tmp &
+ (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
+ ret = tmp;
goto unwritable_page;
+ }
/*
* Since we dropped the lock we need to revalidate
@@ -2099,7 +2119,7 @@ oom:
unwritable_page:
page_cache_release(old_page);
- return VM_FAULT_SIGBUS;
+ return ret;
}
/*
@@ -2433,8 +2453,6 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
count_vm_event(PGMAJFAULT);
}
- mark_page_accessed(page);
-
lock_page(page);
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
@@ -2643,9 +2661,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* to become writable
*/
if (vma->vm_ops->page_mkwrite) {
+ int tmp;
+
unlock_page(page);
- if (vma->vm_ops->page_mkwrite(vma, page) < 0) {
- ret = VM_FAULT_SIGBUS;
+ vmf.flags |= FAULT_FLAG_MKWRITE;
+ tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
+ if (unlikely(tmp &
+ (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
+ ret = tmp;
anon = 1; /* no anon but release vmf.page */
goto out_unlocked;
}
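Both do_wp_page() and __do_fault() above now hand ->page_mkwrite() a struct vm_fault and interpret the return value as VM_FAULT_* bits rather than a bare negative number. A hedged sketch of what a callback looks like under the new prototype (hypothetical filesystem code, not taken from this patch):

static int example_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	if (page->mapping != vma->vm_file->f_mapping) {
		/* raced with truncate: ask the fault handler to retry */
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}
	/* ... reserve blocks / mark the page writable on disk ... */
	unlock_page(page);
	return 0;	/* success: proceed with making the pte writable */
}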
diff --git a/mm/mmap.c b/mm/mmap.c
index 00ced3ee49a..1abb9185a68 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -20,6 +20,7 @@
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
+#include <linux/ima.h>
#include <linux/hugetlb.h>
#include <linux/profile.h>
#include <linux/module.h>
@@ -1049,6 +1050,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
if (error)
return error;
+ error = ima_file_mmap(file, prot);
+ if (error)
+ return error;
return mmap_region(file, addr, len, flags, vm_flags, pgoff);
}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 40ba05061a4..d3b9bac085b 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -55,7 +55,7 @@ static DEFINE_SPINLOCK(zone_scan_lock);
unsigned long badness(struct task_struct *p, unsigned long uptime)
{
- unsigned long points, cpu_time, run_time, s;
+ unsigned long points, cpu_time, run_time;
struct mm_struct *mm;
struct task_struct *child;
@@ -110,12 +110,10 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
else
run_time = 0;
- s = int_sqrt(cpu_time);
- if (s)
- points /= s;
- s = int_sqrt(int_sqrt(run_time));
- if (s)
- points /= s;
+ if (cpu_time)
+ points /= int_sqrt(cpu_time);
+ if (run_time)
+ points /= int_sqrt(int_sqrt(run_time));
/*
* Niced processes are most likely less important, so double
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 74dc57c7434..30351f0063a 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -66,7 +66,7 @@ static inline long sync_writeback_pages(void)
/*
* Start background writeback (via pdflush) at this percentage
*/
-int dirty_background_ratio = 5;
+int dirty_background_ratio = 10;
/*
* dirty_background_bytes starts at 0 (disabled) so that it is a function of
@@ -83,7 +83,7 @@ int vm_highmem_is_dirtyable;
/*
* The generator of dirty data starts writeback at this percentage
*/
-int vm_dirty_ratio = 10;
+int vm_dirty_ratio = 20;
/*
* vm_dirty_bytes starts at 0 (disabled) so that it is a function of
@@ -92,14 +92,14 @@ int vm_dirty_ratio = 10;
unsigned long vm_dirty_bytes;
/*
- * The interval between `kupdate'-style writebacks, in jiffies
+ * The interval between `kupdate'-style writebacks
*/
-int dirty_writeback_interval = 5 * HZ;
+unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
/*
- * The longest number of jiffies for which data is allowed to remain dirty
+ * The longest time for which data is allowed to remain dirty
*/
-int dirty_expire_interval = 30 * HZ;
+unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
/*
* Flag that makes the machine dump writes/reads and block dirtyings.
@@ -770,9 +770,9 @@ static void wb_kupdate(unsigned long arg)
sync_supers();
- oldest_jif = jiffies - dirty_expire_interval;
+ oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval);
start_jif = jiffies;
- next_jif = start_jif + dirty_writeback_interval;
+ next_jif = start_jif + msecs_to_jiffies(dirty_writeback_interval * 10);
nr_to_write = global_page_state(NR_FILE_DIRTY) +
global_page_state(NR_UNSTABLE_NFS) +
(inodes_stat.nr_inodes - inodes_stat.nr_unused);
@@ -801,9 +801,10 @@ static void wb_kupdate(unsigned long arg)
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
- proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
+ proc_dointvec(table, write, file, buffer, length, ppos);
if (dirty_writeback_interval)
- mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
+ mod_timer(&wb_timer, jiffies +
+ msecs_to_jiffies(dirty_writeback_interval * 10));
else
del_timer(&wb_timer);
return 0;
@@ -905,7 +906,8 @@ void __init page_writeback_init(void)
{
int shift;
- mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
+ mod_timer(&wb_timer,
+ jiffies + msecs_to_jiffies(dirty_writeback_interval * 10));
writeback_set_ratelimit();
register_cpu_notifier(&ratelimit_nb);
@@ -1198,6 +1200,20 @@ int __set_page_dirty_no_writeback(struct page *page)
}
/*
+ * Helper function for set_page_dirty family.
+ * NOTE: This relies on being atomic wrt interrupts.
+ */
+void account_page_dirtied(struct page *page, struct address_space *mapping)
+{
+ if (mapping_cap_account_dirty(mapping)) {
+ __inc_zone_page_state(page, NR_FILE_DIRTY);
+ __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
+ task_dirty_inc(current);
+ task_io_account_write(PAGE_CACHE_SIZE);
+ }
+}
+
+/*
* For address_spaces which do not use buffers. Just tag the page as dirty in
* its radix tree.
*
@@ -1226,13 +1242,7 @@ int __set_page_dirty_nobuffers(struct page *page)
if (mapping2) { /* Race with truncate? */
BUG_ON(mapping2 != mapping);
WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
- if (mapping_cap_account_dirty(mapping)) {
- __inc_zone_page_state(page, NR_FILE_DIRTY);
- __inc_bdi_stat(mapping->backing_dev_info,
- BDI_RECLAIMABLE);
- task_dirty_inc(current);
- task_io_account_write(PAGE_CACHE_SIZE);
- }
+ account_page_dirtied(page, mapping);
radix_tree_tag_set(&mapping->page_tree,
page_index(page), PAGECACHE_TAG_DIRTY);
}
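The two intervals are now stored in centiseconds rather than jiffies, so every place that arms the timer multiplies by 10 and converts via msecs_to_jiffies(). A small hedged sketch of the arithmetic used above:

	unsigned int interval_cs = 5 * 100;	/* 5 seconds, expressed in centiseconds */
	unsigned long expires = jiffies + msecs_to_jiffies(interval_cs * 10);

	mod_timer(&wb_timer, expires);		/* same arming as in wb_kupdate() above */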
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a3803ea8c27..0284e528748 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -922,13 +922,10 @@ static void drain_pages(unsigned int cpu)
unsigned long flags;
struct zone *zone;
- for_each_zone(zone) {
+ for_each_populated_zone(zone) {
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
- if (!populated_zone(zone))
- continue;
-
pset = zone_pcp(zone, cpu);
pcp = &pset->pcp;
@@ -1585,7 +1582,8 @@ nofail_alloc:
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
- did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
+ did_some_progress = try_to_free_pages(zonelist, order,
+ gfp_mask, nodemask);
p->reclaim_state = NULL;
lockdep_clear_current_reclaim_state();
@@ -1879,10 +1877,7 @@ void show_free_areas(void)
int cpu;
struct zone *zone;
- for_each_zone(zone) {
- if (!populated_zone(zone))
- continue;
-
+ for_each_populated_zone(zone) {
show_node(zone);
printk("%s per-cpu:\n", zone->name);
@@ -1922,12 +1917,9 @@ void show_free_areas(void)
global_page_state(NR_PAGETABLE),
global_page_state(NR_BOUNCE));
- for_each_zone(zone) {
+ for_each_populated_zone(zone) {
int i;
- if (!populated_zone(zone))
- continue;
-
show_node(zone);
printk("%s"
" free:%lukB"
@@ -1967,12 +1959,9 @@ void show_free_areas(void)
printk("\n");
}
- for_each_zone(zone) {
+ for_each_populated_zone(zone) {
unsigned long nr[MAX_ORDER], flags, order, total = 0;
- if (!populated_zone(zone))
- continue;
-
show_node(zone);
printk("%s: ", zone->name);
@@ -2784,11 +2773,7 @@ static int __cpuinit process_zones(int cpu)
node_set_state(node, N_CPU); /* this node has a cpu */
- for_each_zone(zone) {
-
- if (!populated_zone(zone))
- continue;
-
+ for_each_populated_zone(zone) {
zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
GFP_KERNEL, node);
if (!zone_pcp(zone, cpu))
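The conversions above replace the open-coded for_each_zone() plus populated_zone() pattern with the new iterator. Roughly how the helper is defined in include/linux/mmzone.h (written from memory, so treat as approximate): it folds the populated_zone() test into the loop itself.

#define for_each_populated_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones;		\
	     zone;						\
	     zone = next_zone(zone))				\
		if (!populated_zone(zone))			\
			; /* skip empty zones */		\
		else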
diff --git a/mm/pdflush.c b/mm/pdflush.c
index 15de509b68f..118905e3d78 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -191,7 +191,7 @@ static int pdflush(void *dummy)
/*
* Some configs put our parent kthread in a limited cpuset,
- * which kthread() overrides, forcing cpus_allowed == CPU_MASK_ALL.
+ * which kthread() overrides, forcing cpus_allowed == cpu_all_mask.
* Our needs are more modest - cut back to our cpusets cpus_allowed.
* This is needed as pdflush's are dynamically created and destroyed.
* The boottime pdflush's are easily placed w/o these 2 lines.
diff --git a/mm/percpu.c b/mm/percpu.c
index bfe6a3afaf4..1aa5d8fbca1 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -46,7 +46,8 @@
* - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
*
* - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
- * regular address to percpu pointer and back
+ * regular address to percpu pointer and back if they need to be
+ * different from the default
*
* - use pcpu_setup_first_chunk() during percpu area initialization to
* setup the first chunk containing the kernel static percpu area
@@ -67,11 +68,24 @@
#include <linux/workqueue.h>
#include <asm/cacheflush.h>
+#include <asm/sections.h>
#include <asm/tlbflush.h>
#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
+/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
+#ifndef __addr_to_pcpu_ptr
+#define __addr_to_pcpu_ptr(addr) \
+ (void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr \
+ + (unsigned long)__per_cpu_start)
+#endif
+#ifndef __pcpu_ptr_to_addr
+#define __pcpu_ptr_to_addr(ptr) \
+ (void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr \
+ - (unsigned long)__per_cpu_start)
+#endif
+
struct pcpu_chunk {
struct list_head list; /* linked to pcpu_slot lists */
struct rb_node rb_node; /* key is chunk->vm->addr */
@@ -1013,8 +1027,8 @@ EXPORT_SYMBOL_GPL(free_percpu);
* @get_page_fn: callback to fetch page pointer
* @static_size: the size of static percpu area in bytes
* @reserved_size: the size of reserved percpu area in bytes
- * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
* @dyn_size: free size for dynamic allocation in bytes, -1 for auto
+ * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
* @base_addr: mapped address, NULL for auto
* @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
*
@@ -1039,14 +1053,14 @@ EXPORT_SYMBOL_GPL(free_percpu);
* limited offset range for symbol relocations to guarantee module
* percpu symbols fall inside the relocatable range.
*
+ * @dyn_size, if non-negative, determines the number of bytes
+ * available for dynamic allocation in the first chunk. Specifying
+ * a non-negative value makes percpu leave alone the area beyond
+ * @static_size + @reserved_size + @dyn_size.
+ *
* @unit_size, if non-negative, specifies unit size and must be
* aligned to PAGE_SIZE and equal to or larger than @static_size +
- * @reserved_size + @dyn_size.
- *
- * @dyn_size, if non-negative, limits the number of bytes available
- * for dynamic allocation in the first chunk. Specifying non-negative
- * value make percpu leave alone the area beyond @static_size +
- * @reserved_size + @dyn_size.
+ * @reserved_size + @dyn_size (when @dyn_size is non-negative).
*
* Non-null @base_addr means that the caller already allocated virtual
* region for the first chunk and mapped it. percpu must not mess
@@ -1069,12 +1083,14 @@ EXPORT_SYMBOL_GPL(free_percpu);
*/
size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
size_t static_size, size_t reserved_size,
- ssize_t unit_size, ssize_t dyn_size,
+ ssize_t dyn_size, ssize_t unit_size,
void *base_addr,
pcpu_populate_pte_fn_t populate_pte_fn)
{
static struct vm_struct first_vm;
static int smap[2], dmap[2];
+ size_t size_sum = static_size + reserved_size +
+ (dyn_size >= 0 ? dyn_size : 0);
struct pcpu_chunk *schunk, *dchunk = NULL;
unsigned int cpu;
int nr_pages;
@@ -1085,20 +1101,18 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
BUG_ON(!static_size);
if (unit_size >= 0) {
- BUG_ON(unit_size < static_size + reserved_size +
- (dyn_size >= 0 ? dyn_size : 0));
+ BUG_ON(unit_size < size_sum);
BUG_ON(unit_size & ~PAGE_MASK);
- } else {
- BUG_ON(dyn_size >= 0);
+ BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE);
+ } else
BUG_ON(base_addr);
- }
BUG_ON(base_addr && populate_pte_fn);
if (unit_size >= 0)
pcpu_unit_pages = unit_size >> PAGE_SHIFT;
else
pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
- PFN_UP(static_size + reserved_size));
+ PFN_UP(size_sum));
pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
@@ -1224,3 +1238,89 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
return pcpu_unit_size;
}
+
+/*
+ * Embedding first chunk setup helper.
+ */
+static void *pcpue_ptr __initdata;
+static size_t pcpue_size __initdata;
+static size_t pcpue_unit_size __initdata;
+
+static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
+{
+ size_t off = (size_t)pageno << PAGE_SHIFT;
+
+ if (off >= pcpue_size)
+ return NULL;
+
+ return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
+}
+
+/**
+ * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
+ * @static_size: the size of static percpu area in bytes
+ * @reserved_size: the size of reserved percpu area in bytes
+ * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
+ * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
+ *
+ * This is a helper to ease setting up embedded first percpu chunk and
+ * can be called where pcpu_setup_first_chunk() is expected.
+ *
+ * If this function is used to setup the first chunk, it is allocated
+ * as a contiguous area using bootmem allocator and used as-is without
+ * being mapped into vmalloc area. This enables the first chunk to
+ * piggy back on the linear physical mapping which often uses larger
+ * page size.
+ *
+ * When @dyn_size is positive, dynamic area might be larger than
+ * specified to fill page alignment. Also, when @dyn_size is auto,
+ * @dyn_size does not fill the whole first chunk but only what's
+ * necessary for page alignment after static and reserved areas.
+ *
+ * If the needed size is smaller than the minimum or specified unit
+ * size, the leftover is returned to the bootmem allocator.
+ *
+ * RETURNS:
+ * The determined pcpu_unit_size which can be used to initialize
+ * percpu access on success, -errno on failure.
+ */
+ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
+ ssize_t dyn_size, ssize_t unit_size)
+{
+ unsigned int cpu;
+
+ /* determine parameters and allocate */
+ pcpue_size = PFN_ALIGN(static_size + reserved_size +
+ (dyn_size >= 0 ? dyn_size : 0));
+ if (dyn_size != 0)
+ dyn_size = pcpue_size - static_size - reserved_size;
+
+ if (unit_size >= 0) {
+ BUG_ON(unit_size < pcpue_size);
+ pcpue_unit_size = unit_size;
+ } else
+ pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
+
+ pcpue_ptr = __alloc_bootmem_nopanic(
+ num_possible_cpus() * pcpue_unit_size,
+ PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+ if (!pcpue_ptr)
+ return -ENOMEM;
+
+ /* return the leftover and copy */
+ for_each_possible_cpu(cpu) {
+ void *ptr = pcpue_ptr + cpu * pcpue_unit_size;
+
+ free_bootmem(__pa(ptr + pcpue_size),
+ pcpue_unit_size - pcpue_size);
+ memcpy(ptr, __per_cpu_load, static_size);
+ }
+
+ /* we're ready, commit */
+ pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
+ pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);
+
+ return pcpu_setup_first_chunk(pcpue_get_page, static_size,
+ reserved_size, dyn_size,
+ pcpue_unit_size, pcpue_ptr, NULL);
+}
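pcpu_embed_first_chunk() is meant to be called from arch setup code in place of a hand-rolled pcpu_setup_first_chunk(). A hedged caller sketch (hypothetical arch code; EXAMPLE_RESERVED_SIZE is a made-up constant standing in for whatever module reserve the arch wants):

	const size_t static_size = __per_cpu_end - __per_cpu_start;
	ssize_t unit_size;

	unit_size = pcpu_embed_first_chunk(static_size, EXAMPLE_RESERVED_SIZE, -1, -1);
	if (unit_size < 0)
		panic("percpu: failed to embed first chunk");
	/* unit_size then feeds the per-cpu offset setup for each possible CPU */

Passing -1 for both @dyn_size and @unit_size lets the helper size the dynamic area and the unit from static_size, the reserve, and PCPU_MIN_UNIT_SIZE.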
diff --git a/mm/readahead.c b/mm/readahead.c
index bec83c15a78..9ce303d4b81 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -17,19 +17,6 @@
#include <linux/pagevec.h>
#include <linux/pagemap.h>
-void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-}
-EXPORT_SYMBOL(default_unplug_io_fn);
-
-struct backing_dev_info default_backing_dev_info = {
- .ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
- .state = 0,
- .capabilities = BDI_CAP_MAP_COPY,
- .unplug_io_fn = default_unplug_io_fn,
-};
-EXPORT_SYMBOL_GPL(default_backing_dev_info);
-
/*
* Initialise a struct file's readahead state. Assumes that the caller has
* memset *ra to zero.
@@ -233,18 +220,6 @@ unsigned long max_sane_readahead(unsigned long nr)
+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}
-static int __init readahead_init(void)
-{
- int err;
-
- err = bdi_init(&default_backing_dev_info);
- if (!err)
- bdi_register(&default_backing_dev_info, NULL, "default");
-
- return err;
-}
-subsys_initcall(readahead_init);
-
/*
* Submit IO for the read-ahead request in file_ra_state.
*/
diff --git a/mm/shmem.c b/mm/shmem.c
index 4103a239ce8..d94d2e9146b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -28,6 +28,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>
+#include <linux/ima.h>
static struct vfsmount *shm_mnt;
@@ -1067,8 +1068,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
swap_duplicate(swap);
BUG_ON(page_mapped(page));
page_cache_release(page); /* pagecache ref */
- set_page_dirty(page);
- unlock_page(page);
+ swap_writepage(page, wbc);
if (inode) {
mutex_lock(&shmem_swaplist_mutex);
/* move instead of add in case we're racing */
@@ -2665,6 +2665,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
if (IS_ERR(file))
return PTR_ERR(file);
+ ima_shm_check(file);
if (vma->vm_file)
fput(vma->vm_file);
vma->vm_file = file;
diff --git a/mm/slob.c b/mm/slob.c
index 596152926a8..4dd6516447f 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -127,9 +127,9 @@ static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);
/*
- * slob_page: True for all slob pages (false for bigblock pages)
+ * is_slob_page: True for all slob pages (false for bigblock pages)
*/
-static inline int slob_page(struct slob_page *sp)
+static inline int is_slob_page(struct slob_page *sp)
{
return PageSlobPage((struct page *)sp);
}
@@ -144,6 +144,11 @@ static inline void clear_slob_page(struct slob_page *sp)
__ClearPageSlobPage((struct page *)sp);
}
+static inline struct slob_page *slob_page(const void *addr)
+{
+ return (struct slob_page *)virt_to_page(addr);
+}
+
/*
* slob_page_free: true for pages on free_slob_pages list.
*/
@@ -231,7 +236,7 @@ static int slob_last(slob_t *s)
return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
-static void *slob_new_page(gfp_t gfp, int order, int node)
+static void *slob_new_pages(gfp_t gfp, int order, int node)
{
void *page;
@@ -248,12 +253,17 @@ static void *slob_new_page(gfp_t gfp, int order, int node)
return page_address(page);
}
+static void slob_free_pages(void *b, int order)
+{
+ free_pages((unsigned long)b, order);
+}
+
/*
* Allocate a slob block within a given slob_page sp.
*/
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
- slob_t *prev, *cur, *aligned = 0;
+ slob_t *prev, *cur, *aligned = NULL;
int delta = 0, units = SLOB_UNITS(size);
for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
@@ -350,10 +360,10 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
/* Not enough space: must allocate a new page */
if (!b) {
- b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
+ b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
if (!b)
- return 0;
- sp = (struct slob_page *)virt_to_page(b);
+ return NULL;
+ sp = slob_page(b);
set_slob_page(sp);
spin_lock_irqsave(&slob_lock, flags);
@@ -385,7 +395,7 @@ static void slob_free(void *block, int size)
return;
BUG_ON(!size);
- sp = (struct slob_page *)virt_to_page(block);
+ sp = slob_page(block);
units = SLOB_UNITS(size);
spin_lock_irqsave(&slob_lock, flags);
@@ -394,10 +404,11 @@ static void slob_free(void *block, int size)
/* Go directly to page allocator. Do not pass slob allocator */
if (slob_page_free(sp))
clear_slob_page_free(sp);
+ spin_unlock_irqrestore(&slob_lock, flags);
clear_slob_page(sp);
free_slob_page(sp);
free_page((unsigned long)b);
- goto out;
+ return;
}
if (!slob_page_free(sp)) {
@@ -466,7 +477,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
void *ret;
- lockdep_trace_alloc(flags);
+ lockdep_trace_alloc(gfp);
if (size < PAGE_SIZE - align) {
if (!size)
@@ -485,7 +496,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
} else {
unsigned int order = get_order(size);
- ret = slob_new_page(gfp | __GFP_COMP, order, node);
+ ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
if (ret) {
struct page *page;
page = virt_to_page(ret);
@@ -508,8 +519,8 @@ void kfree(const void *block)
if (unlikely(ZERO_OR_NULL_PTR(block)))
return;
- sp = (struct slob_page *)virt_to_page(block);
- if (slob_page(sp)) {
+ sp = slob_page(block);
+ if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
slob_free(m, *m + align);
@@ -529,8 +540,8 @@ size_t ksize(const void *block)
if (unlikely(block == ZERO_SIZE_PTR))
return 0;
- sp = (struct slob_page *)virt_to_page(block);
- if (slob_page(sp)) {
+ sp = slob_page(block);
+ if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
return SLOB_UNITS(*m) * SLOB_UNIT;
@@ -593,7 +604,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
SLOB_UNITS(c->size) * SLOB_UNIT,
flags, node);
} else {
- b = slob_new_page(flags, get_order(c->size), node);
+ b = slob_new_pages(flags, get_order(c->size), node);
kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
_RET_IP_, b, c->size,
PAGE_SIZE << get_order(c->size),
@@ -612,7 +623,7 @@ static void __kmem_cache_free(void *b, int size)
if (size < PAGE_SIZE)
slob_free(b, size);
else
- free_pages((unsigned long)b, get_order(size));
+ slob_free_pages(b, get_order(size));
}
static void kmem_rcu_free(struct rcu_head *head)
diff --git a/mm/slub.c b/mm/slub.c
index 816734ed8aa..7aaa121d0ea 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -375,14 +375,8 @@ static struct track *get_track(struct kmem_cache *s, void *object,
static void set_track(struct kmem_cache *s, void *object,
enum track_item alloc, unsigned long addr)
{
- struct track *p;
-
- if (s->offset)
- p = object + s->offset + sizeof(void *);
- else
- p = object + s->inuse;
+ struct track *p = get_track(s, object, alloc);
- p += alloc;
if (addr) {
p->addr = addr;
p->cpu = smp_processor_id();
@@ -1336,7 +1330,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
n = get_node(s, zone_to_nid(zone));
if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
- n->nr_partial > n->min_partial) {
+ n->nr_partial > s->min_partial) {
page = get_partial_node(n);
if (page)
return page;
@@ -1388,7 +1382,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
slab_unlock(page);
} else {
stat(c, DEACTIVATE_EMPTY);
- if (n->nr_partial < n->min_partial) {
+ if (n->nr_partial < s->min_partial) {
/*
* Adding an empty slab to the partial slabs in order
* to avoid page allocator overhead. This slab needs
@@ -1754,7 +1748,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
c = get_cpu_slab(s, smp_processor_id());
debug_check_no_locks_freed(object, c->objsize);
if (!(s->flags & SLAB_DEBUG_OBJECTS))
- debug_check_no_obj_freed(object, s->objsize);
+ debug_check_no_obj_freed(object, c->objsize);
if (likely(page == c->page && c->node >= 0)) {
object[c->offset] = c->freelist;
c->freelist = object;
@@ -1876,6 +1870,7 @@ static inline int calculate_order(int size)
int order;
int min_objects;
int fraction;
+ int max_objects;
/*
* Attempt to find best configuration for a slab. This
@@ -1888,6 +1883,9 @@ static inline int calculate_order(int size)
min_objects = slub_min_objects;
if (!min_objects)
min_objects = 4 * (fls(nr_cpu_ids) + 1);
+ max_objects = (PAGE_SIZE << slub_max_order)/size;
+ min_objects = min(min_objects, max_objects);
+
while (min_objects > 1) {
fraction = 16;
while (fraction >= 4) {
@@ -1897,7 +1895,7 @@ static inline int calculate_order(int size)
return order;
fraction /= 2;
}
- min_objects /= 2;
+ min_objects --;
}
/*
@@ -1960,17 +1958,6 @@ static void
init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
{
n->nr_partial = 0;
-
- /*
- * The larger the object size is, the more pages we want on the partial
- * list to avoid pounding the page allocator excessively.
- */
- n->min_partial = ilog2(s->size);
- if (n->min_partial < MIN_PARTIAL)
- n->min_partial = MIN_PARTIAL;
- else if (n->min_partial > MAX_PARTIAL)
- n->min_partial = MAX_PARTIAL;
-
spin_lock_init(&n->list_lock);
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
@@ -2213,6 +2200,15 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
}
#endif
+static void set_min_partial(struct kmem_cache *s, unsigned long min)
+{
+ if (min < MIN_PARTIAL)
+ min = MIN_PARTIAL;
+ else if (min > MAX_PARTIAL)
+ min = MAX_PARTIAL;
+ s->min_partial = min;
+}
+
/*
* calculate_sizes() determines the order and the distribution of data within
* a slab object.
@@ -2351,6 +2347,11 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
if (!calculate_sizes(s, -1))
goto error;
+ /*
+ * The larger the object size is, the more pages we want on the partial
+ * list to avoid pounding the page allocator excessively.
+ */
+ set_min_partial(s, ilog2(s->size));
s->refcount = 1;
#ifdef CONFIG_NUMA
s->remote_node_defrag_ratio = 1000;
@@ -3904,6 +3905,26 @@ static ssize_t order_show(struct kmem_cache *s, char *buf)
}
SLAB_ATTR(order);
+static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%lu\n", s->min_partial);
+}
+
+static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
+ size_t length)
+{
+ unsigned long min;
+ int err;
+
+ err = strict_strtoul(buf, 10, &min);
+ if (err)
+ return err;
+
+ set_min_partial(s, min);
+ return length;
+}
+SLAB_ATTR(min_partial);
+
static ssize_t ctor_show(struct kmem_cache *s, char *buf)
{
if (s->ctor) {
@@ -4219,6 +4240,7 @@ static struct attribute *slab_attrs[] = {
&object_size_attr.attr,
&objs_per_slab_attr.attr,
&order_attr.attr,
+ &min_partial_attr.attr,
&objects_attr.attr,
&objects_partial_attr.attr,
&total_objects_attr.attr,
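The calculate_order() change above caps min_objects by what the largest allowed slab can actually hold, then steps down one object at a time instead of halving. A hedged worked example of the clamp (assuming 4 KiB pages and the default slub_max_order = 3; the object size is hypothetical):

/*
 * size            = 8192                      hypothetical 8 KiB objects
 * PAGE_SIZE << 3  = 32768                     largest slab allowed
 * max_objects     = 32768 / 8192 = 4
 * min_objects     = min(4 * (fls(nr_cpu_ids) + 1), 4) = 4
 *
 * The fitting loop then tries 4, 3, 2 objects per slab in turn rather
 * than jumping from 4 straight to 2 by halving.
 */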
diff --git a/mm/sparse.c b/mm/sparse.c
index 083f5b63e7a..da432d9f0ae 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -164,9 +164,7 @@ void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
WARN_ON_ONCE(1);
*start_pfn = max_sparsemem_pfn;
*end_pfn = max_sparsemem_pfn;
- }
-
- if (*end_pfn > max_sparsemem_pfn) {
+ } else if (*end_pfn > max_sparsemem_pfn) {
mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
*start_pfn, *end_pfn, max_sparsemem_pfn);
diff --git a/mm/swap.c b/mm/swap.c
index 8adb9feb61e..6e83084c1f6 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -457,29 +457,6 @@ void pagevec_strip(struct pagevec *pvec)
}
/**
- * pagevec_swap_free - try to free swap space from the pages in a pagevec
- * @pvec: pagevec with swapcache pages to free the swap space of
- *
- * The caller needs to hold an extra reference to each page and
- * not hold the page lock on the pages. This function uses a
- * trylock on the page lock so it may not always free the swap
- * space associated with a page.
- */
-void pagevec_swap_free(struct pagevec *pvec)
-{
- int i;
-
- for (i = 0; i < pagevec_count(pvec); i++) {
- struct page *page = pvec->pages[i];
-
- if (PageSwapCache(page) && trylock_page(page)) {
- try_to_free_swap(page);
- unlock_page(page);
- }
- }
-}
-
-/**
* pagevec_lookup - gang pagecache lookup
* @pvec: Where the resulting pages are placed
* @mapping: The address_space to search
diff --git a/mm/util.c b/mm/util.c
index 37eaccdf305..7c122e49f76 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -70,6 +70,36 @@ void *kmemdup(const void *src, size_t len, gfp_t gfp)
EXPORT_SYMBOL(kmemdup);
/**
+ * memdup_user - duplicate memory region from user space
+ *
+ * @src: source address in user space
+ * @len: number of bytes to copy
+ *
+ * Returns an ERR_PTR() on failure.
+ */
+void *memdup_user(const void __user *src, size_t len)
+{
+ void *p;
+
+ /*
+ * Always use GFP_KERNEL, since copy_from_user() can sleep and
+ * cause a page fault, which makes it pointless to use GFP_NOFS
+ * or GFP_ATOMIC.
+ */
+ p = kmalloc_track_caller(len, GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ if (copy_from_user(p, src, len)) {
+ kfree(p);
+ return ERR_PTR(-EFAULT);
+ }
+
+ return p;
+}
+EXPORT_SYMBOL(memdup_user);
+
+/**
* __krealloc - like krealloc() but don't free @p.
* @p: object to reallocate memory for.
* @new_size: how many bytes of memory are required.
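memdup_user() collapses the usual kmalloc() + copy_from_user() pair into one call that returns an ERR_PTR() on failure. A hedged usage sketch (hypothetical ioctl helper, not part of this patch):

static long example_set_buffer(void __user *ubuf, size_t len)
{
	void *buf = memdup_user(ubuf, len);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */
	/* ... consume buf ... */
	kfree(buf);
	return 0;
}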
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index af58324c361..fab19876b4d 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -671,10 +671,7 @@ struct vmap_block {
DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
union {
- struct {
- struct list_head free_list;
- struct list_head dirty_list;
- };
+ struct list_head free_list;
struct rcu_head rcu_head;
};
};
@@ -741,7 +738,6 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
INIT_LIST_HEAD(&vb->free_list);
- INIT_LIST_HEAD(&vb->dirty_list);
vb_idx = addr_to_vb_idx(va->va_start);
spin_lock(&vmap_block_tree_lock);
@@ -772,12 +768,7 @@ static void free_vmap_block(struct vmap_block *vb)
struct vmap_block *tmp;
unsigned long vb_idx;
- spin_lock(&vb->vbq->lock);
- if (!list_empty(&vb->free_list))
- list_del(&vb->free_list);
- if (!list_empty(&vb->dirty_list))
- list_del(&vb->dirty_list);
- spin_unlock(&vb->vbq->lock);
+ BUG_ON(!list_empty(&vb->free_list));
vb_idx = addr_to_vb_idx(vb->va->va_start);
spin_lock(&vmap_block_tree_lock);
@@ -862,11 +853,7 @@ static void vb_free(const void *addr, unsigned long size)
spin_lock(&vb->lock);
bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
- if (!vb->dirty) {
- spin_lock(&vb->vbq->lock);
- list_add(&vb->dirty_list, &vb->vbq->dirty);
- spin_unlock(&vb->vbq->lock);
- }
+
vb->dirty += 1UL << order;
if (vb->dirty == VMAP_BBMAP_BITS) {
BUG_ON(vb->free || !list_empty(&vb->free_list));
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 479e4671939..06e72693b45 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -60,8 +60,8 @@ struct scan_control {
int may_writepage;
- /* Can pages be swapped as part of reclaim? */
- int may_swap;
+ /* Can mapped pages be reclaimed? */
+ int may_unmap;
/* This context's SWAP_CLUSTER_MAX. If freeing memory for
* suspend, we effectively ignore SWAP_CLUSTER_MAX.
@@ -78,6 +78,12 @@ struct scan_control {
/* Which cgroup do we reclaim from */
struct mem_cgroup *mem_cgroup;
+ /*
+ * Nodemask of nodes allowed by the caller. If NULL, all nodes
+ * are scanned.
+ */
+ nodemask_t *nodemask;
+
/* Pluggable isolate pages callback */
unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
unsigned long *scanned, int order, int mode,
@@ -214,8 +220,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
do_div(delta, lru_pages + 1);
shrinker->nr += delta;
if (shrinker->nr < 0) {
- printk(KERN_ERR "%s: nr=%ld\n",
- __func__, shrinker->nr);
+ printk(KERN_ERR "shrink_slab: %pF negative objects to "
+ "delete nr=%ld\n",
+ shrinker->shrink, shrinker->nr);
shrinker->nr = max_pass;
}
@@ -606,7 +613,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
if (unlikely(!page_evictable(page, NULL)))
goto cull_mlocked;
- if (!sc->may_swap && page_mapped(page))
+ if (!sc->may_unmap && page_mapped(page))
goto keep_locked;
/* Double the slab pressure for mapped and swapcache pages */
@@ -1298,17 +1305,11 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
}
__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
pgdeactivate += pgmoved;
- if (buffer_heads_over_limit) {
- spin_unlock_irq(&zone->lru_lock);
- pagevec_strip(&pvec);
- spin_lock_irq(&zone->lru_lock);
- }
__count_zone_vm_events(PGREFILL, zone, pgscanned);
__count_vm_events(PGDEACTIVATE, pgdeactivate);
spin_unlock_irq(&zone->lru_lock);
- if (vm_swap_full())
- pagevec_swap_free(&pvec);
-
+ if (buffer_heads_over_limit)
+ pagevec_strip(&pvec);
pagevec_release(&pvec);
}
@@ -1543,7 +1544,8 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
struct zone *zone;
sc->all_unreclaimable = 1;
- for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+ for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
+ sc->nodemask) {
if (!populated_zone(zone))
continue;
/*
@@ -1688,17 +1690,18 @@ out:
}
unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
- gfp_t gfp_mask)
+ gfp_t gfp_mask, nodemask_t *nodemask)
{
struct scan_control sc = {
.gfp_mask = gfp_mask,
.may_writepage = !laptop_mode,
.swap_cluster_max = SWAP_CLUSTER_MAX,
- .may_swap = 1,
+ .may_unmap = 1,
.swappiness = vm_swappiness,
.order = order,
.mem_cgroup = NULL,
.isolate_pages = isolate_pages_global,
+ .nodemask = nodemask,
};
return do_try_to_free_pages(zonelist, &sc);
@@ -1713,17 +1716,18 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
{
struct scan_control sc = {
.may_writepage = !laptop_mode,
- .may_swap = 1,
+ .may_unmap = 1,
.swap_cluster_max = SWAP_CLUSTER_MAX,
.swappiness = swappiness,
.order = 0,
.mem_cgroup = mem_cont,
.isolate_pages = mem_cgroup_isolate_pages,
+ .nodemask = NULL, /* we don't care about placement */
};
struct zonelist *zonelist;
if (noswap)
- sc.may_swap = 0;
+ sc.may_unmap = 0;
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -1762,7 +1766,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
struct reclaim_state *reclaim_state = current->reclaim_state;
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
- .may_swap = 1,
+ .may_unmap = 1,
.swap_cluster_max = SWAP_CLUSTER_MAX,
.swappiness = vm_swappiness,
.order = order,
@@ -2050,22 +2054,19 @@ unsigned long global_lru_pages(void)
#ifdef CONFIG_PM
/*
* Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
- * from LRU lists system-wide, for given pass and priority, and returns the
- * number of reclaimed pages
+ * from LRU lists system-wide, for given pass and priority.
*
* For pass > 3 we also try to shrink the LRU lists that contain a few pages
*/
-static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
+static void shrink_all_zones(unsigned long nr_pages, int prio,
int pass, struct scan_control *sc)
{
struct zone *zone;
- unsigned long ret = 0;
+ unsigned long nr_reclaimed = 0;
- for_each_zone(zone) {
+ for_each_populated_zone(zone) {
enum lru_list l;
- if (!populated_zone(zone))
- continue;
if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
continue;
@@ -2084,14 +2085,16 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
zone->lru[l].nr_scan = 0;
nr_to_scan = min(nr_pages, lru_pages);
- ret += shrink_list(l, nr_to_scan, zone,
+ nr_reclaimed += shrink_list(l, nr_to_scan, zone,
sc, prio);
- if (ret >= nr_pages)
- return ret;
+ if (nr_reclaimed >= nr_pages) {
+ sc->nr_reclaimed = nr_reclaimed;
+ return;
+ }
}
}
}
- return ret;
+ sc->nr_reclaimed = nr_reclaimed;
}
/*
@@ -2105,13 +2108,11 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
unsigned long shrink_all_memory(unsigned long nr_pages)
{
unsigned long lru_pages, nr_slab;
- unsigned long ret = 0;
int pass;
struct reclaim_state reclaim_state;
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
- .may_swap = 0,
- .swap_cluster_max = nr_pages,
+ .may_unmap = 0,
.may_writepage = 1,
.isolate_pages = isolate_pages_global,
};
@@ -2127,8 +2128,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
if (!reclaim_state.reclaimed_slab)
break;
- ret += reclaim_state.reclaimed_slab;
- if (ret >= nr_pages)
+ sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+ if (sc.nr_reclaimed >= nr_pages)
goto out;
nr_slab -= reclaim_state.reclaimed_slab;
@@ -2147,21 +2148,22 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
/* Force reclaiming mapped pages in the passes #3 and #4 */
if (pass > 2)
- sc.may_swap = 1;
+ sc.may_unmap = 1;
for (prio = DEF_PRIORITY; prio >= 0; prio--) {
- unsigned long nr_to_scan = nr_pages - ret;
+ unsigned long nr_to_scan = nr_pages - sc.nr_reclaimed;
sc.nr_scanned = 0;
- ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
- if (ret >= nr_pages)
+ sc.swap_cluster_max = nr_to_scan;
+ shrink_all_zones(nr_to_scan, prio, pass, &sc);
+ if (sc.nr_reclaimed >= nr_pages)
goto out;
reclaim_state.reclaimed_slab = 0;
shrink_slab(sc.nr_scanned, sc.gfp_mask,
global_lru_pages());
- ret += reclaim_state.reclaimed_slab;
- if (ret >= nr_pages)
+ sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+ if (sc.nr_reclaimed >= nr_pages)
goto out;
if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
@@ -2170,21 +2172,23 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
}
/*
- * If ret = 0, we could not shrink LRUs, but there may be something
- * in slab caches
+ * If sc.nr_reclaimed = 0, we could not shrink LRUs, but there may be
+ * something in slab caches
*/
- if (!ret) {
+ if (!sc.nr_reclaimed) {
do {
reclaim_state.reclaimed_slab = 0;
shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
- ret += reclaim_state.reclaimed_slab;
- } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+ sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+ } while (sc.nr_reclaimed < nr_pages &&
+ reclaim_state.reclaimed_slab > 0);
}
+
out:
current->reclaim_state = NULL;
- return ret;
+ return sc.nr_reclaimed;
}
#endif
@@ -2290,11 +2294,12 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
int priority;
struct scan_control sc = {
.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
- .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+ .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
.swap_cluster_max = max_t(unsigned long, nr_pages,
SWAP_CLUSTER_MAX),
.gfp_mask = gfp_mask,
.swappiness = vm_swappiness,
+ .order = order,
.isolate_pages = isolate_pages_global,
};
unsigned long slab_reclaimable;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 91149746bb8..9826766f127 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -27,7 +27,7 @@ static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
- for_each_cpu_mask_nr(cpu, *cpumask) {
+ for_each_cpu(cpu, cpumask) {
struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
@@ -135,11 +135,7 @@ static void refresh_zone_stat_thresholds(void)
int cpu;
int threshold;
- for_each_zone(zone) {
-
- if (!zone->present_pages)
- continue;
-
+ for_each_populated_zone(zone) {
threshold = calculate_threshold(zone);
for_each_online_cpu(cpu)
@@ -301,12 +297,9 @@ void refresh_cpu_vm_stats(int cpu)
int i;
int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
- for_each_zone(zone) {
+ for_each_populated_zone(zone) {
struct per_cpu_pageset *p;
- if (!populated_zone(zone))
- continue;
-
p = zone_pcp(zone, cpu);
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)