Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig          4
-rw-r--r--  lib/Kconfig.debug    8
-rw-r--r--  lib/Makefile         2
-rw-r--r--  lib/cpu_rmap.c     269
-rw-r--r--  lib/list_debug.c    39
-rw-r--r--  lib/nlattr.c         2
-rw-r--r--  lib/radix-tree.c     7
-rw-r--r--  lib/rbtree.c         3
-rw-r--r--  lib/swiotlb.c        6
-rw-r--r--  lib/textsearch.c    10
-rw-r--r--  lib/xz/Kconfig      12
11 files changed, 328 insertions, 34 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index c70f2a4db35..3a55a43c43e 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -201,6 +201,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
 	bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
 	depends on EXPERIMENTAL && BROKEN
 
+config CPU_RMAP
+	bool
+	depends on SMP
+
 #
 # Netlink attribute parsing support is select'ed if needed
 #
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2d05adb9840..2b97418c67e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -657,7 +657,7 @@ config DEBUG_HIGHMEM
 	  Disable for production systems.
 
 config DEBUG_BUGVERBOSE
-	bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED
+	bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
 	depends on BUG
 	depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
 		   FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300
@@ -729,8 +729,8 @@ config DEBUG_WRITECOUNT
 	  If unsure, say N.
 
 config DEBUG_MEMORY_INIT
-	bool "Debug memory initialisation" if EMBEDDED
-	default !EMBEDDED
+	bool "Debug memory initialisation" if EXPERT
+	default !EXPERT
 	help
 	  Enable this for additional checks during memory initialisation.
 	  The sanity checks verify aspects of the VM such as the memory model
@@ -805,7 +805,7 @@ config ARCH_WANT_FRAME_POINTERS
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && \
-		(CRIS || M68K || M68KNOMMU || FRV || UML || \
+		(CRIS || M68K || FRV || UML || \
 		 AVR32 || SUPERH || BLACKFIN || MN10300) || \
 		ARCH_WANT_FRAME_POINTERS
 	default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
diff --git a/lib/Makefile b/lib/Makefile
index cbb774f7d41..b73ba01a818 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -110,6 +110,8 @@ obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
 
 obj-$(CONFIG_AVERAGE) += average.o
 
+obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h
 
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
new file mode 100644
index 00000000000..987acfafeb8
--- /dev/null
+++ b/lib/cpu_rmap.c
@@ -0,0 +1,269 @@
+/*
+ * cpu_rmap.c: CPU affinity reverse-map support
+ * Copyright 2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/cpu_rmap.h>
+#ifdef CONFIG_GENERIC_HARDIRQS
+#include <linux/interrupt.h>
+#endif
+#include <linux/module.h>
+
+/*
+ * These functions maintain a mapping from CPUs to some ordered set of
+ * objects with CPU affinities.  This can be seen as a reverse-map of
+ * CPU affinity.  However, we do not assume that the object affinities
+ * cover all CPUs in the system.  For those CPUs not directly covered
+ * by object affinities, we attempt to find a nearest object based on
+ * CPU topology.
+ */
+
+/**
+ * alloc_cpu_rmap - allocate CPU affinity reverse-map
+ * @size: Number of objects to be mapped
+ * @flags: Allocation flags e.g. %GFP_KERNEL
+ */
+struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
+{
+	struct cpu_rmap *rmap;
+	unsigned int cpu;
+	size_t obj_offset;
+
+	/* This is a silly number of objects, and we use u16 indices. */
+	if (size > 0xffff)
+		return NULL;
+
+	/* Offset of object pointer array from base structure */
+	obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]),
+			   sizeof(void *));
+
+	rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags);
+	if (!rmap)
+		return NULL;
+
+	rmap->obj = (void **)((char *)rmap + obj_offset);
+
+	/* Initially assign CPUs to objects on a rota, since we have
+	 * no idea where the objects are.  Use infinite distance, so
+	 * any object with known distance is preferable.  Include the
+	 * CPUs that are not present/online, since we definitely want
+	 * any newly-hotplugged CPUs to have some object assigned.
+	 */
+	for_each_possible_cpu(cpu) {
+		rmap->near[cpu].index = cpu % size;
+		rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
+	}
+
+	rmap->size = size;
+	return rmap;
+}
+EXPORT_SYMBOL(alloc_cpu_rmap);
+
+/* Reevaluate nearest object for given CPU, comparing with the given
+ * neighbours at the given distance.
+ */
+static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu,
+				const struct cpumask *mask, u16 dist)
+{
+	int neigh;
+
+	for_each_cpu(neigh, mask) {
+		if (rmap->near[cpu].dist > dist &&
+		    rmap->near[neigh].dist <= dist) {
+			rmap->near[cpu].index = rmap->near[neigh].index;
+			rmap->near[cpu].dist = dist;
+			return true;
+		}
+	}
+	return false;
+}
+
+#ifdef DEBUG
+static void debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
+{
+	unsigned index;
+	unsigned int cpu;
+
+	pr_info("cpu_rmap %p, %s:\n", rmap, prefix);
+
+	for_each_possible_cpu(cpu) {
+		index = rmap->near[cpu].index;
+		pr_info("cpu %d -> obj %u (distance %u)\n",
+			cpu, index, rmap->near[cpu].dist);
+	}
+}
+#else
+static inline void
+debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
+{
+}
+#endif
+
+/**
+ * cpu_rmap_add - add object to a rmap
+ * @rmap: CPU rmap allocated with alloc_cpu_rmap()
+ * @obj: Object to add to rmap
+ *
+ * Return index of object.
+ */
+int cpu_rmap_add(struct cpu_rmap *rmap, void *obj)
+{
+	u16 index;
+
+	BUG_ON(rmap->used >= rmap->size);
+	index = rmap->used++;
+	rmap->obj[index] = obj;
+	return index;
+}
+EXPORT_SYMBOL(cpu_rmap_add);
+
+/**
+ * cpu_rmap_update - update CPU rmap following a change of object affinity
+ * @rmap: CPU rmap to update
+ * @index: Index of object whose affinity changed
+ * @affinity: New CPU affinity of object
+ */
+int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
+		    const struct cpumask *affinity)
+{
+	cpumask_var_t update_mask;
+	unsigned int cpu;
+
+	if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL)))
+		return -ENOMEM;
+
+	/* Invalidate distance for all CPUs for which this used to be
+	 * the nearest object.  Mark those CPUs for update.
+	 */
+	for_each_online_cpu(cpu) {
+		if (rmap->near[cpu].index == index) {
+			rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
+			cpumask_set_cpu(cpu, update_mask);
+		}
+	}
+
+	debug_print_rmap(rmap, "after invalidating old distances");
+
+	/* Set distance to 0 for all CPUs in the new affinity mask.
+	 * Mark all CPUs within their NUMA nodes for update.
+	 */
+	for_each_cpu(cpu, affinity) {
+		rmap->near[cpu].index = index;
+		rmap->near[cpu].dist = 0;
+		cpumask_or(update_mask, update_mask,
+			   cpumask_of_node(cpu_to_node(cpu)));
+	}
+
+	debug_print_rmap(rmap, "after updating neighbours");
+
+	/* Update distances based on topology */
+	for_each_cpu(cpu, update_mask) {
+		if (cpu_rmap_copy_neigh(rmap, cpu,
+					topology_thread_cpumask(cpu), 1))
+			continue;
+		if (cpu_rmap_copy_neigh(rmap, cpu,
+					topology_core_cpumask(cpu), 2))
+			continue;
+		if (cpu_rmap_copy_neigh(rmap, cpu,
+					cpumask_of_node(cpu_to_node(cpu)), 3))
+			continue;
+		/* We could continue into NUMA node distances, but for now
+		 * we give up.
+		 */
+	}
+
+	debug_print_rmap(rmap, "after copying neighbours");
+
+	free_cpumask_var(update_mask);
+	return 0;
+}
+EXPORT_SYMBOL(cpu_rmap_update);
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+
+/* Glue between IRQ affinity notifiers and CPU rmaps */
+
+struct irq_glue {
+	struct irq_affinity_notify notify;
+	struct cpu_rmap *rmap;
+	u16 index;
+};
+
+/**
+ * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
+ * @rmap: Reverse-map allocated with alloc_irq_cpu_map(), or %NULL
+ *
+ * Must be called in process context, before freeing the IRQs, and
+ * without holding any locks required by global workqueue items.
+ */
+void free_irq_cpu_rmap(struct cpu_rmap *rmap)
+{
+	struct irq_glue *glue;
+	u16 index;
+
+	if (!rmap)
+		return;
+
+	for (index = 0; index < rmap->used; index++) {
+		glue = rmap->obj[index];
+		irq_set_affinity_notifier(glue->notify.irq, NULL);
+	}
+	irq_run_affinity_notifiers();
+
+	kfree(rmap);
+}
+EXPORT_SYMBOL(free_irq_cpu_rmap);
+
+static void
+irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
+{
+	struct irq_glue *glue =
+		container_of(notify, struct irq_glue, notify);
+	int rc;
+
+	rc = cpu_rmap_update(glue->rmap, glue->index, mask);
+	if (rc)
+		pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
+}
+
+static void irq_cpu_rmap_release(struct kref *ref)
+{
+	struct irq_glue *glue =
+		container_of(ref, struct irq_glue, notify.kref);
+	kfree(glue);
+}
+
+/**
+ * irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map
+ * @rmap: The reverse-map
+ * @irq: The IRQ number
+ *
+ * This adds an IRQ affinity notifier that will update the reverse-map
+ * automatically.
+ *
+ * Must be called in process context, after the IRQ is allocated but
+ * before it is bound with request_irq().
+ */
+int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
+{
+	struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+	int rc;
+
+	if (!glue)
+		return -ENOMEM;
+	glue->notify.notify = irq_cpu_rmap_notify;
+	glue->notify.release = irq_cpu_rmap_release;
+	glue->rmap = rmap;
+	glue->index = cpu_rmap_add(rmap, glue);
+	rc = irq_set_affinity_notifier(irq, &glue->notify);
+	if (rc)
+		kfree(glue);
+	return rc;
+}
+EXPORT_SYMBOL(irq_cpu_rmap_add);
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
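The interface above is meant to be driven by a multiqueue driver: allocate one rmap entry per queue, register each queue's IRQ so the affinity notifier keeps the map current, then look up the nearest queue for a given CPU when steering work. A minimal sketch of that flow follows; the driver-side names (my_dev_setup_rmap, my_irqs, nr_queues, my_dev_pick_queue) are hypothetical, and cpu_rmap_lookup_index() is assumed from <linux/cpu_rmap.h>, which is outside this lib/ diff:

	/* Hedged usage sketch, not part of this patch. */
	#include <linux/cpu_rmap.h>
	#include <linux/interrupt.h>

	static struct cpu_rmap *rx_cpu_rmap;

	static int my_dev_setup_rmap(int *my_irqs, unsigned int nr_queues)
	{
		unsigned int i;
		int rc;

		/* One entry per RX queue; indices handed out by
		 * irq_cpu_rmap_add() follow registration order.
		 */
		rx_cpu_rmap = alloc_cpu_rmap(nr_queues, GFP_KERNEL);
		if (!rx_cpu_rmap)
			return -ENOMEM;

		for (i = 0; i < nr_queues; i++) {
			/* After the IRQ is allocated, before request_irq() */
			rc = irq_cpu_rmap_add(rx_cpu_rmap, my_irqs[i]);
			if (rc) {
				/* Unregisters the notifiers added so far */
				free_irq_cpu_rmap(rx_cpu_rmap);
				return rc;
			}
		}
		return 0;
	}

	/* Which queue is currently "nearest" to this CPU?  Caller must have
	 * preemption disabled for smp_processor_id() to be meaningful.
	 */
	static u16 my_dev_pick_queue(void)
	{
		return cpu_rmap_lookup_index(rx_cpu_rmap, smp_processor_id());
	}

At teardown, free_irq_cpu_rmap() must be called before the IRQs themselves are freed, as the kernel-doc above notes.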
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 344c710d16c..b8029a5583f 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -35,6 +35,31 @@ void __list_add(struct list_head *new,
 }
 EXPORT_SYMBOL(__list_add);
 
+void __list_del_entry(struct list_head *entry)
+{
+	struct list_head *prev, *next;
+
+	prev = entry->prev;
+	next = entry->next;
+
+	if (WARN(next == LIST_POISON1,
+		"list_del corruption, %p->next is LIST_POISON1 (%p)\n",
+		entry, LIST_POISON1) ||
+	    WARN(prev == LIST_POISON2,
+		"list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
+		entry, LIST_POISON2) ||
+	    WARN(prev->next != entry,
+		"list_del corruption. prev->next should be %p, "
+		"but was %p\n", entry, prev->next) ||
+	    WARN(next->prev != entry,
+		"list_del corruption. next->prev should be %p, "
+		"but was %p\n", entry, next->prev))
+		return;
+
+	__list_del(prev, next);
+}
+EXPORT_SYMBOL(__list_del_entry);
+
 /**
  * list_del - deletes entry from list.
  * @entry: the element to delete from the list.
@@ -43,19 +68,7 @@ EXPORT_SYMBOL(__list_add);
  */
 void list_del(struct list_head *entry)
 {
-	WARN(entry->next == LIST_POISON1,
-		"list_del corruption, next is LIST_POISON1 (%p)\n",
-		LIST_POISON1);
-	WARN(entry->next != LIST_POISON1 && entry->prev == LIST_POISON2,
-		"list_del corruption, prev is LIST_POISON2 (%p)\n",
-		LIST_POISON2);
-	WARN(entry->prev->next != entry,
-		"list_del corruption. prev->next should be %p, "
-		"but was %p\n", entry, entry->prev->next);
-	WARN(entry->next->prev != entry,
-		"list_del corruption. next->prev should be %p, "
-		"but was %p\n", entry, entry->next->prev);
-	__list_del(entry->prev, entry->next);
+	__list_del_entry(entry);
 	entry->next = LIST_POISON1;
 	entry->prev = LIST_POISON2;
 }
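The point of splitting out __list_del_entry() is that the corruption checks and the unlink can be reused by callers that do not want the entry poisoned afterwards. A sketch of the kind of caller this enables (not part of this diff; list_del_init() in <linux/list.h> is the obvious in-tree candidate, the helper name here is hypothetical):

	/* Remove an entry with the full debug checks, then reinitialise it so
	 * it can go straight back onto another list; no poisoning is done.
	 */
	static inline void my_list_del_init(struct list_head *entry)
	{
		__list_del_entry(entry);
		INIT_LIST_HEAD(entry);
	}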
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 5021cbc3441..ac09f2226dc 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -148,7 +148,7 @@ nla_policy_len(const struct nla_policy *p, int n)
 {
 	int i, len = 0;
 
-	for (i = 0; i < n; i++) {
+	for (i = 0; i < n; i++, p++) {
 		if (p->len)
 			len += nla_total_size(p->len);
 		else if (nla_attr_minlen[p->type])
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 5086bb962b4..7ea2e033d71 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -736,10 +736,11 @@ next:
 		}
 	}
 	/*
-	 * The iftag must have been set somewhere because otherwise
-	 * we would return immediated at the beginning of the function
+	 * We need not to tag the root tag if there is no tag which is set with
+	 * settag within the range from *first_indexp to last_index.
 	 */
-	root_tag_set(root, settag);
+	if (tagged > 0)
+		root_tag_set(root, settag);
 	*first_indexp = index;
 
 	return tagged;
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 4693f79195d..a16be19a130 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -315,6 +315,7 @@ void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data)
 
 	rb_augment_path(node, func, data);
 }
+EXPORT_SYMBOL(rb_augment_insert);
 
 /*
  * before removing the node, find the deepest node on the rebalance path
@@ -340,6 +341,7 @@ struct rb_node *rb_augment_erase_begin(struct rb_node *node)
 
 	return deepest;
 }
+EXPORT_SYMBOL(rb_augment_erase_begin);
 
 /*
  * after removal, update the tree to account for the removed entry
@@ -350,6 +352,7 @@ void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data)
 	if (node)
 		rb_augment_path(node, func, data);
 }
+EXPORT_SYMBOL(rb_augment_erase_end);
 
 /*
  * This function returns the first node (in sort order) of the tree.
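The newly exported trio is the callback-based augmented-rbtree API: after an ordinary insert, rb_augment_insert() walks back up refreshing per-node metadata, and around rb_erase() the rb_augment_erase_begin()/rb_augment_erase_end() pair does the same along the rebalance path. A sketch of how a module might use these exports, assuming a hypothetical node type that tracks the largest key in each subtree (the callback shape follows the rb_augment_f typedef in <linux/rbtree.h>, which is not part of this diff):

	struct my_node {
		struct rb_node rb;
		unsigned long key;
		unsigned long subtree_max;	/* augmented data */
	};

	/* Recompute one node's augmented data from its children */
	static void my_augment_cb(struct rb_node *rb, void *unused)
	{
		struct my_node *node = rb_entry(rb, struct my_node, rb);
		unsigned long m = node->key;

		if (rb->rb_left) {
			struct my_node *l = rb_entry(rb->rb_left, struct my_node, rb);
			if (l->subtree_max > m)
				m = l->subtree_max;
		}
		if (rb->rb_right) {
			struct my_node *r = rb_entry(rb->rb_right, struct my_node, rb);
			if (r->subtree_max > m)
				m = r->subtree_max;
		}
		node->subtree_max = m;
	}

	static void my_insert_done(struct rb_root *root, struct my_node *node)
	{
		/* ...after the usual rb_link_node() + rb_insert_color()... */
		rb_augment_insert(&node->rb, my_augment_cb, NULL);
	}

	static void my_erase(struct rb_root *root, struct my_node *node)
	{
		struct rb_node *deepest = rb_augment_erase_begin(&node->rb);

		rb_erase(&node->rb, root);
		rb_augment_erase_end(deepest, my_augment_cb, NULL);
	}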
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index c47bbe11b80..93ca08b8a45 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -686,8 +686,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (!dma_capable(dev, dev_addr, size))
-		panic("map_single: bounce buffer is not DMA'ble");
+	if (!dma_capable(dev, dev_addr, size)) {
+		swiotlb_tbl_unmap_single(dev, map, size, dir);
+		dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer);
+	}
 
 	return dev_addr;
 }
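Instead of panicking when the bounce buffer falls outside the device's DMA mask, swiotlb_map_page() now unmaps and hands back the overflow buffer address, which swiotlb's dma_mapping_error() implementation reports as a failure. Callers therefore need to check the mapping result; a hedged sketch of the expected driver-side pattern (the function and buffer names are illustrative):

	#include <linux/dma-mapping.h>

	/* Map a buffer for device access, failing gracefully instead of
	 * relying on swiotlb to panic when the mapping is not DMA'ble.
	 */
	static int my_map_buffer(struct device *dev, void *buf, size_t len,
				 dma_addr_t *out)
	{
		dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, addr))
			return -ENOMEM;	/* drop or retry, never hand this to hardware */

		*out = addr;
		return 0;
	}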
diff --git a/lib/textsearch.c b/lib/textsearch.c
index d608331b3e4..e0cc0146ae6 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -13,7 +13,7 @@
  *
  * INTRODUCTION
  *
- *   The textsearch infrastructure provides text searching facitilies for
+ *   The textsearch infrastructure provides text searching facilities for
  *   both linear and non-linear data. Individual search algorithms are
  *   implemented in modules and chosen by the user.
  *
@@ -43,7 +43,7 @@
  *       to the algorithm to store persistent variables.
  *   (4) Core eventually resets the search offset and forwards the find()
  *       request to the algorithm.
- *   (5) Algorithm calls get_next_block() provided by the user continously
+ *   (5) Algorithm calls get_next_block() provided by the user continuously
  *       to fetch the data to be searched in block by block.
  *   (6) Algorithm invokes finish() after the last call to get_next_block
  *       to clean up any leftovers from get_next_block. (Optional)
@@ -58,15 +58,15 @@
  *   the pattern to look for and flags. As a flag, you can set TS_IGNORECASE
  *   to perform case insensitive matching. But it might slow down
  *   performance of algorithm, so you should use it at own your risk.
- *   The returned configuration may then be used for an arbitary
+ *   The returned configuration may then be used for an arbitrary
  *   amount of times and even in parallel as long as a separate struct
  *   ts_state variable is provided to every instance.
  *
  *   The actual search is performed by either calling textsearch_find_-
  *   continuous() for linear data or by providing an own get_next_block()
  *   implementation and calling textsearch_find(). Both functions return
- *   the position of the first occurrence of the patern or UINT_MAX if
- *   no match was found. Subsequent occurences can be found by calling
+ *   the position of the first occurrence of the pattern or UINT_MAX if
+ *   no match was found. Subsequent occurrences can be found by calling
  *   textsearch_next() regardless of the linearity of the data.
  *
  *   Once you're done using a configuration it must be given back via
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index e3b6e18fdac..60a6088d0e5 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -7,37 +7,37 @@ config XZ_DEC
 	  CRC32 is supported. See Documentation/xz.txt for more information.
 
 config XZ_DEC_X86
-	bool "x86 BCJ filter decoder" if EMBEDDED
+	bool "x86 BCJ filter decoder" if EXPERT
 	default y
 	depends on XZ_DEC
 	select XZ_DEC_BCJ
 
 config XZ_DEC_POWERPC
-	bool "PowerPC BCJ filter decoder" if EMBEDDED
+	bool "PowerPC BCJ filter decoder" if EXPERT
 	default y
 	depends on XZ_DEC
 	select XZ_DEC_BCJ
 
 config XZ_DEC_IA64
-	bool "IA-64 BCJ filter decoder" if EMBEDDED
+	bool "IA-64 BCJ filter decoder" if EXPERT
 	default y
 	depends on XZ_DEC
 	select XZ_DEC_BCJ
 
 config XZ_DEC_ARM
-	bool "ARM BCJ filter decoder" if EMBEDDED
+	bool "ARM BCJ filter decoder" if EXPERT
 	default y
 	depends on XZ_DEC
 	select XZ_DEC_BCJ
 
 config XZ_DEC_ARMTHUMB
-	bool "ARM-Thumb BCJ filter decoder" if EMBEDDED
+	bool "ARM-Thumb BCJ filter decoder" if EXPERT
 	default y
 	depends on XZ_DEC
 	select XZ_DEC_BCJ
 
 config XZ_DEC_SPARC
-	bool "SPARC BCJ filter decoder" if EMBEDDED
+	bool "SPARC BCJ filter decoder" if EXPERT
 	default y
 	depends on XZ_DEC
 	select XZ_DEC_BCJ
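For reference, the textsearch API whose documentation is corrected above can be exercised on linear data in a few lines. This is a sketch modelled on that header documentation, not part of the diff; the algorithm name ("kmp") and pattern are chosen arbitrarily:

	#include <linux/textsearch.h>

	/* Report every occurrence of "abc" in a linear buffer using the
	 * Knuth-Morris-Pratt implementation.
	 */
	static void my_search(const void *data, unsigned int len)
	{
		struct ts_config *conf;
		struct ts_state state;
		unsigned int pos;

		conf = textsearch_prepare("kmp", "abc", 3, GFP_KERNEL, TS_AUTOLOAD);
		if (IS_ERR(conf))
			return;

		pos = textsearch_find_continuous(conf, &state, data, len);
		while (pos != UINT_MAX) {
			pr_info("pattern found at offset %u\n", pos);
			pos = textsearch_next(conf, &state);
		}

		textsearch_destroy(conf);
	}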