author		Ingo Molnar <mingo@elte.hu>	2008-10-12 12:43:21 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-10-12 12:43:21 +0200
commit		acbaa41a780490c791492c41144c774c04875af1
tree		31f1f046875eb071e2aed031e5d9d1584742314f /lib
parent		8d89adf44cf750e49691ba5b744b2ad77a05e997
parent		fd048088306656824958e7783ffcee27e241b361
Merge branch 'linus' into x86/quirks
Conflicts:
	arch/x86/kernel/early-quirks.c
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug    | 48
-rw-r--r--  lib/Makefile         |  3
-rw-r--r--  lib/iommu-helper.c   |  5
-rw-r--r--  lib/klist.c          | 96
-rw-r--r--  lib/percpu_counter.c |  8
-rw-r--r--  lib/string_helpers.c | 64
-rw-r--r--  lib/swiotlb.c        | 49
7 files changed, 208 insertions(+), 65 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0b504814e37..ce697e0b319 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -597,6 +597,19 @@ config RCU_TORTURE_TEST_RUNNABLE
Say N here if you want the RCU torture tests to start only
after being manually enabled via /proc.
+config RCU_CPU_STALL_DETECTOR
+ bool "Check for stalled CPUs delaying RCU grace periods"
+ depends on CLASSIC_RCU
+ default n
+ help
+ This option causes RCU to printk information on which
+ CPUs are delaying the current grace period, but only when
+ the grace period extends for excessive time periods.
+
+ Say Y if you want RCU to perform such checks.
+
+ Say N if you are unsure.
+
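For context, a stall is caused by a CPU that never reaches a quiescent state while a grace period is pending. A minimal, purely illustrative sketch of code that would trip this detector; the module name and delay are made up, and this should never be loaded on a real system:

/*
 * Purely illustrative: a reader that refuses to leave its read-side
 * critical section keeps the current grace period from completing.
 * With CONFIG_RCU_CPU_STALL_DETECTOR=y the kernel would eventually
 * printk which CPUs are holding things up.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>

static int __init rcu_stall_demo_init(void)
{
	rcu_read_lock();	/* enter a read-side critical section */
	mdelay(60 * 1000);	/* busy-wait well past the stall threshold */
	rcu_read_unlock();
	return 0;
}
module_init(rcu_stall_demo_init);

MODULE_LICENSE("GPL");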
config KPROBES_SANITY_TEST
bool "Kprobes sanity tests"
depends on DEBUG_KERNEL
@@ -624,6 +637,28 @@ config BACKTRACE_SELF_TEST
Say N if you are unsure.
+config DEBUG_BLOCK_EXT_DEVT
+ bool "Force extended block device numbers and spread them"
+ depends on DEBUG_KERNEL
+ depends on BLOCK
+ default n
+ help
+ Conventionally, block device numbers are allocated from a
+ predetermined contiguous area. However, the extended block device
+ area may introduce non-contiguous block device numbers. This
+ option forces most block device numbers to be allocated from
+ the extended space and spreads them to discover kernel or
+ userland code paths which assume predetermined contiguous
+ device number allocation.
+
+ Note that turning on this debug option shuffles all the
+ device numbers for all IDE and SCSI devices including libata
+ ones, so a root partition specified by device number
+ directly (via rdev or root=MAJ:MIN) won't work anymore.
+ Textual device names (root=/dev/sdXn) will continue to work.
+
+ Say N if you are unsure.
+
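The "code paths which assume predetermined contiguous device number allocation" are typically ones that hard-code major/minor arithmetic instead of resolving devices by name. A hypothetical sketch of the fragile pattern this option is meant to flush out (the function is made up for illustration):

/*
 * Hypothetical example of the assumption CONFIG_DEBUG_BLOCK_EXT_DEVT
 * exposes: deriving a partition's dev_t from hard-coded major/minor
 * arithmetic instead of looking the device up by name.  With the
 * option enabled, device numbers are spread over the extended range,
 * so this kind of guesswork stops matching reality.
 */
#include <linux/types.h>
#include <linux/kdev_t.h>

static dev_t guess_sda_partition(int partno)
{
	/* assumes /dev/sda is always 8:0 and its partitions are 8:1, 8:2, ... */
	return MKDEV(8, partno);
}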
config LKDTM
tristate "Linux Kernel Dump Test Tool Module"
depends on DEBUG_KERNEL
@@ -661,10 +696,21 @@ config FAIL_PAGE_ALLOC
config FAIL_MAKE_REQUEST
bool "Fault-injection capability for disk IO"
- depends on FAULT_INJECTION
+ depends on FAULT_INJECTION && BLOCK
help
Provide fault-injection capability for disk IO.
+config FAIL_IO_TIMEOUT
+ bool "Faul-injection capability for faking disk interrupts"
+ depends on FAULT_INJECTION && BLOCK
+ help
+ Provide fault-injection capability on end IO handling. This
+ will make the block layer "forget" an interrupt as configured,
+ thus exercising the error handling.
+
+ Only works with drivers that use the generic timeout handling;
+ for others it won't do anything.
+
config FAULT_INJECTION_DEBUG_FS
bool "Debugfs entries for fault-injection capabilities"
depends on FAULT_INJECTION && SYSFS && DEBUG_FS
diff --git a/lib/Makefile b/lib/Makefile
index 3b1f94bbe9d..44001af76a7 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -19,7 +19,8 @@ lib-$(CONFIG_SMP) += cpumask.o
lib-y += kobject.o kref.o klist.o
obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
- bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o
+ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
+ string_helpers.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index a3b8d4c3f77..5d90074dca7 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -30,8 +30,7 @@ again:
return index;
}
-static inline void set_bit_area(unsigned long *map, unsigned long i,
- int len)
+void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
{
unsigned long end = i + len;
while (i < end) {
@@ -64,7 +63,7 @@ again:
start = index + 1;
goto again;
}
- set_bit_area(map, index, nr);
+ iommu_area_reserve(map, index, nr);
}
return index;
}
diff --git a/lib/klist.c b/lib/klist.c
index cca37f96faa..bbdd3015c2c 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -37,6 +37,37 @@
#include <linux/klist.h>
#include <linux/module.h>
+/*
+ * Use the lowest bit of n_klist to mark deleted nodes and exclude
+ * dead ones from iteration.
+ */
+#define KNODE_DEAD 1LU
+#define KNODE_KLIST_MASK ~KNODE_DEAD
+
+static struct klist *knode_klist(struct klist_node *knode)
+{
+ return (struct klist *)
+ ((unsigned long)knode->n_klist & KNODE_KLIST_MASK);
+}
+
+static bool knode_dead(struct klist_node *knode)
+{
+ return (unsigned long)knode->n_klist & KNODE_DEAD;
+}
+
+static void knode_set_klist(struct klist_node *knode, struct klist *klist)
+{
+ knode->n_klist = klist;
+ /* no knode deserves to start its life dead */
+ WARN_ON(knode_dead(knode));
+}
+
+static void knode_kill(struct klist_node *knode)
+{
+ /* and no knode should die twice ever either, see we're very humane */
+ WARN_ON(knode_dead(knode));
+ *(unsigned long *)&knode->n_klist |= KNODE_DEAD;
+}
/**
* klist_init - Initialize a klist structure.
@@ -79,7 +110,7 @@ static void klist_node_init(struct klist *k, struct klist_node *n)
INIT_LIST_HEAD(&n->n_node);
init_completion(&n->n_removed);
kref_init(&n->n_ref);
- n->n_klist = k;
+ knode_set_klist(n, k);
if (k->get)
k->get(n);
}
@@ -115,7 +146,7 @@ EXPORT_SYMBOL_GPL(klist_add_tail);
*/
void klist_add_after(struct klist_node *n, struct klist_node *pos)
{
- struct klist *k = pos->n_klist;
+ struct klist *k = knode_klist(pos);
klist_node_init(k, n);
spin_lock(&k->k_lock);
@@ -131,7 +162,7 @@ EXPORT_SYMBOL_GPL(klist_add_after);
*/
void klist_add_before(struct klist_node *n, struct klist_node *pos)
{
- struct klist *k = pos->n_klist;
+ struct klist *k = knode_klist(pos);
klist_node_init(k, n);
spin_lock(&k->k_lock);
@@ -144,9 +175,10 @@ static void klist_release(struct kref *kref)
{
struct klist_node *n = container_of(kref, struct klist_node, n_ref);
+ WARN_ON(!knode_dead(n));
list_del(&n->n_node);
complete(&n->n_removed);
- n->n_klist = NULL;
+ knode_set_klist(n, NULL);
}
static int klist_dec_and_del(struct klist_node *n)
@@ -154,22 +186,29 @@ static int klist_dec_and_del(struct klist_node *n)
return kref_put(&n->n_ref, klist_release);
}
-/**
- * klist_del - Decrement the reference count of node and try to remove.
- * @n: node we're deleting.
- */
-void klist_del(struct klist_node *n)
+static void klist_put(struct klist_node *n, bool kill)
{
- struct klist *k = n->n_klist;
+ struct klist *k = knode_klist(n);
void (*put)(struct klist_node *) = k->put;
spin_lock(&k->k_lock);
+ if (kill)
+ knode_kill(n);
if (!klist_dec_and_del(n))
put = NULL;
spin_unlock(&k->k_lock);
if (put)
put(n);
}
+
+/**
+ * klist_del - Decrement the reference count of node and try to remove.
+ * @n: node we're deleting.
+ */
+void klist_del(struct klist_node *n)
+{
+ klist_put(n, true);
+}
EXPORT_SYMBOL_GPL(klist_del);
/**
@@ -206,7 +245,6 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i,
struct klist_node *n)
{
i->i_klist = k;
- i->i_head = &k->k_list;
i->i_cur = n;
if (n)
kref_get(&n->n_ref);
@@ -237,7 +275,7 @@ EXPORT_SYMBOL_GPL(klist_iter_init);
void klist_iter_exit(struct klist_iter *i)
{
if (i->i_cur) {
- klist_del(i->i_cur);
+ klist_put(i->i_cur, false);
i->i_cur = NULL;
}
}
@@ -258,27 +296,33 @@ static struct klist_node *to_klist_node(struct list_head *n)
*/
struct klist_node *klist_next(struct klist_iter *i)
{
- struct list_head *next;
- struct klist_node *lnode = i->i_cur;
- struct klist_node *knode = NULL;
void (*put)(struct klist_node *) = i->i_klist->put;
+ struct klist_node *last = i->i_cur;
+ struct klist_node *next;
spin_lock(&i->i_klist->k_lock);
- if (lnode) {
- next = lnode->n_node.next;
- if (!klist_dec_and_del(lnode))
+
+ if (last) {
+ next = to_klist_node(last->n_node.next);
+ if (!klist_dec_and_del(last))
put = NULL;
} else
- next = i->i_head->next;
+ next = to_klist_node(i->i_klist->k_list.next);
- if (next != i->i_head) {
- knode = to_klist_node(next);
- kref_get(&knode->n_ref);
+ i->i_cur = NULL;
+ while (next != to_klist_node(&i->i_klist->k_list)) {
+ if (likely(!knode_dead(next))) {
+ kref_get(&next->n_ref);
+ i->i_cur = next;
+ break;
+ }
+ next = to_klist_node(next->n_node.next);
}
- i->i_cur = knode;
+
spin_unlock(&i->i_klist->k_lock);
- if (put && lnode)
- put(lnode);
- return knode;
+
+ if (put && last)
+ put(last);
+ return i->i_cur;
}
EXPORT_SYMBOL_GPL(klist_next);
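The KNODE_DEAD scheme above works because struct klist is at least word-aligned, so the low bit of the n_klist pointer is always zero and can carry a one-bit flag. A standalone sketch of the same pointer-tagging idea, with made-up names:

/*
 * Standalone sketch of the pointer-tagging idea behind KNODE_DEAD:
 * the low bit of a pointer to a word-aligned object is always zero,
 * so it can carry a "dead" flag.  Names are made up for illustration.
 */
#define TAG_DEAD	1UL

struct owner {
	long payload;			/* stands in for struct klist */
};

static struct owner *tag_strip(void *tagged)
{
	return (struct owner *)((unsigned long)tagged & ~TAG_DEAD);
}

static int tag_is_dead(void *tagged)
{
	return (unsigned long)tagged & TAG_DEAD;
}

static void *tag_mark_dead(void *tagged)
{
	return (void *)((unsigned long)tagged | TAG_DEAD);
}

klist_next() can then skip nodes whose back-pointer carries the flag without having to unlink them while an iterator still holds a reference.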
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 4a8ba4bf5f6..a8663890a88 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
* Add up all the per-cpu counts, return the result. This is a more accurate
* but much slower version of percpu_counter_read_positive()
*/
-s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
s64 ret;
int cpu;
@@ -62,11 +62,9 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
for_each_online_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
- if (set)
- *pcount = 0;
+ *pcount = 0;
}
- if (set)
- fbc->count = ret;
+ fbc->count = ret;
spin_unlock(&fbc->lock);
return ret;
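With the 'set' parameter gone, __percpu_counter_sum() always folds the per-cpu deltas back into fbc->count. A hedged usage sketch of the resulting API (the counter name is made up; percpu_counter_init() in this kernel era takes no gfp argument):

/*
 * Hedged usage sketch: the counter name and functions are made up.
 */
#include <linux/types.h>
#include <linux/percpu_counter.h>

static struct percpu_counter nr_widgets;

static int widgets_setup(void)
{
	int err = percpu_counter_init(&nr_widgets, 0);
	if (err)
		return err;

	percpu_counter_inc(&nr_widgets);	/* cheap, stays per-cpu */
	return 0;
}

static s64 widgets_exact_count(void)
{
	/* slow path: folds every cpu's delta into the central count */
	return percpu_counter_sum(&nr_widgets);
}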
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
new file mode 100644
index 00000000000..8347925030f
--- /dev/null
+++ b/lib/string_helpers.c
@@ -0,0 +1,64 @@
+/*
+ * Helpers for formatting and printing strings
+ *
+ * Copyright 31 August 2008 James Bottomley
+ */
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/string_helpers.h>
+
+/**
+ * string_get_size - get the size in the specified units
+ * @size: The size to be converted
+ * @units: units to use (powers of 1000 or 1024)
+ * @buf: buffer to format to
+ * @len: length of buffer
+ *
+ * This function returns a string formatted to 3 significant figures
+ * giving the size in the required units. Returns 0 on success or
+ * error on failure. @buf is always zero terminated.
+ *
+ */
+int string_get_size(u64 size, const enum string_size_units units,
+ char *buf, int len)
+{
+ const char *units_10[] = { "B", "KB", "MB", "GB", "TB", "PB",
+ "EB", "ZB", "YB", NULL};
+ const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
+ "EiB", "ZiB", "YiB", NULL };
+ const char **units_str[] = {
+ [STRING_UNITS_10] = units_10,
+ [STRING_UNITS_2] = units_2,
+ };
+ const int divisor[] = {
+ [STRING_UNITS_10] = 1000,
+ [STRING_UNITS_2] = 1024,
+ };
+ int i, j;
+ u64 remainder = 0, sf_cap;
+ char tmp[8];
+
+ tmp[0] = '\0';
+
+ for (i = 0; size > divisor[units] && units_str[units][i]; i++)
+ remainder = do_div(size, divisor[units]);
+
+ sf_cap = size;
+ for (j = 0; sf_cap*10 < 1000; j++)
+ sf_cap *= 10;
+
+ if (j) {
+ remainder *= 1000;
+ do_div(remainder, divisor[units]);
+ snprintf(tmp, sizeof(tmp), ".%03lld",
+ (unsigned long long)remainder);
+ tmp[j+1] = '\0';
+ }
+
+ snprintf(buf, len, "%lld%s%s", (unsigned long long)size,
+ tmp, units_str[units][i]);
+
+ return 0;
+}
+EXPORT_SYMBOL(string_get_size);
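A hedged usage sketch of the new helper from a caller's point of view (the surrounding function and variable names are made up):

/*
 * Hedged usage sketch: formatting a capacity for a log message.
 */
#include <linux/kernel.h>
#include <linux/string_helpers.h>

static void report_capacity(u64 bytes)
{
	char buf[16];

	/* e.g. 500107862016 -> "500GB" with powers-of-1000 units */
	string_get_size(bytes, STRING_UNITS_10, buf, sizeof(buf));
	printk(KERN_INFO "capacity: %s\n", buf);
}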
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 8826fdf0f18..f8eebd48914 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -274,13 +274,14 @@ cleanup1:
}
static int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
{
- dma_addr_t mask = 0xffffffff;
- /* If the device has a mask, use it, otherwise default to 32 bits */
- if (hwdev && hwdev->dma_mask)
- mask = *hwdev->dma_mask;
- return (addr & ~mask) != 0;
+ return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+}
+
+static int is_swiotlb_buffer(char *addr)
+{
+ return addr >= io_tlb_start && addr < io_tlb_end;
}
/*
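address_needs_mapping() now defers to is_buffer_dma_capable() via dma_get_mask(), so the whole buffer, not just its first byte, is checked against the device's DMA mask. In rough terms the size-aware test amounts to the following sketch (an illustration, not the exact helper from dma-mapping.h):

/*
 * Illustration only: the entire buffer must lie below the device's
 * DMA mask for it to be used without bounce buffering.
 */
#include <linux/types.h>

static int buffer_within_mask(u64 mask, dma_addr_t addr, size_t size)
{
	return addr + size - 1 <= mask;
}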
@@ -467,15 +468,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
void *ret;
int order = get_order(size);
- /*
- * XXX fix me: the DMA API should pass us an explicit DMA mask
- * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
- * bit range instead of a 16MB one).
- */
- flags |= GFP_DMA;
-
ret = (void *)__get_free_pages(flags, order);
- if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
+ if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
/*
* The allocated memory isn't reachable by the device.
* Fall back on swiotlb_map_single().
@@ -490,19 +484,16 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
* swiotlb_map_single(), which will grab memory from
* the lowest available address range.
*/
- dma_addr_t handle;
- handle = swiotlb_map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
- if (swiotlb_dma_mapping_error(hwdev, handle))
+ ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
+ if (!ret)
return NULL;
-
- ret = bus_to_virt(handle);
}
memset(ret, 0, size);
dev_addr = virt_to_bus(ret);
/* Confirm address can be DMA'd by device */
- if (address_needs_mapping(hwdev, dev_addr)) {
+ if (address_needs_mapping(hwdev, dev_addr, size)) {
printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
(unsigned long long)*hwdev->dma_mask,
(unsigned long long)dev_addr);
@@ -518,12 +509,11 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
WARN_ON(irqs_disabled());
- if (!(vaddr >= (void *)io_tlb_start
- && vaddr < (void *)io_tlb_end))
+ if (!is_swiotlb_buffer(vaddr))
free_pages((unsigned long) vaddr, get_order(size));
else
/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
- swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
+ unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
static void
@@ -567,7 +557,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
+ if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force)
return dev_addr;
/*
@@ -584,7 +574,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
/*
* Ensure that the address returned is DMA'ble
*/
- if (address_needs_mapping(hwdev, dev_addr))
+ if (address_needs_mapping(hwdev, dev_addr, size))
panic("map_single: bounce buffer is not DMA'ble");
return dev_addr;
@@ -612,7 +602,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
char *dma_addr = bus_to_virt(dev_addr);
BUG_ON(dir == DMA_NONE);
- if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ if (is_swiotlb_buffer(dma_addr))
unmap_single(hwdev, dma_addr, size, dir);
else if (dir == DMA_FROM_DEVICE)
dma_mark_clean(dma_addr, size);
@@ -642,7 +632,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
char *dma_addr = bus_to_virt(dev_addr);
BUG_ON(dir == DMA_NONE);
- if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ if (is_swiotlb_buffer(dma_addr))
sync_single(hwdev, dma_addr, size, dir, target);
else if (dir == DMA_FROM_DEVICE)
dma_mark_clean(dma_addr, size);
@@ -673,7 +663,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
char *dma_addr = bus_to_virt(dev_addr) + offset;
BUG_ON(dir == DMA_NONE);
- if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ if (is_swiotlb_buffer(dma_addr))
sync_single(hwdev, dma_addr, size, dir, target);
else if (dir == DMA_FROM_DEVICE)
dma_mark_clean(dma_addr, size);
@@ -727,7 +717,8 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
for_each_sg(sgl, sg, nelems, i) {
addr = SG_ENT_VIRT_ADDRESS(sg);
dev_addr = virt_to_bus(addr);
- if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+ if (swiotlb_force ||
+ address_needs_mapping(hwdev, dev_addr, sg->length)) {
void *map = map_single(hwdev, addr, sg->length, dir);
if (!map) {
/* Don't panic here, we expect map_sg users