Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                        |    6
-rw-r--r--  lib/Kconfig.debug                  |   46
-rw-r--r--  lib/Makefile                       |    2
-rw-r--r--  lib/bitmap.c                       |   31
-rw-r--r--  lib/bust_spinlocks.c               |    1
-rw-r--r--  lib/crc-ccitt.c                    |    6
-rw-r--r--  lib/crc16.c                        |   10
-rw-r--r--  lib/crc32.c                        |   54
-rw-r--r--  lib/extable.c                      |    1
-rw-r--r--  lib/genalloc.c                     |  263
-rw-r--r--  lib/idr.c                          |   59
-rw-r--r--  lib/iomap_copy.c                   |   28
-rw-r--r--  lib/kernel_lock.c                  |    4
-rw-r--r--  lib/kobject.c                      |    6
-rw-r--r--  lib/kobject_uevent.c               |    4
-rw-r--r--  lib/libcrc32c.c                    |    2
-rw-r--r--  lib/percpu_counter.c               |   46
-rw-r--r--  lib/plist.c                        |  118
-rw-r--r--  lib/radix-tree.c                   |  197
-rw-r--r--  lib/rbtree.c                       |  189
-rw-r--r--  lib/reed_solomon/reed_solomon.c    |   11
-rw-r--r--  lib/semaphore-sleepers.c           |    1
-rw-r--r--  lib/spinlock_debug.c               |    1
-rw-r--r--  lib/string.c                       |   30
-rw-r--r--  lib/textsearch.c                   |    1
-rw-r--r--  lib/ts_bm.c                        |    1
-rw-r--r--  lib/ts_fsm.c                       |    1
-rw-r--r--  lib/ts_kmp.c                       |    1
-rw-r--r--  lib/vsprintf.c                     |   88
-rw-r--r--  lib/zlib_deflate/deflate.c         |   25
-rw-r--r--  lib/zlib_deflate/deflate_syms.c    |    3
-rw-r--r--  lib/zlib_inflate/Makefile          |    4
-rw-r--r--  lib/zlib_inflate/infblock.c        |  365
-rw-r--r--  lib/zlib_inflate/infblock.h        |   48
-rw-r--r--  lib/zlib_inflate/infcodes.c        |  202
-rw-r--r--  lib/zlib_inflate/infcodes.h        |   33
-rw-r--r--  lib/zlib_inflate/inffast.c         |  462
-rw-r--r--  lib/zlib_inflate/inffast.h         |   12
-rw-r--r--  lib/zlib_inflate/inffixed.h        |   94
-rw-r--r--  lib/zlib_inflate/inflate.c         | 1086
-rw-r--r--  lib/zlib_inflate/inflate.h         |  107
-rw-r--r--  lib/zlib_inflate/inflate_syms.c    |    3
-rw-r--r--  lib/zlib_inflate/inflate_sync.c    |  152
-rw-r--r--  lib/zlib_inflate/inftrees.c        |  677
-rw-r--r--  lib/zlib_inflate/inftrees.h        |   99
-rw-r--r--  lib/zlib_inflate/infutil.c         |   88
-rw-r--r--  lib/zlib_inflate/infutil.h         |  176
47 files changed, 2492 insertions, 2352 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 3de93357f5a..f6299342b88 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -86,4 +86,10 @@ config TEXTSEARCH_BM
config TEXTSEARCH_FSM
tristate
+#
+# plist support is selected if needed
+#
+config PLIST
+ boolean
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ccb0c1fdf1b..e4fcbd12cf6 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -23,6 +23,22 @@ config MAGIC_SYSRQ
keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
unless you really know what this hack does.
+config UNUSED_SYMBOLS
+ bool "Enable unused/obsolete exported symbols"
+ default y if X86
+ help
+ Unused but exported symbols make the kernel needlessly bigger. For
+ that reason most of these unused exports will soon be removed. This
+ option is provided temporarily to ease the transition in case
+ some external kernel module needs one of these symbols anyway. If you
+ encounter such a case in your module, consider if you are actually
+ using the right API. (rationale: since nobody in the kernel is using
+ this in a module, there is a pretty good chance it's actually the
+ wrong interface to use). If you really need the symbol, please send a
+ mail to the linux kernel mailing list mentioning the symbol and why
+ you really need it, and what the merge plan to the mainline kernel for
+ your module is.
+
config DEBUG_KERNEL
bool "Kernel debugging"
help
@@ -107,6 +123,24 @@ config DEBUG_MUTEXES
This allows mutex semantics violations and mutex related deadlocks
(lockups) to be detected and reported automatically.
+config DEBUG_RT_MUTEXES
+ bool "RT Mutex debugging, deadlock detection"
+ depends on DEBUG_KERNEL && RT_MUTEXES
+ help
+ This allows rt mutex semantics violations and rt mutex related
+ deadlocks (lockups) to be detected and reported automatically.
+
+config DEBUG_PI_LIST
+ bool
+ default y
+ depends on DEBUG_RT_MUTEXES
+
+config RT_MUTEX_TESTER
+ bool "Built-in scriptable tester for rt-mutexes"
+ depends on DEBUG_KERNEL && RT_MUTEXES
+ help
+ This option enables an rt-mutex tester.
+
config DEBUG_SPINLOCK
bool "Spinlock debugging"
depends on DEBUG_KERNEL
@@ -188,14 +222,22 @@ config FRAME_POINTER
config UNWIND_INFO
bool "Compile the kernel with frame unwind information"
- depends on !IA64
- depends on !MODULES || !(MIPS || PARISC || PPC || SUPERH || V850)
+ depends on !IA64 && !PARISC
+ depends on !MODULES || !(MIPS || PPC || SUPERH || V850)
help
If you say Y here the resulting kernel image will be slightly larger
but not slower, and it will give very useful debugging information.
If you don't debug the kernel, you can say N, but we may not be able
to solve problems without frame unwind information or frame pointers.
+config STACK_UNWIND
+ bool "Stack unwind support"
+ depends on UNWIND_INFO
+ depends on X86
+ help
+ This enables more precise stack traces, omitting all unrelated
+ occurrences of pointers into kernel code from the dump.
+
config FORCED_INLINING
bool "Force gcc to inline functions marked 'inline'"
depends on DEBUG_KERNEL
diff --git a/lib/Makefile b/lib/Makefile
index b830c9a1554..10c13c9d782 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -25,6 +25,7 @@ lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
+obj-$(CONFIG_PLIST) += plist.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
@@ -46,6 +47,7 @@ obj-$(CONFIG_TEXTSEARCH) += textsearch.o
obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
+obj-$(CONFIG_SMP) += percpu_counter.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index ed2ae3b0cd0..d71e38c54ea 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -317,16 +317,16 @@ EXPORT_SYMBOL(bitmap_scnprintf);
/**
* bitmap_parse - convert an ASCII hex string into a bitmap.
- * @buf: pointer to buffer in user space containing string.
- * @buflen: buffer size in bytes. If string is smaller than this
+ * @ubuf: pointer to buffer in user space containing string.
+ * @ubuflen: buffer size in bytes. If string is smaller than this
* then it must be terminated with a \0.
* @maskp: pointer to bitmap array that will contain result.
* @nmaskbits: size of bitmap, in bits.
*
* Commas group hex digits into chunks. Each chunk defines exactly 32
* bits of the resultant bitmask. No chunk may specify a value larger
- * than 32 bits (-EOVERFLOW), and if a chunk specifies a smaller value
- * then leading 0-bits are prepended. -EINVAL is returned for illegal
+ * than 32 bits (%-EOVERFLOW), and if a chunk specifies a smaller value
+ * then leading 0-bits are prepended. %-EINVAL is returned for illegal
* characters and for grouping errors such as "1,,5", ",44", "," and "".
* Leading and trailing whitespace accepted, but not embedded whitespace.
*/
@@ -452,8 +452,8 @@ EXPORT_SYMBOL(bitmap_scnlistprintf);
/**
* bitmap_parselist - convert list format ASCII string to bitmap
- * @buf: read nul-terminated user string from this buffer
- * @mask: write resulting mask here
+ * @bp: read nul-terminated user string from this buffer
+ * @maskp: write resulting mask here
* @nmaskbits: number of bits in mask to be written
*
* Input format is a comma-separated list of decimal numbers and
@@ -461,10 +461,11 @@ EXPORT_SYMBOL(bitmap_scnlistprintf);
* decimal numbers, the smallest and largest bit numbers set in
* the range.
*
- * Returns 0 on success, -errno on invalid input strings:
- * -EINVAL: second number in range smaller than first
- * -EINVAL: invalid character in string
- * -ERANGE: bit number specified too large for mask
+ * Returns 0 on success, -errno on invalid input strings.
+ * Error values:
+ * %-EINVAL: second number in range smaller than first
+ * %-EINVAL: invalid character in string
+ * %-ERANGE: bit number specified too large for mask
*/
int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
{
@@ -625,10 +626,10 @@ EXPORT_SYMBOL(bitmap_remap);
/**
* bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
- * @oldbit - bit position to be mapped
- * @old: defines domain of map
- * @new: defines range of map
- * @bits: number of bits in each of these bitmaps
+ * @oldbit: bit position to be mapped
+ * @old: defines domain of map
+ * @new: defines range of map
+ * @bits: number of bits in each of these bitmaps
*
* Let @old and @new define a mapping of bit positions, such that
* whatever position is held by the n-th set bit in @old is mapped
@@ -790,7 +791,7 @@ EXPORT_SYMBOL(bitmap_release_region);
*
* Allocate (set bits in) a specified region of a bitmap.
*
- * Return 0 on success, or -EBUSY if specified region wasn't
+ * Return 0 on success, or %-EBUSY if specified region wasn't
* free (not all bits were zero).
*/
int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
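As a quick illustration of the parsing rules documented above, here is a hedged sketch of bitmap_parselist() use; the input string and bitmap size are invented for the example, not taken from this patch.

	/* Illustrative only: "0-3,7" sets bits 0,1,2,3 and 7, i.e. mask = 0x8f. */
	#include <linux/bitmap.h>

	static int example_parselist(void)
	{
		DECLARE_BITMAP(mask, 8);
		int err;

		err = bitmap_parselist("0-3,7", mask, 8);
		if (err)
			return err;		/* %-EINVAL or %-ERANGE on bad input */
		return test_bit(7, mask);	/* 1 for this input */
	}
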
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index 6bb7319e09a..a2055bc3ef6 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -7,7 +7,6 @@
* and panic() information from reaching the user.
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/tty.h>
diff --git a/lib/crc-ccitt.c b/lib/crc-ccitt.c
index 115d149af40..7f6dd68d2d0 100644
--- a/lib/crc-ccitt.c
+++ b/lib/crc-ccitt.c
@@ -53,9 +53,9 @@ EXPORT_SYMBOL(crc_ccitt_table);
/**
* crc_ccitt - recompute the CRC for the data buffer
- * @crc - previous CRC value
- * @buffer - data pointer
- * @len - number of bytes in the buffer
+ * @crc: previous CRC value
+ * @buffer: data pointer
+ * @len: number of bytes in the buffer
*/
u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len)
{
diff --git a/lib/crc16.c b/lib/crc16.c
index 011fe573c66..8737b084d1f 100644
--- a/lib/crc16.c
+++ b/lib/crc16.c
@@ -47,12 +47,12 @@ u16 const crc16_table[256] = {
EXPORT_SYMBOL(crc16_table);
/**
- * Compute the CRC-16 for the data buffer
+ * crc16 - compute the CRC-16 for the data buffer
+ * @crc: previous CRC value
+ * @buffer: data pointer
+ * @len: number of bytes in the buffer
*
- * @param crc previous CRC value
- * @param buffer data pointer
- * @param len number of bytes in the buffer
- * @return the updated CRC value
+ * Returns the updated CRC value.
*/
u16 crc16(u16 crc, u8 const *buffer, size_t len)
{
diff --git a/lib/crc32.c b/lib/crc32.c
index 065198f98b3..285fd9bc61b 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -42,20 +42,21 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
MODULE_DESCRIPTION("Ethernet CRC32 calculations");
MODULE_LICENSE("GPL");
+/**
+ * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
+ * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
+ * other uses, or the previous crc32 value if computing incrementally.
+ * @p: pointer to buffer over which CRC is run
+ * @len: length of buffer @p
+ */
+u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len);
+
#if CRC_LE_BITS == 1
/*
* In fact, the table-based code will work in this case, but it can be
* simplified by inlining the table in ?: form.
*/
-/**
- * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
- * @crc - seed value for computation. ~0 for Ethernet, sometimes 0 for
- * other uses, or the previous crc32 value if computing incrementally.
- * @p - pointer to buffer over which CRC is run
- * @len - length of buffer @p
- *
- */
u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len)
{
int i;
@@ -68,14 +69,6 @@ u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len)
}
#else /* Table-based approach */
-/**
- * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
- * @crc - seed value for computation. ~0 for Ethernet, sometimes 0 for
- * other uses, or the previous crc32 value if computing incrementally.
- * @p - pointer to buffer over which CRC is run
- * @len - length of buffer @p
- *
- */
u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len)
{
# if CRC_LE_BITS == 8
@@ -145,20 +138,21 @@ u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len)
}
#endif
+/**
+ * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
+ * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
+ * other uses, or the previous crc32 value if computing incrementally.
+ * @p: pointer to buffer over which CRC is run
+ * @len: length of buffer @p
+ */
+u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len);
+
#if CRC_BE_BITS == 1
/*
* In fact, the table-based code will work in this case, but it can be
* simplified by inlining the table in ?: form.
*/
-/**
- * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
- * @crc - seed value for computation. ~0 for Ethernet, sometimes 0 for
- * other uses, or the previous crc32 value if computing incrementally.
- * @p - pointer to buffer over which CRC is run
- * @len - length of buffer @p
- *
- */
u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
{
int i;
@@ -173,14 +167,6 @@ u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
}
#else /* Table-based approach */
-/**
- * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
- * @crc - seed value for computation. ~0 for Ethernet, sometimes 0 for
- * other uses, or the previous crc32 value if computing incrementally.
- * @p - pointer to buffer over which CRC is run
- * @len - length of buffer @p
- *
- */
u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
{
# if CRC_BE_BITS == 8
@@ -249,6 +235,10 @@ u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
}
#endif
+/**
+ * bitreverse - reverse the order of bits in a u32 value
+ * @x: value to be bit-reversed
+ */
u32 bitreverse(u32 x)
{
x = (x >> 16) | (x << 16);
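For the seed conventions described in the kernel-doc above, a small incremental-use sketch (buffers and lengths are placeholders):

	/* Illustrative only: CRC over two fragments equals CRC over the concatenation. */
	#include <linux/crc32.h>

	static u32 example_crc32(const u8 *a, size_t alen, const u8 *b, size_t blen)
	{
		u32 crc = ~0;			/* Ethernet-style seed */

		crc = crc32_le(crc, a, alen);	/* first fragment */
		crc = crc32_le(crc, b, blen);	/* continue from the previous value */
		return ~crc;			/* Ethernet also inverts the final value */
	}
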
diff --git a/lib/extable.c b/lib/extable.c
index 01c08b5836f..463f4560f16 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -9,7 +9,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sort.h>
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 9ce0a6a3b85..71338b48e88 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -4,10 +4,6 @@
* Uses for this includes on-device special memory, uncached memory
* etc.
*
- * This code is based on the buddy allocator found in the sym53c8xx_2
- * driver Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>,
- * and adapted for general purpose use.
- *
* Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
*
* This source code is licensed under the GNU General Public License,
@@ -15,172 +11,155 @@
*/
#include <linux/module.h>
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
#include <linux/genalloc.h>
-#include <asm/page.h>
-
-struct gen_pool *gen_pool_create(int nr_chunks, int max_chunk_shift,
- unsigned long (*fp)(struct gen_pool *),
- unsigned long data)
+/*
+ * Create a new special memory pool.
+ *
+ * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
+ * @nid: node id of the node the pool structure should be allocated on, or -1
+ */
+struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
- struct gen_pool *poolp;
- unsigned long tmp;
- int i;
-
- /*
- * This is really an arbitrary limit, +10 is enough for
- * IA64_GRANULE_SHIFT, aka 16MB. If anyone needs a large limit
- * this can be increased without problems.
- */
- if ((max_chunk_shift > (PAGE_SHIFT + 10)) ||
- ((max_chunk_shift < ALLOC_MIN_SHIFT) && max_chunk_shift))
- return NULL;
-
- if (!max_chunk_shift)
- max_chunk_shift = PAGE_SHIFT;
-
- poolp = kmalloc(sizeof(struct gen_pool), GFP_KERNEL);
- if (!poolp)
- return NULL;
- memset(poolp, 0, sizeof(struct gen_pool));
- poolp->h = kmalloc(sizeof(struct gen_pool_link) *
- (max_chunk_shift - ALLOC_MIN_SHIFT + 1),
- GFP_KERNEL);
- if (!poolp->h) {
- printk(KERN_WARNING "gen_pool_alloc() failed to allocate\n");
- kfree(poolp);
- return NULL;
- }
- memset(poolp->h, 0, sizeof(struct gen_pool_link) *
- (max_chunk_shift - ALLOC_MIN_SHIFT + 1));
-
- spin_lock_init(&poolp->lock);
- poolp->get_new_chunk = fp;
- poolp->max_chunk_shift = max_chunk_shift;
- poolp->private = data;
-
- for (i = 0; i < nr_chunks; i++) {
- tmp = poolp->get_new_chunk(poolp);
- printk(KERN_INFO "allocated %lx\n", tmp);
- if (!tmp)
- break;
- gen_pool_free(poolp, tmp, (1 << poolp->max_chunk_shift));
- }
+ struct gen_pool *pool;
- return poolp;
+ pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
+ if (pool != NULL) {
+ rwlock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->chunks);
+ pool->min_alloc_order = min_alloc_order;
+ }
+ return pool;
}
EXPORT_SYMBOL(gen_pool_create);
/*
- * Simple power of two buddy-like generic allocator.
- * Provides naturally aligned memory chunks.
+ * Add a new chunk of memory to the specified pool.
+ *
+ * @pool: pool to add new memory chunk to
+ * @addr: starting address of memory chunk to add to pool
+ * @size: size in bytes of the memory chunk to add to pool
+ * @nid: node id of the node the chunk structure and bitmap should be
+ * allocated on, or -1
*/
-unsigned long gen_pool_alloc(struct gen_pool *poolp, int size)
+int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
+ int nid)
{
- int j, i, s, max_chunk_size;
- unsigned long a, flags;
- struct gen_pool_link *h = poolp->h;
+ struct gen_pool_chunk *chunk;
+ int nbits = size >> pool->min_alloc_order;
+ int nbytes = sizeof(struct gen_pool_chunk) +
+ (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
- max_chunk_size = 1 << poolp->max_chunk_shift;
+ chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
+ if (unlikely(chunk == NULL))
+ return -1;
- if (size > max_chunk_size)
- return 0;
+ memset(chunk, 0, nbytes);
+ spin_lock_init(&chunk->lock);
+ chunk->start_addr = addr;
+ chunk->end_addr = addr + size;
- size = max(size, 1 << ALLOC_MIN_SHIFT);
- i = fls(size - 1);
- s = 1 << i;
- j = i -= ALLOC_MIN_SHIFT;
-
- spin_lock_irqsave(&poolp->lock, flags);
- while (!h[j].next) {
- if (s == max_chunk_size) {
- struct gen_pool_link *ptr;
- spin_unlock_irqrestore(&poolp->lock, flags);
- ptr = (struct gen_pool_link *)poolp->get_new_chunk(poolp);
- spin_lock_irqsave(&poolp->lock, flags);
- h[j].next = ptr;
- if (h[j].next)
- h[j].next->next = NULL;
- break;
- }
- j++;
- s <<= 1;
- }
- a = (unsigned long) h[j].next;
- if (a) {
- h[j].next = h[j].next->next;
- /*
- * This should be split into a seperate function doing
- * the chunk split in order to support custom
- * handling memory not physically accessible by host
- */
- while (j > i) {
- j -= 1;
- s >>= 1;
- h[j].next = (struct gen_pool_link *) (a + s);
- h[j].next->next = NULL;
- }
- }
- spin_unlock_irqrestore(&poolp->lock, flags);
- return a;
+ write_lock(&pool->lock);
+ list_add(&chunk->next_chunk, &pool->chunks);
+ write_unlock(&pool->lock);
+
+ return 0;
}
-EXPORT_SYMBOL(gen_pool_alloc);
+EXPORT_SYMBOL(gen_pool_add);
/*
- * Counter-part of the generic allocator.
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses a first-fit algorithm.
+ *
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
*/
-void gen_pool_free(struct gen_pool *poolp, unsigned long ptr, int size)
+unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
- struct gen_pool_link *q;
- struct gen_pool_link *h = poolp->h;
- unsigned long a, b, flags;
- int i, s, max_chunk_size;
-
- max_chunk_size = 1 << poolp->max_chunk_shift;
+ struct list_head *_chunk;
+ struct gen_pool_chunk *chunk;
+ unsigned long addr, flags;
+ int order = pool->min_alloc_order;
+ int nbits, bit, start_bit, end_bit;
- if (size > max_chunk_size)
- return;
-
- size = max(size, 1 << ALLOC_MIN_SHIFT);
- i = fls(size - 1);
- s = 1 << i;
- i -= ALLOC_MIN_SHIFT;
-
- a = ptr;
+ if (size == 0)
+ return 0;
- spin_lock_irqsave(&poolp->lock, flags);
- while (1) {
- if (s == max_chunk_size) {
- ((struct gen_pool_link *)a)->next = h[i].next;
- h[i].next = (struct gen_pool_link *)a;
- break;
+ nbits = (size + (1UL << order) - 1) >> order;
+
+ read_lock(&pool->lock);
+ list_for_each(_chunk, &pool->chunks) {
+ chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+
+ end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+ end_bit -= nbits + 1;
+
+ spin_lock_irqsave(&chunk->lock, flags);
+ bit = -1;
+ while (bit + 1 < end_bit) {
+ bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
+ if (bit >= end_bit)
+ break;
+
+ start_bit = bit;
+ if (nbits > 1) {
+ bit = find_next_bit(chunk->bits, bit + nbits,
+ bit + 1);
+ if (bit - start_bit < nbits)
+ continue;
+ }
+
+ addr = chunk->start_addr +
+ ((unsigned long)start_bit << order);
+ while (nbits--)
+ __set_bit(start_bit++, &chunk->bits);
+ spin_unlock_irqrestore(&chunk->lock, flags);
+ read_unlock(&pool->lock);
+ return addr;
}
- b = a ^ s;
- q = &h[i];
+ spin_unlock_irqrestore(&chunk->lock, flags);
+ }
+ read_unlock(&pool->lock);
+ return 0;
+}
+EXPORT_SYMBOL(gen_pool_alloc);
- while (q->next && q->next != (struct gen_pool_link *)b)
- q = q->next;
- if (!q->next) {
- ((struct gen_pool_link *)a)->next = h[i].next;
- h[i].next = (struct gen_pool_link *)a;
+/*
+ * Free the specified memory back to the specified pool.
+ *
+ * @pool: pool to free to
+ * @addr: starting address of memory to free back to pool
+ * @size: size in bytes of memory to free
+ */
+void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
+{
+ struct list_head *_chunk;
+ struct gen_pool_chunk *chunk;
+ unsigned long flags;
+ int order = pool->min_alloc_order;
+ int bit, nbits;
+
+ nbits = (size + (1UL << order) - 1) >> order;
+
+ read_lock(&pool->lock);
+ list_for_each(_chunk, &pool->chunks) {
+ chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+
+ if (addr >= chunk->start_addr && addr < chunk->end_addr) {
+ BUG_ON(addr + size > chunk->end_addr);
+ spin_lock_irqsave(&chunk->lock, flags);
+ bit = (addr - chunk->start_addr) >> order;
+ while (nbits--)
+ __clear_bit(bit++, &chunk->bits);
+ spin_unlock_irqrestore(&chunk->lock, flags);
break;
}
- q->next = q->next->next;
- a = a & b;
- s <<= 1;
- i++;
}
- spin_unlock_irqrestore(&poolp->lock, flags);
+ BUG_ON(nbits > 0);
+ read_unlock(&pool->lock);
}
EXPORT_SYMBOL(gen_pool_free);
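A hedged usage sketch of the reworked API above; the region address, size and granule order are placeholders, not values from any real caller.

	/* Illustrative only: carve 256-byte objects out of a caller-provided region. */
	#include <linux/errno.h>
	#include <linux/genalloc.h>

	static int example_genalloc(unsigned long region, size_t region_size)
	{
		struct gen_pool *pool;
		unsigned long obj;

		pool = gen_pool_create(8, -1);		/* 2^8-byte granules, any NUMA node */
		if (pool == NULL)
			return -ENOMEM;
		if (gen_pool_add(pool, region, region_size, -1) < 0)
			return -ENOMEM;

		obj = gen_pool_alloc(pool, 256);	/* first fit; 0 means no space */
		if (!obj)
			return -ENOMEM;
		/* ... use the chunk ... */
		gen_pool_free(pool, obj, 256);
		return 0;
	}
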
diff --git a/lib/idr.c b/lib/idr.c
index d226259c3c2..4d096819511 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -29,6 +29,7 @@
#include <linux/init.h>
#include <linux/module.h>
#endif
+#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
@@ -48,15 +49,21 @@ static struct idr_layer *alloc_layer(struct idr *idp)
return(p);
}
+/* only called when idp->lock is held */
+static void __free_layer(struct idr *idp, struct idr_layer *p)
+{
+ p->ary[0] = idp->id_free;
+ idp->id_free = p;
+ idp->id_free_cnt++;
+}
+
static void free_layer(struct idr *idp, struct idr_layer *p)
{
/*
* Depends on the return element being zeroed.
*/
spin_lock(&idp->lock);
- p->ary[0] = idp->id_free;
- idp->id_free = p;
- idp->id_free_cnt++;
+ __free_layer(idp, p);
spin_unlock(&idp->lock);
}
@@ -184,12 +191,14 @@ build_up:
* The allocation failed. If we built part of
* the structure tear it down.
*/
+ spin_lock(&idp->lock);
for (new = p; p && p != idp->top; new = p) {
p = p->ary[0];
new->ary[0] = NULL;
new->bitmap = new->count = 0;
- free_layer(idp, new);
+ __free_layer(idp, new);
}
+ spin_unlock(&idp->lock);
return -1;
}
new->ary[0] = p;
@@ -390,6 +399,48 @@ void *idr_find(struct idr *idp, int id)
}
EXPORT_SYMBOL(idr_find);
+/**
+ * idr_replace - replace pointer for given id
+ * @idp: idr handle
+ * @ptr: pointer you want associated with the id
+ * @id: lookup key
+ *
+ * Replace the pointer registered with an id and return the old value.
+ * A -ENOENT return indicates that @id was not found.
+ * A -EINVAL return indicates that @id was not within valid constraints.
+ *
+ * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
+ */
+void *idr_replace(struct idr *idp, void *ptr, int id)
+{
+ int n;
+ struct idr_layer *p, *old_p;
+
+ n = idp->layers * IDR_BITS;
+ p = idp->top;
+
+ id &= MAX_ID_MASK;
+
+ if (id >= (1 << n))
+ return ERR_PTR(-EINVAL);
+
+ n -= IDR_BITS;
+ while ((n > 0) && p) {
+ p = p->ary[(id >> n) & IDR_MASK];
+ n -= IDR_BITS;
+ }
+
+ n = id & IDR_MASK;
+ if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
+ return ERR_PTR(-ENOENT);
+
+ old_p = p->ary[n];
+ p->ary[n] = ptr;
+
+ return old_p;
+}
+EXPORT_SYMBOL(idr_replace);
+
static void idr_cache_ctor(void * idr_layer, kmem_cache_t *idr_layer_cache,
unsigned long flags)
{
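A hedged sketch of how the new idr_replace() is called; the idr, id and object pointers are stand-ins.

	/* Illustrative only: swap the object stored under an existing id. */
	#include <linux/idr.h>
	#include <linux/err.h>

	static int example_idr_replace(struct idr *idr, int id, void *new_obj)
	{
		void *old;

		/* caller must serialize vs idr_find(), idr_get_new() and idr_remove() */
		old = idr_replace(idr, new_obj, id);
		if (IS_ERR(old))
			return PTR_ERR(old);	/* -ENOENT or -EINVAL */
		/* old is the previously registered pointer */
		return 0;
	}
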
diff --git a/lib/iomap_copy.c b/lib/iomap_copy.c
index 351045f4f63..864fc5ea398 100644
--- a/lib/iomap_copy.c
+++ b/lib/iomap_copy.c
@@ -40,3 +40,31 @@ void __attribute__((weak)) __iowrite32_copy(void __iomem *to,
__raw_writel(*src++, dst++);
}
EXPORT_SYMBOL_GPL(__iowrite32_copy);
+
+/**
+ * __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units
+ * @to: destination, in MMIO space (must be 64-bit aligned)
+ * @from: source (must be 64-bit aligned)
+ * @count: number of 64-bit quantities to copy
+ *
+ * Copy data from kernel space to MMIO space, in units of 32 or 64 bits at a
+ * time. Order of access is not guaranteed, nor is a memory barrier
+ * performed afterwards.
+ */
+void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
+ const void *from,
+ size_t count)
+{
+#ifdef CONFIG_64BIT
+ u64 __iomem *dst = to;
+ const u64 *src = from;
+ const u64 *end = src + count;
+
+ while (src < end)
+ __raw_writeq(*src++, dst++);
+#else
+ __iowrite32_copy(to, from, count * 2);
+#endif
+}
+
+EXPORT_SYMBOL_GPL(__iowrite64_copy);
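A short usage sketch for the new helper; the descriptor layout and doorbell region are hypothetical.

	/* Illustrative only: post a 32-byte descriptor to a device doorbell area. */
	#include <linux/io.h>

	struct example_desc {
		u64 word[4];
	};

	static void example_post_desc(void __iomem *db, const struct example_desc *d)
	{
		/* four 64-bit stores on 64-bit kernels, eight 32-bit stores otherwise */
		__iowrite64_copy(db, d, 4);
		wmb();	/* the helper gives no ordering guarantee, so fence explicitly */
	}
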
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index cb5490ec00f..e713e86811a 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -14,7 +14,7 @@
* The 'big kernel semaphore'
*
* This mutex is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reaquired
+ * and unlock_kernel(). It is transparently dropped and reacquired
* over schedule(). It is used to protect legacy code that hasn't
* been migrated to a proper locking design yet.
*
@@ -92,7 +92,7 @@ void __lockfunc unlock_kernel(void)
* The 'big kernel lock'
*
* This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reaquired
+ * and unlock_kernel(). It is transparently dropped and reacquired
* over schedule(). It is used to protect legacy code that hasn't
* been migrated to a proper locking design yet.
*
diff --git a/lib/kobject.c b/lib/kobject.c
index 687ab418d29..8e7c7199348 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -198,14 +198,14 @@ int kobject_add(struct kobject * kobj)
/* be noisy on error issues */
if (error == -EEXIST)
- pr_debug("kobject_add failed for %s with -EEXIST, "
+ printk("kobject_add failed for %s with -EEXIST, "
"don't try to register things with the "
"same name in the same directory.\n",
kobject_name(kobj));
else
- pr_debug("kobject_add failed for %s (%d)\n",
+ printk("kobject_add failed for %s (%d)\n",
kobject_name(kobj), error);
- /* dump_stack(); */
+ dump_stack();
}
return error;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 7f20e7b857c..2b1530fc573 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -50,6 +50,10 @@ static char *action_to_string(enum kobject_action action)
return "offline";
case KOBJ_ONLINE:
return "online";
+ case KOBJ_DOCK:
+ return "dock";
+ case KOBJ_UNDOCK:
+ return "undock";
default:
return NULL;
}
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 52b6dc144ce..60f46803af3 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -88,7 +88,7 @@ crc32c_le(u32 crc, unsigned char const *p, size_t len)
* reflect output bytes = true
*/
-static u32 crc32c_table[256] = {
+static const u32 crc32c_table[256] = {
0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L,
0xC79A971FL, 0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL,
0x8AD958CFL, 0x78B2DBCCL, 0x6BE22838L, 0x9989AB3BL,
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
new file mode 100644
index 00000000000..850449080e1
--- /dev/null
+++ b/lib/percpu_counter.c
@@ -0,0 +1,46 @@
+/*
+ * Fast batching percpu counters.
+ */
+
+#include <linux/percpu_counter.h>
+#include <linux/module.h>
+
+void percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
+{
+ long count;
+ s32 *pcount;
+ int cpu = get_cpu();
+
+ pcount = per_cpu_ptr(fbc->counters, cpu);
+ count = *pcount + amount;
+ if (count >= FBC_BATCH || count <= -FBC_BATCH) {
+ spin_lock(&fbc->lock);
+ fbc->count += count;
+ *pcount = 0;
+ spin_unlock(&fbc->lock);
+ } else {
+ *pcount = count;
+ }
+ put_cpu();
+}
+EXPORT_SYMBOL(percpu_counter_mod);
+
+/*
+ * Add up all the per-cpu counts, return the result. This is a more accurate
+ * but much slower version of percpu_counter_read_positive()
+ */
+s64 percpu_counter_sum(struct percpu_counter *fbc)
+{
+ s64 ret;
+ int cpu;
+
+ spin_lock(&fbc->lock);
+ ret = fbc->count;
+ for_each_possible_cpu(cpu) {
+ s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
+ ret += *pcount;
+ }
+ spin_unlock(&fbc->lock);
+ return ret < 0 ? 0 : ret;
+}
+EXPORT_SYMBOL(percpu_counter_sum);
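A hedged sketch of typical use, showing why percpu_counter_sum() exists next to the cheap approximate read; the counter is a stand-in and is assumed to have been set up with percpu_counter_init() elsewhere.

	/* Illustrative only. */
	#include <linux/percpu_counter.h>

	static struct percpu_counter example_counter;	/* assume percpu_counter_init() was called */

	static void example_account(s32 nr)
	{
		/* cheap: updates a per-cpu slot, folds into fbc->count only
		 * once the local delta reaches +/- FBC_BATCH */
		percpu_counter_mod(&example_counter, nr);
	}

	static s64 example_exact_total(void)
	{
		/* slow but accurate: adds every per-cpu slot under fbc->lock */
		return percpu_counter_sum(&example_counter);
	}
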
diff --git a/lib/plist.c b/lib/plist.c
new file mode 100644
index 00000000000..3074a02272f
--- /dev/null
+++ b/lib/plist.c
@@ -0,0 +1,118 @@
+/*
+ * lib/plist.c
+ *
+ * Descending-priority-sorted double-linked list
+ *
+ * (C) 2002-2003 Intel Corp
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>.
+ *
+ * 2001-2005 (c) MontaVista Software, Inc.
+ * Daniel Walker <dwalker@mvista.com>
+ *
+ * (C) 2005 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Simplifications of the original code by
+ * Oleg Nesterov <oleg@tv-sign.ru>
+ *
+ * Licensed under the FSF's GNU Public License v2 or later.
+ *
+ * Based on simple lists (include/linux/list.h).
+ *
+ * This file contains the add / del functions which are considered to
+ * be too large to inline. See include/linux/plist.h for further
+ * information.
+ */
+
+#include <linux/plist.h>
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_DEBUG_PI_LIST
+
+static void plist_check_prev_next(struct list_head *t, struct list_head *p,
+ struct list_head *n)
+{
+ if (n->prev != p || p->next != n) {
+ printk("top: %p, n: %p, p: %p\n", t, t->next, t->prev);
+ printk("prev: %p, n: %p, p: %p\n", p, p->next, p->prev);
+ printk("next: %p, n: %p, p: %p\n", n, n->next, n->prev);
+ WARN_ON(1);
+ }
+}
+
+static void plist_check_list(struct list_head *top)
+{
+ struct list_head *prev = top, *next = top->next;
+
+ plist_check_prev_next(top, prev, next);
+ while (next != top) {
+ prev = next;
+ next = prev->next;
+ plist_check_prev_next(top, prev, next);
+ }
+}
+
+static void plist_check_head(struct plist_head *head)
+{
+ WARN_ON(!head->lock);
+ if (head->lock)
+ WARN_ON_SMP(!spin_is_locked(head->lock));
+ plist_check_list(&head->prio_list);
+ plist_check_list(&head->node_list);
+}
+
+#else
+# define plist_check_head(h) do { } while (0)
+#endif
+
+/**
+ * plist_add - add @node to @head
+ *
+ * @node: &struct plist_node pointer
+ * @head: &struct plist_head pointer
+ */
+void plist_add(struct plist_node *node, struct plist_head *head)
+{
+ struct plist_node *iter;
+
+ plist_check_head(head);
+ WARN_ON(!plist_node_empty(node));
+
+ list_for_each_entry(iter, &head->prio_list, plist.prio_list) {
+ if (node->prio < iter->prio)
+ goto lt_prio;
+ else if (node->prio == iter->prio) {
+ iter = list_entry(iter->plist.prio_list.next,
+ struct plist_node, plist.prio_list);
+ goto eq_prio;
+ }
+ }
+
+lt_prio:
+ list_add_tail(&node->plist.prio_list, &iter->plist.prio_list);
+eq_prio:
+ list_add_tail(&node->plist.node_list, &iter->plist.node_list);
+
+ plist_check_head(head);
+}
+
+/**
+ * plist_del - Remove a @node from plist.
+ *
+ * @node: &struct plist_node pointer - entry to be removed
+ * @head: &struct plist_head pointer - list head
+ */
+void plist_del(struct plist_node *node, struct plist_head *head)
+{
+ plist_check_head(head);
+
+ if (!list_empty(&node->plist.prio_list)) {
+ struct plist_node *next = plist_first(&node->plist);
+
+ list_move_tail(&next->plist.prio_list, &node->plist.prio_list);
+ list_del_init(&node->plist.prio_list);
+ }
+
+ list_del_init(&node->plist.node_list);
+
+ plist_check_head(head);
+}
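A minimal usage sketch for the primitives above; the waiter structure, lock and priority values are placeholders, and the head is assumed to have been set up with plist_head_init().

	/* Illustrative only: keep waiters sorted by priority (lower value = higher priority). */
	#include <linux/plist.h>
	#include <linux/spinlock.h>

	struct example_waiter {
		struct plist_node node;
		/* payload ... */
	};

	static void example_enqueue(struct plist_head *head, spinlock_t *lock,
				    struct example_waiter *w, int prio)
	{
		plist_node_init(&w->node, prio);
		spin_lock(lock);
		plist_add(&w->node, head);	/* lands after all entries of equal priority */
		spin_unlock(lock);
	}
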
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 7097bb239e4..637d55608de 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -33,7 +33,7 @@
#ifdef __KERNEL__
-#define RADIX_TREE_MAP_SHIFT 6
+#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
#else
#define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */
#endif
@@ -74,6 +74,11 @@ struct radix_tree_preload {
};
DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
+{
+ return root->gfp_mask & __GFP_BITS_MASK;
+}
+
/*
* This assumes that the caller has performed appropriate preallocation, and
* that the caller has pinned this thread of control to the current CPU.
@@ -82,9 +87,10 @@ static struct radix_tree_node *
radix_tree_node_alloc(struct radix_tree_root *root)
{
struct radix_tree_node *ret;
+ gfp_t gfp_mask = root_gfp_mask(root);
- ret = kmem_cache_alloc(radix_tree_node_cachep, root->gfp_mask);
- if (ret == NULL && !(root->gfp_mask & __GFP_WAIT)) {
+ ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
+ if (ret == NULL && !(gfp_mask & __GFP_WAIT)) {
struct radix_tree_preload *rtp;
rtp = &__get_cpu_var(radix_tree_preloads);
@@ -152,6 +158,27 @@ static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
return test_bit(offset, node->tags[tag]);
}
+static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
+{
+ root->gfp_mask |= (1 << (tag + __GFP_BITS_SHIFT));
+}
+
+
+static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
+{
+ root->gfp_mask &= ~(1 << (tag + __GFP_BITS_SHIFT));
+}
+
+static inline void root_tag_clear_all(struct radix_tree_root *root)
+{
+ root->gfp_mask &= __GFP_BITS_MASK;
+}
+
+static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
+{
+ return root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
+}
+
/*
* Returns 1 if any slot in the node has this tag set.
* Otherwise returns 0.
@@ -182,7 +209,6 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
{
struct radix_tree_node *node;
unsigned int height;
- char tags[RADIX_TREE_MAX_TAGS];
int tag;
/* Figure out what the height should be. */
@@ -195,16 +221,6 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
goto out;
}
- /*
- * Prepare the tag status of the top-level node for propagation
- * into the newly-pushed top-level node(s)
- */
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
- tags[tag] = 0;
- if (any_tag_set(root->rnode, tag))
- tags[tag] = 1;
- }
-
do {
if (!(node = radix_tree_node_alloc(root)))
return -ENOMEM;
@@ -214,7 +230,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
/* Propagate the aggregated tag info into the new root */
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
- if (tags[tag])
+ if (root_tag_get(root, tag))
tag_set(node, tag, 0);
}
@@ -243,8 +259,7 @@ int radix_tree_insert(struct radix_tree_root *root,
int error;
/* Make sure the tree is high enough. */
- if ((!index && !root->rnode) ||
- index > radix_tree_maxindex(root->height)) {
+ if (index > radix_tree_maxindex(root->height)) {
error = radix_tree_extend(root, index);
if (error)
return error;
@@ -255,7 +270,7 @@ int radix_tree_insert(struct radix_tree_root *root,
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
offset = 0; /* uninitialised var warning */
- do {
+ while (height > 0) {
if (slot == NULL) {
/* Have to add a child node. */
if (!(slot = radix_tree_node_alloc(root)))
@@ -273,16 +288,21 @@ int radix_tree_insert(struct radix_tree_root *root,
slot = node->slots[offset];
shift -= RADIX_TREE_MAP_SHIFT;
height--;
- } while (height > 0);
+ }
if (slot != NULL)
return -EEXIST;
- BUG_ON(!node);
- node->count++;
- node->slots[offset] = item;
- BUG_ON(tag_get(node, 0, offset));
- BUG_ON(tag_get(node, 1, offset));
+ if (node) {
+ node->count++;
+ node->slots[offset] = item;
+ BUG_ON(tag_get(node, 0, offset));
+ BUG_ON(tag_get(node, 1, offset));
+ } else {
+ root->rnode = item;
+ BUG_ON(root_tag_get(root, 0));
+ BUG_ON(root_tag_get(root, 1));
+ }
return 0;
}
@@ -295,9 +315,13 @@ static inline void **__lookup_slot(struct radix_tree_root *root,
struct radix_tree_node **slot;
height = root->height;
+
if (index > radix_tree_maxindex(height))
return NULL;
+ if (height == 0 && root->rnode)
+ return (void **)&root->rnode;
+
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
slot = &root->rnode;
@@ -365,11 +389,10 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
struct radix_tree_node *slot;
height = root->height;
- if (index > radix_tree_maxindex(height))
- return NULL;
+ BUG_ON(index > radix_tree_maxindex(height));
- shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
slot = root->rnode;
+ shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
while (height > 0) {
int offset;
@@ -383,6 +406,10 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
height--;
}
+ /* set the root's tag bit */
+ if (slot && !root_tag_get(root, tag))
+ root_tag_set(root, tag);
+
return slot;
}
EXPORT_SYMBOL(radix_tree_tag_set);
@@ -405,9 +432,8 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
unsigned long index, unsigned int tag)
{
struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
- struct radix_tree_node *slot;
+ struct radix_tree_node *slot = NULL;
unsigned int height, shift;
- void *ret = NULL;
height = root->height;
if (index > radix_tree_maxindex(height))
@@ -432,20 +458,24 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
height--;
}
- ret = slot;
- if (ret == NULL)
+ if (slot == NULL)
goto out;
- do {
+ while (pathp->node) {
if (!tag_get(pathp->node, tag, pathp->offset))
goto out;
tag_clear(pathp->node, tag, pathp->offset);
if (any_tag_set(pathp->node, tag))
goto out;
pathp--;
- } while (pathp->node);
+ }
+
+ /* clear the root's tag bit */
+ if (root_tag_get(root, tag))
+ root_tag_clear(root, tag);
+
out:
- return ret;
+ return slot;
}
EXPORT_SYMBOL(radix_tree_tag_clear);
@@ -458,9 +488,8 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
*
* Return values:
*
- * 0: tag not present
- * 1: tag present, set
- * -1: tag present, unset
+ * 0: tag not present or not set
+ * 1: tag set
*/
int radix_tree_tag_get(struct radix_tree_root *root,
unsigned long index, unsigned int tag)
@@ -473,6 +502,13 @@ int radix_tree_tag_get(struct radix_tree_root *root,
if (index > radix_tree_maxindex(height))
return 0;
+ /* check the root's tag bit */
+ if (!root_tag_get(root, tag))
+ return 0;
+
+ if (height == 0)
+ return 1;
+
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
slot = root->rnode;
@@ -494,7 +530,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
int ret = tag_get(slot, tag, offset);
BUG_ON(ret && saw_unset_tag);
- return ret ? 1 : -1;
+ return !!ret;
}
slot = slot->slots[offset];
shift -= RADIX_TREE_MAP_SHIFT;
@@ -514,8 +550,11 @@ __lookup(struct radix_tree_root *root, void **results, unsigned long index,
unsigned long i;
height = root->height;
- if (height == 0)
+ if (height == 0) {
+ if (root->rnode && index == 0)
+ results[nr_found++] = root->rnode;
goto out;
+ }
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
slot = root->rnode;
@@ -603,10 +642,16 @@ __lookup_tag(struct radix_tree_root *root, void **results, unsigned long index,
unsigned int height = root->height;
struct radix_tree_node *slot;
+ if (height == 0) {
+ if (root->rnode && index == 0)
+ results[nr_found++] = root->rnode;
+ goto out;
+ }
+
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
slot = root->rnode;
- while (height > 0) {
+ do {
unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK;
for ( ; i < RADIX_TREE_MAP_SIZE; i++) {
@@ -637,7 +682,7 @@ __lookup_tag(struct radix_tree_root *root, void **results, unsigned long index,
}
shift -= RADIX_TREE_MAP_SHIFT;
slot = slot->slots[i];
- }
+ } while (height > 0);
out:
*next_index = index;
return nr_found;
@@ -665,6 +710,10 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
unsigned long cur_index = first_index;
unsigned int ret = 0;
+ /* check the root's tag bit */
+ if (!root_tag_get(root, tag))
+ return 0;
+
while (ret < max_items) {
unsigned int nr_found;
unsigned long next_index; /* Index of next search */
@@ -689,7 +738,7 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
static inline void radix_tree_shrink(struct radix_tree_root *root)
{
/* try to shrink tree height */
- while (root->height > 1 &&
+ while (root->height > 0 &&
root->rnode->count == 1 &&
root->rnode->slots[0]) {
struct radix_tree_node *to_free = root->rnode;
@@ -717,12 +766,8 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
- struct radix_tree_path *orig_pathp;
- struct radix_tree_node *slot;
+ struct radix_tree_node *slot = NULL;
unsigned int height, shift;
- void *ret = NULL;
- char tags[RADIX_TREE_MAX_TAGS];
- int nr_cleared_tags;
int tag;
int offset;
@@ -730,11 +775,17 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
if (index > radix_tree_maxindex(height))
goto out;
+ slot = root->rnode;
+ if (height == 0 && root->rnode) {
+ root_tag_clear_all(root);
+ root->rnode = NULL;
+ goto out;
+ }
+
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
pathp->node = NULL;
- slot = root->rnode;
- for ( ; height > 0; height--) {
+ do {
if (slot == NULL)
goto out;
@@ -744,44 +795,22 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
pathp->node = slot;
slot = slot->slots[offset];
shift -= RADIX_TREE_MAP_SHIFT;
- }
+ height--;
+ } while (height > 0);
- ret = slot;
- if (ret == NULL)
+ if (slot == NULL)
goto out;
- orig_pathp = pathp;
-
/*
* Clear all tags associated with the just-deleted item
*/
- nr_cleared_tags = 0;
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
- tags[tag] = 1;
- if (tag_get(pathp->node, tag, pathp->offset)) {
- tag_clear(pathp->node, tag, pathp->offset);
- if (!any_tag_set(pathp->node, tag)) {
- tags[tag] = 0;
- nr_cleared_tags++;
- }
- }
- }
-
- for (pathp--; nr_cleared_tags && pathp->node; pathp--) {
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
- if (tags[tag])
- continue;
-
- tag_clear(pathp->node, tag, pathp->offset);
- if (any_tag_set(pathp->node, tag)) {
- tags[tag] = 1;
- nr_cleared_tags--;
- }
- }
+ if (tag_get(pathp->node, tag, pathp->offset))
+ radix_tree_tag_clear(root, index, tag);
}
/* Now free the nodes we do not need anymore */
- for (pathp = orig_pathp; pathp->node; pathp--) {
+ while (pathp->node) {
pathp->node->slots[pathp->offset] = NULL;
pathp->node->count--;
@@ -793,11 +822,15 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
/* Node with zero slots in use so free it */
radix_tree_node_free(pathp->node);
+
+ pathp--;
}
- root->rnode = NULL;
+ root_tag_clear_all(root);
root->height = 0;
+ root->rnode = NULL;
+
out:
- return ret;
+ return slot;
}
EXPORT_SYMBOL(radix_tree_delete);
@@ -808,11 +841,7 @@ EXPORT_SYMBOL(radix_tree_delete);
*/
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
{
- struct radix_tree_node *rnode;
- rnode = root->rnode;
- if (!rnode)
- return 0;
- return any_tag_set(rnode, tag);
+ return root_tag_get(root, tag);
}
EXPORT_SYMBOL(radix_tree_tagged);
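To see the root-tag and direct-root-entry changes above from the caller's side, a hedged sketch (the tree, item and tag number are invented):

	/* Illustrative only: radix_tree_tag_get() now returns only 0 or 1. */
	#include <linux/radix-tree.h>

	static RADIX_TREE(example_tree, GFP_KERNEL);	/* root tag bits share gfp_mask */

	static int example_tags(void *item)
	{
		int err;

		err = radix_tree_insert(&example_tree, 0, item);	/* index 0 sits in the root */
		if (err)
			return err;

		radix_tree_tag_set(&example_tree, 0, 0);	/* sets the root tag bit (plus node bits in taller trees) */
		return radix_tree_tag_get(&example_tree, 0, 0);	/* 1 = set, 0 = unset or absent */
	}
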
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 14b791ac508..1e55ba1c2ed 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -26,60 +26,66 @@
static void __rb_rotate_left(struct rb_node *node, struct rb_root *root)
{
struct rb_node *right = node->rb_right;
+ struct rb_node *parent = rb_parent(node);
if ((node->rb_right = right->rb_left))
- right->rb_left->rb_parent = node;
+ rb_set_parent(right->rb_left, node);
right->rb_left = node;
- if ((right->rb_parent = node->rb_parent))
+ rb_set_parent(right, parent);
+
+ if (parent)
{
- if (node == node->rb_parent->rb_left)
- node->rb_parent->rb_left = right;
+ if (node == parent->rb_left)
+ parent->rb_left = right;
else
- node->rb_parent->rb_right = right;
+ parent->rb_right = right;
}
else
root->rb_node = right;
- node->rb_parent = right;
+ rb_set_parent(node, right);
}
static void __rb_rotate_right(struct rb_node *node, struct rb_root *root)
{
struct rb_node *left = node->rb_left;
+ struct rb_node *parent = rb_parent(node);
if ((node->rb_left = left->rb_right))
- left->rb_right->rb_parent = node;
+ rb_set_parent(left->rb_right, node);
left->rb_right = node;
- if ((left->rb_parent = node->rb_parent))
+ rb_set_parent(left, parent);
+
+ if (parent)
{
- if (node == node->rb_parent->rb_right)
- node->rb_parent->rb_right = left;
+ if (node == parent->rb_right)
+ parent->rb_right = left;
else
- node->rb_parent->rb_left = left;
+ parent->rb_left = left;
}
else
root->rb_node = left;
- node->rb_parent = left;
+ rb_set_parent(node, left);
}
void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
struct rb_node *parent, *gparent;
- while ((parent = node->rb_parent) && parent->rb_color == RB_RED)
+ while ((parent = rb_parent(node)) && rb_is_red(parent))
{
- gparent = parent->rb_parent;
+ gparent = rb_parent(parent);
if (parent == gparent->rb_left)
{
{
register struct rb_node *uncle = gparent->rb_right;
- if (uncle && uncle->rb_color == RB_RED)
+ if (uncle && rb_is_red(uncle))
{
- uncle->rb_color = RB_BLACK;
- parent->rb_color = RB_BLACK;
- gparent->rb_color = RB_RED;
+ rb_set_black(uncle);
+ rb_set_black(parent);
+ rb_set_red(gparent);
node = gparent;
continue;
}
@@ -94,17 +100,17 @@ void rb_insert_color(struct rb_node *node, struct rb_root *root)
node = tmp;
}
- parent->rb_color = RB_BLACK;
- gparent->rb_color = RB_RED;
+ rb_set_black(parent);
+ rb_set_red(gparent);
__rb_rotate_right(gparent, root);
} else {
{
register struct rb_node *uncle = gparent->rb_left;
- if (uncle && uncle->rb_color == RB_RED)
+ if (uncle && rb_is_red(uncle))
{
- uncle->rb_color = RB_BLACK;
- parent->rb_color = RB_BLACK;
- gparent->rb_color = RB_RED;
+ rb_set_black(uncle);
+ rb_set_black(parent);
+ rb_set_red(gparent);
node = gparent;
continue;
}
@@ -119,13 +125,13 @@ void rb_insert_color(struct rb_node *node, struct rb_root *root)
node = tmp;
}
- parent->rb_color = RB_BLACK;
- gparent->rb_color = RB_RED;
+ rb_set_black(parent);
+ rb_set_red(gparent);
__rb_rotate_left(gparent, root);
}
}
- root->rb_node->rb_color = RB_BLACK;
+ rb_set_black(root->rb_node);
}
EXPORT_SYMBOL(rb_insert_color);
@@ -134,43 +140,40 @@ static void __rb_erase_color(struct rb_node *node, struct rb_node *parent,
{
struct rb_node *other;
- while ((!node || node->rb_color == RB_BLACK) && node != root->rb_node)
+ while ((!node || rb_is_black(node)) && node != root->rb_node)
{
if (parent->rb_left == node)
{
other = parent->rb_right;
- if (other->rb_color == RB_RED)
+ if (rb_is_red(other))
{
- other->rb_color = RB_BLACK;
- parent->rb_color = RB_RED;
+ rb_set_black(other);
+ rb_set_red(parent);
__rb_rotate_left(parent, root);
other = parent->rb_right;
}
- if ((!other->rb_left ||
- other->rb_left->rb_color == RB_BLACK)
- && (!other->rb_right ||
- other->rb_right->rb_color == RB_BLACK))
+ if ((!other->rb_left || rb_is_black(other->rb_left)) &&
+ (!other->rb_right || rb_is_black(other->rb_right)))
{
- other->rb_color = RB_RED;
+ rb_set_red(other);
node = parent;
- parent = node->rb_parent;
+ parent = rb_parent(node);
}
else
{
- if (!other->rb_right ||
- other->rb_right->rb_color == RB_BLACK)
+ if (!other->rb_right || rb_is_black(other->rb_right))
{
- register struct rb_node *o_left;
+ struct rb_node *o_left;
if ((o_left = other->rb_left))
- o_left->rb_color = RB_BLACK;
- other->rb_color = RB_RED;
+ rb_set_black(o_left);
+ rb_set_red(other);
__rb_rotate_right(other, root);
other = parent->rb_right;
}
- other->rb_color = parent->rb_color;
- parent->rb_color = RB_BLACK;
+ rb_set_color(other, rb_color(parent));
+ rb_set_black(parent);
if (other->rb_right)
- other->rb_right->rb_color = RB_BLACK;
+ rb_set_black(other->rb_right);
__rb_rotate_left(parent, root);
node = root->rb_node;
break;
@@ -179,38 +182,35 @@ static void __rb_erase_color(struct rb_node *node, struct rb_node *parent,
else
{
other = parent->rb_left;
- if (other->rb_color == RB_RED)
+ if (rb_is_red(other))
{
- other->rb_color = RB_BLACK;
- parent->rb_color = RB_RED;
+ rb_set_black(other);
+ rb_set_red(parent);
__rb_rotate_right(parent, root);
other = parent->rb_left;
}
- if ((!other->rb_left ||
- other->rb_left->rb_color == RB_BLACK)
- && (!other->rb_right ||
- other->rb_right->rb_color == RB_BLACK))
+ if ((!other->rb_left || rb_is_black(other->rb_left)) &&
+ (!other->rb_right || rb_is_black(other->rb_right)))
{
- other->rb_color = RB_RED;
+ rb_set_red(other);
node = parent;
- parent = node->rb_parent;
+ parent = rb_parent(node);
}
else
{
- if (!other->rb_left ||
- other->rb_left->rb_color == RB_BLACK)
+ if (!other->rb_left || rb_is_black(other->rb_left))
{
register struct rb_node *o_right;
if ((o_right = other->rb_right))
- o_right->rb_color = RB_BLACK;
- other->rb_color = RB_RED;
+ rb_set_black(o_right);
+ rb_set_red(other);
__rb_rotate_left(other, root);
other = parent->rb_left;
}
- other->rb_color = parent->rb_color;
- parent->rb_color = RB_BLACK;
+ rb_set_color(other, rb_color(parent));
+ rb_set_black(parent);
if (other->rb_left)
- other->rb_left->rb_color = RB_BLACK;
+ rb_set_black(other->rb_left);
__rb_rotate_right(parent, root);
node = root->rb_node;
break;
@@ -218,7 +218,7 @@ static void __rb_erase_color(struct rb_node *node, struct rb_node *parent,
}
}
if (node)
- node->rb_color = RB_BLACK;
+ rb_set_black(node);
}
void rb_erase(struct rb_node *node, struct rb_root *root)
@@ -238,48 +238,41 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
while ((left = node->rb_left) != NULL)
node = left;
child = node->rb_right;
- parent = node->rb_parent;
- color = node->rb_color;
+ parent = rb_parent(node);
+ color = rb_color(node);
if (child)
- child->rb_parent = parent;
- if (parent)
- {
- if (parent->rb_left == node)
- parent->rb_left = child;
- else
- parent->rb_right = child;
- }
- else
- root->rb_node = child;
-
- if (node->rb_parent == old)
+ rb_set_parent(child, parent);
+ if (parent == old) {
+ parent->rb_right = child;
parent = node;
- node->rb_parent = old->rb_parent;
- node->rb_color = old->rb_color;
+ } else
+ parent->rb_left = child;
+
+ node->rb_parent_color = old->rb_parent_color;
node->rb_right = old->rb_right;
node->rb_left = old->rb_left;
- if (old->rb_parent)
+ if (rb_parent(old))
{
- if (old->rb_parent->rb_left == old)
- old->rb_parent->rb_left = node;
+ if (rb_parent(old)->rb_left == old)
+ rb_parent(old)->rb_left = node;
else
- old->rb_parent->rb_right = node;
+ rb_parent(old)->rb_right = node;
} else
root->rb_node = node;
- old->rb_left->rb_parent = node;
+ rb_set_parent(old->rb_left, node);
if (old->rb_right)
- old->rb_right->rb_parent = node;
+ rb_set_parent(old->rb_right, node);
goto color;
}
- parent = node->rb_parent;
- color = node->rb_color;
+ parent = rb_parent(node);
+ color = rb_color(node);
if (child)
- child->rb_parent = parent;
+ rb_set_parent(child, parent);
if (parent)
{
if (parent->rb_left == node)
@@ -327,6 +320,8 @@ EXPORT_SYMBOL(rb_last);
struct rb_node *rb_next(struct rb_node *node)
{
+ struct rb_node *parent;
+
/* If we have a right-hand child, go down and then left as far
as we can. */
if (node->rb_right) {
@@ -342,15 +337,17 @@ struct rb_node *rb_next(struct rb_node *node)
ancestor is a right-hand child of its parent, keep going
up. First time it's a left-hand child of its parent, said
parent is our 'next' node. */
- while (node->rb_parent && node == node->rb_parent->rb_right)
- node = node->rb_parent;
+ while ((parent = rb_parent(node)) && node == parent->rb_right)
+ node = parent;
- return node->rb_parent;
+ return parent;
}
EXPORT_SYMBOL(rb_next);
struct rb_node *rb_prev(struct rb_node *node)
{
+ struct rb_node *parent;
+
/* If we have a left-hand child, go down and then right as far
as we can. */
if (node->rb_left) {
@@ -362,17 +359,17 @@ struct rb_node *rb_prev(struct rb_node *node)
/* No left-hand children. Go up till we find an ancestor which
is a right-hand child of its parent */
- while (node->rb_parent && node == node->rb_parent->rb_left)
- node = node->rb_parent;
+ while ((parent = rb_parent(node)) && node == parent->rb_left)
+ node = parent;
- return node->rb_parent;
+ return parent;
}
EXPORT_SYMBOL(rb_prev);
void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root)
{
- struct rb_node *parent = victim->rb_parent;
+ struct rb_node *parent = rb_parent(victim);
/* Set the surrounding nodes to point to the replacement */
if (parent) {
@@ -384,9 +381,9 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
root->rb_node = new;
}
if (victim->rb_left)
- victim->rb_left->rb_parent = new;
+ rb_set_parent(victim->rb_left, new);
if (victim->rb_right)
- victim->rb_right->rb_parent = new;
+ rb_set_parent(victim->rb_right, new);
/* Copy the pointers/colour from the victim to the replacement */
*new = *victim;
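The rb_parent()/rb_color()/rb_set_*() helpers used throughout this file come from the matching include/linux/rbtree.h change, which folds the node colour into the low bits of a combined rb_parent_color word. Roughly, as a paraphrase rather than the literal header:

	/* Sketch: colour lives in bit 0, the parent pointer in the remaining bits. */
	#define rb_parent(r)	((struct rb_node *)((r)->rb_parent_color & ~3))
	#define rb_color(r)	((r)->rb_parent_color & 1)
	#define rb_is_red(r)	(!rb_color(r))		/* RB_RED == 0 */
	#define rb_is_black(r)	rb_color(r)		/* RB_BLACK == 1 */

	static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
	{
		rb->rb_parent_color = (rb->rb_parent_color & 3) | (unsigned long)p;
	}
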
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index f8ac9fa95de..2cc11faa4ff 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -54,7 +54,6 @@ static DEFINE_MUTEX(rslistlock);
/**
* rs_init - Initialize a Reed-Solomon codec
- *
* @symsize: symbol size, bits (1-8)
* @gfpoly: Field generator polynomial coefficients
* @fcr: first root of RS code generator polynomial, index form
@@ -62,7 +61,7 @@ static DEFINE_MUTEX(rslistlock);
* @nroots: RS code generator polynomial degree (number of roots)
*
* Allocate a control structure and the polynom arrays for faster
- * en/decoding. Fill the arrays according to the given parameters
+ * en/decoding. Fill the arrays according to the given parameters.
*/
static struct rs_control *rs_init(int symsize, int gfpoly, int fcr,
int prim, int nroots)
@@ -155,8 +154,7 @@ errrs:
/**
- * free_rs - Free the rs control structure, if its not longer used
- *
+ * free_rs - Free the rs control structure, if it is no longer used
 * @rs: the control structure which is no longer used by the
* caller
*/
@@ -176,7 +174,6 @@ void free_rs(struct rs_control *rs)
/**
* init_rs - Find a matching or allocate a new rs control structure
- *
* @symsize: the symbol size (number of bits)
* @gfpoly: the extended Galois field generator polynomial coefficients,
* with the 0th coefficient in the low order bit. The polynomial
@@ -236,7 +233,6 @@ out:
#ifdef CONFIG_REED_SOLOMON_ENC8
/**
* encode_rs8 - Calculate the parity for data values (8bit data width)
- *
* @rs: the rs control structure
* @data: data field of a given type
* @len: data length
@@ -258,7 +254,6 @@ EXPORT_SYMBOL_GPL(encode_rs8);
#ifdef CONFIG_REED_SOLOMON_DEC8
/**
* decode_rs8 - Decode codeword (8bit data width)
- *
* @rs: the rs control structure
* @data: data field of a given type
* @par: received parity data field
@@ -285,7 +280,6 @@ EXPORT_SYMBOL_GPL(decode_rs8);
#ifdef CONFIG_REED_SOLOMON_ENC16
/**
* encode_rs16 - Calculate the parity for data values (16bit data width)
- *
* @rs: the rs control structure
* @data: data field of a given type
* @len: data length
@@ -305,7 +299,6 @@ EXPORT_SYMBOL_GPL(encode_rs16);
#ifdef CONFIG_REED_SOLOMON_DEC16
/**
* decode_rs16 - Decode codeword (16bit data width)
- *
* @rs: the rs control structure
* @data: data field of a given type
* @par: received parity data field
diff --git a/lib/semaphore-sleepers.c b/lib/semaphore-sleepers.c
index 4d5f18889fa..12818052386 100644
--- a/lib/semaphore-sleepers.c
+++ b/lib/semaphore-sleepers.c
@@ -12,7 +12,6 @@
*
* rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
*/
-#include <linux/config.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index d8b6bb419d4..93c15ee3f8e 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -6,7 +6,6 @@
* DEBUG_SPINLOCK.
*/
-#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
diff --git a/lib/string.c b/lib/string.c
index 064f6315b1c..63077267367 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -301,6 +301,36 @@ char *strnchr(const char *s, size_t count, int c)
EXPORT_SYMBOL(strnchr);
#endif
+/**
+ * strstrip - Removes leading and trailing whitespace from @s.
+ * @s: The string to be stripped.
+ *
+ * Note that the first trailing whitespace is replaced with a %NUL-terminator
+ * in the given string @s. Returns a pointer to the first non-whitespace
+ * character in @s.
+ */
+char *strstrip(char *s)
+{
+ size_t size;
+ char *end;
+
+ size = strlen(s);
+
+ if (!size)
+ return s;
+
+ end = s + size - 1;
+ while (end != s && isspace(*end))
+ end--;
+ *(end + 1) = '\0';
+
+ while (*s && isspace(*s))
+ s++;
+
+ return s;
+}
+EXPORT_SYMBOL(strstrip);
+
#ifndef __HAVE_ARCH_STRLEN
/**
* strlen - Find the length of a string
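A quick usage sketch for the new strstrip(); the input and the comparison string are illustrative.

	/* Illustrative only: compare a sysfs-style input line, ignoring surrounding blanks. */
	#include <linux/string.h>

	static int example_is_enable(char *buf)
	{
		char *val = strstrip(buf);	/* "  enable \n" -> "enable", trimmed in place */

		/* val may point past buf; buf's trailing whitespace is now overwritten */
		return strcmp(val, "enable") == 0;
	}
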
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 6f3093efbd7..2cb4a437942 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -93,7 +93,6 @@
* ==========================================================================
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index c4c1ac5fbd1..0110e441480 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -35,7 +35,6 @@
* matchings spread over multiple fragments, then go BM.
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index ca3211206ee..87847c2ae9e 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -26,7 +26,6 @@
* however while in strict mode the average runtime can be better.
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
index 7fd45451b44..3ced628cab4 100644
--- a/lib/ts_kmp.c
+++ b/lib/ts_kmp.c
@@ -30,7 +30,6 @@
* [2] See finite automation theory
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index b07db5ca3f6..bed7229378f 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -187,49 +187,49 @@ static char * number(char * buf, char * end, unsigned long long num, int base, i
size -= precision;
if (!(type&(ZEROPAD+LEFT))) {
while(size-->0) {
- if (buf <= end)
+ if (buf < end)
*buf = ' ';
++buf;
}
}
if (sign) {
- if (buf <= end)
+ if (buf < end)
*buf = sign;
++buf;
}
if (type & SPECIAL) {
if (base==8) {
- if (buf <= end)
+ if (buf < end)
*buf = '0';
++buf;
} else if (base==16) {
- if (buf <= end)
+ if (buf < end)
*buf = '0';
++buf;
- if (buf <= end)
+ if (buf < end)
*buf = digits[33];
++buf;
}
}
if (!(type & LEFT)) {
while (size-- > 0) {
- if (buf <= end)
+ if (buf < end)
*buf = c;
++buf;
}
}
while (i < precision--) {
- if (buf <= end)
+ if (buf < end)
*buf = '0';
++buf;
}
while (i-- > 0) {
- if (buf <= end)
+ if (buf < end)
*buf = tmp[i];
++buf;
}
while (size-- > 0) {
- if (buf <= end)
+ if (buf < end)
*buf = ' ';
++buf;
}
@@ -272,7 +272,8 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
/* 'z' changed to 'Z' --davidm 1/25/99 */
/* 't' added for ptrdiff_t */
- /* Reject out-of-range values early */
+ /* Reject out-of-range values early. Large positive sizes are
+ used for unknown buffer sizes. */
if (unlikely((int) size < 0)) {
/* There can be only one.. */
static int warn = 1;
@@ -282,16 +283,17 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
}
str = buf;
- end = buf + size - 1;
+ end = buf + size;
- if (end < buf - 1) {
- end = ((void *) -1);
- size = end - buf + 1;
+ /* Make sure end is always >= buf */
+ if (end < buf) {
+ end = ((void *)-1);
+ size = end - buf;
}
for (; *fmt ; ++fmt) {
if (*fmt != '%') {
- if (str <= end)
+ if (str < end)
*str = *fmt;
++str;
continue;
@@ -357,17 +359,17 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
case 'c':
if (!(flags & LEFT)) {
while (--field_width > 0) {
- if (str <= end)
+ if (str < end)
*str = ' ';
++str;
}
}
c = (unsigned char) va_arg(args, int);
- if (str <= end)
+ if (str < end)
*str = c;
++str;
while (--field_width > 0) {
- if (str <= end)
+ if (str < end)
*str = ' ';
++str;
}
@@ -382,18 +384,18 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
if (!(flags & LEFT)) {
while (len < field_width--) {
- if (str <= end)
+ if (str < end)
*str = ' ';
++str;
}
}
for (i = 0; i < len; ++i) {
- if (str <= end)
+ if (str < end)
*str = *s;
++str; ++s;
}
while (len < field_width--) {
- if (str <= end)
+ if (str < end)
*str = ' ';
++str;
}
@@ -426,7 +428,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
continue;
case '%':
- if (str <= end)
+ if (str < end)
*str = '%';
++str;
continue;
@@ -449,11 +451,11 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
break;
default:
- if (str <= end)
+ if (str < end)
*str = '%';
++str;
if (*fmt) {
- if (str <= end)
+ if (str < end)
*str = *fmt;
++str;
} else {
@@ -483,14 +485,13 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
str = number(str, end, num, base,
field_width, precision, flags);
}
- if (str <= end)
- *str = '\0';
- else if (size > 0)
- /* don't write out a null byte if the buf size is zero */
- *end = '\0';
- /* the trailing null byte doesn't count towards the total
- * ++str;
- */
+ if (size > 0) {
+ if (str < end)
+ *str = '\0';
+ else
+ end[-1] = '\0';
+ }
+ /* the trailing null byte doesn't count towards the total */
return str-buf;
}
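With end now pointing one past the buffer and every bounds check using str < end, vsnprintf() follows the C99 convention: for size > 0 the output is always NUL-terminated, and the return value is the length the fully formatted string would have needed. A small, hypothetical illustration of checking for truncation:

#include <linux/kernel.h>

/* Hypothetical caller demonstrating the C99-style return value. */
static void truncation_example(void)
{
	char buf[8];
	int n;

	/* Only "hello, " plus the '\0' fit, but n is 13, the length of
	 * the complete "hello, world!" string. */
	n = snprintf(buf, sizeof(buf), "hello, %s!", "world");
	if (n >= (int)sizeof(buf))
		printk(KERN_DEBUG "truncated, %d bytes needed\n", n + 1);
}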
@@ -848,3 +849,26 @@ int sscanf(const char * buf, const char * fmt, ...)
}
EXPORT_SYMBOL(sscanf);
+
+
+/* Simplified asprintf. */
+char *kasprintf(gfp_t gfp, const char *fmt, ...)
+{
+ va_list ap;
+ unsigned int len;
+ char *p;
+
+ va_start(ap, fmt);
+ len = vsnprintf(NULL, 0, fmt, ap);
+ va_end(ap);
+
+ p = kmalloc(len+1, gfp);
+ if (!p)
+ return NULL;
+ va_start(ap, fmt);
+ vsnprintf(p, len+1, fmt, ap);
+ va_end(ap);
+ return p;
+}
+
+EXPORT_SYMBOL(kasprintf);
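kasprintf() above uses the first vsnprintf(NULL, 0, ...) pass purely to size the result, then formats into a freshly kmalloc()ed buffer, so callers only have to kfree() the returned string. A minimal, hypothetical caller (assuming the declaration sits with the other printf helpers in <linux/kernel.h>):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical use: build a label on the heap and release it again. */
static int make_label(int index)
{
	char *label = kasprintf(GFP_KERNEL, "device-%d", index);

	if (!label)
		return -ENOMEM;
	printk(KERN_INFO "created %s\n", label);
	kfree(label);
	return 0;
}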
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
index 1653dd9bb01..c3e4a2baf83 100644
--- a/lib/zlib_deflate/deflate.c
+++ b/lib/zlib_deflate/deflate.c
@@ -164,34 +164,17 @@ static const config configuration_table[10] = {
memset((char *)s->head, 0, (unsigned)(s->hash_size-1)*sizeof(*s->head));
/* ========================================================================= */
-int zlib_deflateInit_(
- z_streamp strm,
- int level,
- const char *version,
- int stream_size
-)
-{
- return zlib_deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS,
- DEF_MEM_LEVEL,
- Z_DEFAULT_STRATEGY, version, stream_size);
- /* To do: ignore strm->next_in if we use it as window */
-}
-
-/* ========================================================================= */
-int zlib_deflateInit2_(
+int zlib_deflateInit2(
z_streamp strm,
int level,
int method,
int windowBits,
int memLevel,
- int strategy,
- const char *version,
- int stream_size
+ int strategy
)
{
deflate_state *s;
int noheader = 0;
- static char* my_version = ZLIB_VERSION;
deflate_workspace *mem;
ush *overlay;
@@ -199,10 +182,6 @@ int zlib_deflateInit2_(
* output size for (length,distance) codes is <= 24 bits.
*/
- if (version == NULL || version[0] != my_version[0] ||
- stream_size != sizeof(z_stream)) {
- return Z_VERSION_ERROR;
- }
if (strm == NULL) return Z_STREAM_ERROR;
strm->msg = NULL;
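With the version string and stream_size checks gone, in-kernel users call zlib_deflateInit2() directly with the six remaining arguments. A hedged sketch of such a caller; the vmalloc()ed workspace and the use of MAX_WBITS/MAX_MEM_LEVEL as stand-ins for the defaults the removed zlib_deflateInit_() wrapper passed are assumptions, not part of this patch:

#include <linux/zlib.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

/* Hypothetical setup of a deflate stream with default-ish parameters. */
static int deflate_setup(z_stream *strm)
{
	strm->workspace = vmalloc(zlib_deflate_workspacesize());
	if (!strm->workspace)
		return -ENOMEM;

	if (zlib_deflateInit2(strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
			      MAX_WBITS, MAX_MEM_LEVEL,
			      Z_DEFAULT_STRATEGY) != Z_OK) {
		vfree(strm->workspace);
		return -EINVAL;
	}
	return 0;
}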
diff --git a/lib/zlib_deflate/deflate_syms.c b/lib/zlib_deflate/deflate_syms.c
index 767b573d1ef..ccfe25f3920 100644
--- a/lib/zlib_deflate/deflate_syms.c
+++ b/lib/zlib_deflate/deflate_syms.c
@@ -12,8 +12,7 @@
EXPORT_SYMBOL(zlib_deflate_workspacesize);
EXPORT_SYMBOL(zlib_deflate);
-EXPORT_SYMBOL(zlib_deflateInit_);
-EXPORT_SYMBOL(zlib_deflateInit2_);
+EXPORT_SYMBOL(zlib_deflateInit2);
EXPORT_SYMBOL(zlib_deflateEnd);
EXPORT_SYMBOL(zlib_deflateReset);
MODULE_LICENSE("GPL");
diff --git a/lib/zlib_inflate/Makefile b/lib/zlib_inflate/Makefile
index 221c139e0df..bf065482fa6 100644
--- a/lib/zlib_inflate/Makefile
+++ b/lib/zlib_inflate/Makefile
@@ -15,5 +15,5 @@
obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate.o
-zlib_inflate-objs := infblock.o infcodes.o inffast.o inflate.o \
- inflate_sync.o inftrees.o infutil.o inflate_syms.o
+zlib_inflate-objs := inffast.o inflate.o \
+ inftrees.o inflate_syms.o
diff --git a/lib/zlib_inflate/infblock.c b/lib/zlib_inflate/infblock.c
deleted file mode 100644
index c16cdeff51a..00000000000
--- a/lib/zlib_inflate/infblock.c
+++ /dev/null
@@ -1,365 +0,0 @@
-/* infblock.c -- interpret and process block types to last block
- * Copyright (C) 1995-1998 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-#include <linux/zutil.h>
-#include "infblock.h"
-#include "inftrees.h"
-#include "infcodes.h"
-#include "infutil.h"
-
-struct inflate_codes_state;
-
-/* simplify the use of the inflate_huft type with some defines */
-#define exop word.what.Exop
-#define bits word.what.Bits
-
-/* Table for deflate from PKZIP's appnote.txt. */
-static const uInt border[] = { /* Order of the bit length code lengths */
- 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
-
-/*
- Notes beyond the 1.93a appnote.txt:
-
- 1. Distance pointers never point before the beginning of the output
- stream.
- 2. Distance pointers can point back across blocks, up to 32k away.
- 3. There is an implied maximum of 7 bits for the bit length table and
- 15 bits for the actual data.
- 4. If only one code exists, then it is encoded using one bit. (Zero
- would be more efficient, but perhaps a little confusing.) If two
- codes exist, they are coded using one bit each (0 and 1).
- 5. There is no way of sending zero distance codes--a dummy must be
- sent if there are none. (History: a pre 2.0 version of PKZIP would
- store blocks with no distance codes, but this was discovered to be
- too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
- zero distance codes, which is sent as one code of zero bits in
- length.
- 6. There are up to 286 literal/length codes. Code 256 represents the
- end-of-block. Note however that the static length tree defines
- 288 codes just to fill out the Huffman codes. Codes 286 and 287
- cannot be used though, since there is no length base or extra bits
- defined for them. Similarily, there are up to 30 distance codes.
- However, static trees define 32 codes (all 5 bits) to fill out the
- Huffman codes, but the last two had better not show up in the data.
- 7. Unzip can check dynamic Huffman blocks for complete code sets.
- The exception is that a single code would not be complete (see #4).
- 8. The five bits following the block type is really the number of
- literal codes sent minus 257.
- 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
- (1+6+6). Therefore, to output three times the length, you output
- three codes (1+1+1), whereas to output four times the same length,
- you only need two codes (1+3). Hmm.
- 10. In the tree reconstruction algorithm, Code = Code + Increment
- only if BitLength(i) is not zero. (Pretty obvious.)
- 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
- 12. Note: length code 284 can represent 227-258, but length code 285
- really is 258. The last length deserves its own, short code
- since it gets used a lot in very redundant files. The length
- 258 is special since 258 - 3 (the min match length) is 255.
- 13. The literal/length and distance code bit lengths are read as a
- single stream of lengths. It is possible (and advantageous) for
- a repeat code (16, 17, or 18) to go across the boundary between
- the two sets of lengths.
- */
-
-
-void zlib_inflate_blocks_reset(
- inflate_blocks_statef *s,
- z_streamp z,
- uLong *c
-)
-{
- if (c != NULL)
- *c = s->check;
- if (s->mode == CODES)
- zlib_inflate_codes_free(s->sub.decode.codes, z);
- s->mode = TYPE;
- s->bitk = 0;
- s->bitb = 0;
- s->read = s->write = s->window;
- if (s->checkfn != NULL)
- z->adler = s->check = (*s->checkfn)(0L, NULL, 0);
-}
-
-inflate_blocks_statef *zlib_inflate_blocks_new(
- z_streamp z,
- check_func c,
- uInt w
-)
-{
- inflate_blocks_statef *s;
-
- s = &WS(z)->working_blocks_state;
- s->hufts = WS(z)->working_hufts;
- s->window = WS(z)->working_window;
- s->end = s->window + w;
- s->checkfn = c;
- s->mode = TYPE;
- zlib_inflate_blocks_reset(s, z, NULL);
- return s;
-}
-
-
-int zlib_inflate_blocks(
- inflate_blocks_statef *s,
- z_streamp z,
- int r
-)
-{
- uInt t; /* temporary storage */
- uLong b; /* bit buffer */
- uInt k; /* bits in bit buffer */
- Byte *p; /* input data pointer */
- uInt n; /* bytes available there */
- Byte *q; /* output window write pointer */
- uInt m; /* bytes to end of window or read pointer */
-
- /* copy input/output information to locals (UPDATE macro restores) */
- LOAD
-
- /* process input based on current state */
- while (1) switch (s->mode)
- {
- case TYPE:
- NEEDBITS(3)
- t = (uInt)b & 7;
- s->last = t & 1;
- switch (t >> 1)
- {
- case 0: /* stored */
- DUMPBITS(3)
- t = k & 7; /* go to byte boundary */
- DUMPBITS(t)
- s->mode = LENS; /* get length of stored block */
- break;
- case 1: /* fixed */
- {
- uInt bl, bd;
- inflate_huft *tl, *td;
-
- zlib_inflate_trees_fixed(&bl, &bd, &tl, &td, s->hufts, z);
- s->sub.decode.codes = zlib_inflate_codes_new(bl, bd, tl, td, z);
- if (s->sub.decode.codes == NULL)
- {
- r = Z_MEM_ERROR;
- LEAVE
- }
- }
- DUMPBITS(3)
- s->mode = CODES;
- break;
- case 2: /* dynamic */
- DUMPBITS(3)
- s->mode = TABLE;
- break;
- case 3: /* illegal */
- DUMPBITS(3)
- s->mode = B_BAD;
- z->msg = (char*)"invalid block type";
- r = Z_DATA_ERROR;
- LEAVE
- }
- break;
- case LENS:
- NEEDBITS(32)
- if ((((~b) >> 16) & 0xffff) != (b & 0xffff))
- {
- s->mode = B_BAD;
- z->msg = (char*)"invalid stored block lengths";
- r = Z_DATA_ERROR;
- LEAVE
- }
- s->sub.left = (uInt)b & 0xffff;
- b = k = 0; /* dump bits */
- s->mode = s->sub.left ? STORED : (s->last ? DRY : TYPE);
- break;
- case STORED:
- if (n == 0)
- LEAVE
- NEEDOUT
- t = s->sub.left;
- if (t > n) t = n;
- if (t > m) t = m;
- memcpy(q, p, t);
- p += t; n -= t;
- q += t; m -= t;
- if ((s->sub.left -= t) != 0)
- break;
- s->mode = s->last ? DRY : TYPE;
- break;
- case TABLE:
- NEEDBITS(14)
- s->sub.trees.table = t = (uInt)b & 0x3fff;
-#ifndef PKZIP_BUG_WORKAROUND
- if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29)
- {
- s->mode = B_BAD;
- z->msg = (char*)"too many length or distance symbols";
- r = Z_DATA_ERROR;
- LEAVE
- }
-#endif
- {
- s->sub.trees.blens = WS(z)->working_blens;
- }
- DUMPBITS(14)
- s->sub.trees.index = 0;
- s->mode = BTREE;
- case BTREE:
- while (s->sub.trees.index < 4 + (s->sub.trees.table >> 10))
- {
- NEEDBITS(3)
- s->sub.trees.blens[border[s->sub.trees.index++]] = (uInt)b & 7;
- DUMPBITS(3)
- }
- while (s->sub.trees.index < 19)
- s->sub.trees.blens[border[s->sub.trees.index++]] = 0;
- s->sub.trees.bb = 7;
- t = zlib_inflate_trees_bits(s->sub.trees.blens, &s->sub.trees.bb,
- &s->sub.trees.tb, s->hufts, z);
- if (t != Z_OK)
- {
- r = t;
- if (r == Z_DATA_ERROR)
- s->mode = B_BAD;
- LEAVE
- }
- s->sub.trees.index = 0;
- s->mode = DTREE;
- case DTREE:
- while (t = s->sub.trees.table,
- s->sub.trees.index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f))
- {
- inflate_huft *h;
- uInt i, j, c;
-
- t = s->sub.trees.bb;
- NEEDBITS(t)
- h = s->sub.trees.tb + ((uInt)b & zlib_inflate_mask[t]);
- t = h->bits;
- c = h->base;
- if (c < 16)
- {
- DUMPBITS(t)
- s->sub.trees.blens[s->sub.trees.index++] = c;
- }
- else /* c == 16..18 */
- {
- i = c == 18 ? 7 : c - 14;
- j = c == 18 ? 11 : 3;
- NEEDBITS(t + i)
- DUMPBITS(t)
- j += (uInt)b & zlib_inflate_mask[i];
- DUMPBITS(i)
- i = s->sub.trees.index;
- t = s->sub.trees.table;
- if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) ||
- (c == 16 && i < 1))
- {
- s->mode = B_BAD;
- z->msg = (char*)"invalid bit length repeat";
- r = Z_DATA_ERROR;
- LEAVE
- }
- c = c == 16 ? s->sub.trees.blens[i - 1] : 0;
- do {
- s->sub.trees.blens[i++] = c;
- } while (--j);
- s->sub.trees.index = i;
- }
- }
- s->sub.trees.tb = NULL;
- {
- uInt bl, bd;
- inflate_huft *tl, *td;
- inflate_codes_statef *c;
-
- bl = 9; /* must be <= 9 for lookahead assumptions */
- bd = 6; /* must be <= 9 for lookahead assumptions */
- t = s->sub.trees.table;
- t = zlib_inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f),
- s->sub.trees.blens, &bl, &bd, &tl, &td,
- s->hufts, z);
- if (t != Z_OK)
- {
- if (t == (uInt)Z_DATA_ERROR)
- s->mode = B_BAD;
- r = t;
- LEAVE
- }
- if ((c = zlib_inflate_codes_new(bl, bd, tl, td, z)) == NULL)
- {
- r = Z_MEM_ERROR;
- LEAVE
- }
- s->sub.decode.codes = c;
- }
- s->mode = CODES;
- case CODES:
- UPDATE
- if ((r = zlib_inflate_codes(s, z, r)) != Z_STREAM_END)
- return zlib_inflate_flush(s, z, r);
- r = Z_OK;
- zlib_inflate_codes_free(s->sub.decode.codes, z);
- LOAD
- if (!s->last)
- {
- s->mode = TYPE;
- break;
- }
- s->mode = DRY;
- case DRY:
- FLUSH
- if (s->read != s->write)
- LEAVE
- s->mode = B_DONE;
- case B_DONE:
- r = Z_STREAM_END;
- LEAVE
- case B_BAD:
- r = Z_DATA_ERROR;
- LEAVE
- default:
- r = Z_STREAM_ERROR;
- LEAVE
- }
-}
-
-
-int zlib_inflate_blocks_free(
- inflate_blocks_statef *s,
- z_streamp z
-)
-{
- zlib_inflate_blocks_reset(s, z, NULL);
- return Z_OK;
-}
-
-
-#if 0
-void zlib_inflate_set_dictionary(
- inflate_blocks_statef *s,
- const Byte *d,
- uInt n
-)
-{
- memcpy(s->window, d, n);
- s->read = s->write = s->window + n;
-}
-#endif /* 0 */
-
-
-/* Returns true if inflate is currently at the end of a block generated
- * by Z_SYNC_FLUSH or Z_FULL_FLUSH.
- * IN assertion: s != NULL
- */
-#if 0
-int zlib_inflate_blocks_sync_point(
- inflate_blocks_statef *s
-)
-{
- return s->mode == LENS;
-}
-#endif /* 0 */
diff --git a/lib/zlib_inflate/infblock.h b/lib/zlib_inflate/infblock.h
deleted file mode 100644
index ceee60b5107..00000000000
--- a/lib/zlib_inflate/infblock.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* infblock.h -- header to use infblock.c
- * Copyright (C) 1995-1998 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-#ifndef _INFBLOCK_H
-#define _INFBLOCK_H
-
-struct inflate_blocks_state;
-typedef struct inflate_blocks_state inflate_blocks_statef;
-
-extern inflate_blocks_statef * zlib_inflate_blocks_new (
- z_streamp z,
- check_func c, /* check function */
- uInt w); /* window size */
-
-extern int zlib_inflate_blocks (
- inflate_blocks_statef *,
- z_streamp ,
- int); /* initial return code */
-
-extern void zlib_inflate_blocks_reset (
- inflate_blocks_statef *,
- z_streamp ,
- uLong *); /* check value on output */
-
-extern int zlib_inflate_blocks_free (
- inflate_blocks_statef *,
- z_streamp);
-
-#if 0
-extern void zlib_inflate_set_dictionary (
- inflate_blocks_statef *s,
- const Byte *d, /* dictionary */
- uInt n); /* dictionary length */
-#endif /* 0 */
-
-#if 0
-extern int zlib_inflate_blocks_sync_point (
- inflate_blocks_statef *s);
-#endif /* 0 */
-
-#endif /* _INFBLOCK_H */
diff --git a/lib/zlib_inflate/infcodes.c b/lib/zlib_inflate/infcodes.c
deleted file mode 100644
index 07cd7591cbb..00000000000
--- a/lib/zlib_inflate/infcodes.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/* infcodes.c -- process literals and length/distance pairs
- * Copyright (C) 1995-1998 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-#include <linux/zutil.h>
-#include "inftrees.h"
-#include "infblock.h"
-#include "infcodes.h"
-#include "infutil.h"
-#include "inffast.h"
-
-/* simplify the use of the inflate_huft type with some defines */
-#define exop word.what.Exop
-#define bits word.what.Bits
-
-inflate_codes_statef *zlib_inflate_codes_new(
- uInt bl,
- uInt bd,
- inflate_huft *tl,
- inflate_huft *td, /* need separate declaration for Borland C++ */
- z_streamp z
-)
-{
- inflate_codes_statef *c;
-
- c = &WS(z)->working_state;
- {
- c->mode = START;
- c->lbits = (Byte)bl;
- c->dbits = (Byte)bd;
- c->ltree = tl;
- c->dtree = td;
- }
- return c;
-}
-
-
-int zlib_inflate_codes(
- inflate_blocks_statef *s,
- z_streamp z,
- int r
-)
-{
- uInt j; /* temporary storage */
- inflate_huft *t; /* temporary pointer */
- uInt e; /* extra bits or operation */
- uLong b; /* bit buffer */
- uInt k; /* bits in bit buffer */
- Byte *p; /* input data pointer */
- uInt n; /* bytes available there */
- Byte *q; /* output window write pointer */
- uInt m; /* bytes to end of window or read pointer */
- Byte *f; /* pointer to copy strings from */
- inflate_codes_statef *c = s->sub.decode.codes; /* codes state */
-
- /* copy input/output information to locals (UPDATE macro restores) */
- LOAD
-
- /* process input and output based on current state */
- while (1) switch (c->mode)
- { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
- case START: /* x: set up for LEN */
-#ifndef SLOW
- if (m >= 258 && n >= 10)
- {
- UPDATE
- r = zlib_inflate_fast(c->lbits, c->dbits, c->ltree, c->dtree, s, z);
- LOAD
- if (r != Z_OK)
- {
- c->mode = r == Z_STREAM_END ? WASH : BADCODE;
- break;
- }
- }
-#endif /* !SLOW */
- c->sub.code.need = c->lbits;
- c->sub.code.tree = c->ltree;
- c->mode = LEN;
- case LEN: /* i: get length/literal/eob next */
- j = c->sub.code.need;
- NEEDBITS(j)
- t = c->sub.code.tree + ((uInt)b & zlib_inflate_mask[j]);
- DUMPBITS(t->bits)
- e = (uInt)(t->exop);
- if (e == 0) /* literal */
- {
- c->sub.lit = t->base;
- c->mode = LIT;
- break;
- }
- if (e & 16) /* length */
- {
- c->sub.copy.get = e & 15;
- c->len = t->base;
- c->mode = LENEXT;
- break;
- }
- if ((e & 64) == 0) /* next table */
- {
- c->sub.code.need = e;
- c->sub.code.tree = t + t->base;
- break;
- }
- if (e & 32) /* end of block */
- {
- c->mode = WASH;
- break;
- }
- c->mode = BADCODE; /* invalid code */
- z->msg = (char*)"invalid literal/length code";
- r = Z_DATA_ERROR;
- LEAVE
- case LENEXT: /* i: getting length extra (have base) */
- j = c->sub.copy.get;
- NEEDBITS(j)
- c->len += (uInt)b & zlib_inflate_mask[j];
- DUMPBITS(j)
- c->sub.code.need = c->dbits;
- c->sub.code.tree = c->dtree;
- c->mode = DIST;
- case DIST: /* i: get distance next */
- j = c->sub.code.need;
- NEEDBITS(j)
- t = c->sub.code.tree + ((uInt)b & zlib_inflate_mask[j]);
- DUMPBITS(t->bits)
- e = (uInt)(t->exop);
- if (e & 16) /* distance */
- {
- c->sub.copy.get = e & 15;
- c->sub.copy.dist = t->base;
- c->mode = DISTEXT;
- break;
- }
- if ((e & 64) == 0) /* next table */
- {
- c->sub.code.need = e;
- c->sub.code.tree = t + t->base;
- break;
- }
- c->mode = BADCODE; /* invalid code */
- z->msg = (char*)"invalid distance code";
- r = Z_DATA_ERROR;
- LEAVE
- case DISTEXT: /* i: getting distance extra */
- j = c->sub.copy.get;
- NEEDBITS(j)
- c->sub.copy.dist += (uInt)b & zlib_inflate_mask[j];
- DUMPBITS(j)
- c->mode = COPY;
- case COPY: /* o: copying bytes in window, waiting for space */
- f = q - c->sub.copy.dist;
- while (f < s->window) /* modulo window size-"while" instead */
- f += s->end - s->window; /* of "if" handles invalid distances */
- while (c->len)
- {
- NEEDOUT
- OUTBYTE(*f++)
- if (f == s->end)
- f = s->window;
- c->len--;
- }
- c->mode = START;
- break;
- case LIT: /* o: got literal, waiting for output space */
- NEEDOUT
- OUTBYTE(c->sub.lit)
- c->mode = START;
- break;
- case WASH: /* o: got eob, possibly more output */
- if (k > 7) /* return unused byte, if any */
- {
- k -= 8;
- n++;
- p--; /* can always return one */
- }
- FLUSH
- if (s->read != s->write)
- LEAVE
- c->mode = END;
- case END:
- r = Z_STREAM_END;
- LEAVE
- case BADCODE: /* x: got error */
- r = Z_DATA_ERROR;
- LEAVE
- default:
- r = Z_STREAM_ERROR;
- LEAVE
- }
-#ifdef NEED_DUMMY_RETURN
- return Z_STREAM_ERROR; /* Some dumb compilers complain without this */
-#endif
-}
-
-
-void zlib_inflate_codes_free(
- inflate_codes_statef *c,
- z_streamp z
-)
-{
-}
diff --git a/lib/zlib_inflate/infcodes.h b/lib/zlib_inflate/infcodes.h
deleted file mode 100644
index 5cff417523b..00000000000
--- a/lib/zlib_inflate/infcodes.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* infcodes.h -- header to use infcodes.c
- * Copyright (C) 1995-1998 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-#ifndef _INFCODES_H
-#define _INFCODES_H
-
-#include "infblock.h"
-
-struct inflate_codes_state;
-typedef struct inflate_codes_state inflate_codes_statef;
-
-extern inflate_codes_statef *zlib_inflate_codes_new (
- uInt, uInt,
- inflate_huft *, inflate_huft *,
- z_streamp );
-
-extern int zlib_inflate_codes (
- inflate_blocks_statef *,
- z_streamp ,
- int);
-
-extern void zlib_inflate_codes_free (
- inflate_codes_statef *,
- z_streamp );
-
-#endif /* _INFCODES_H */
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index 0bd7623fc85..d84560c076d 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -1,176 +1,312 @@
-/* inffast.c -- process literals and length/distance pairs fast
- * Copyright (C) 1995-1998 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
+/* inffast.c -- fast decoding
+ * Copyright (C) 1995-2004 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
*/
#include <linux/zutil.h>
#include "inftrees.h"
-#include "infblock.h"
-#include "infcodes.h"
-#include "infutil.h"
+#include "inflate.h"
#include "inffast.h"
-struct inflate_codes_state;
-
-/* simplify the use of the inflate_huft type with some defines */
-#define exop word.what.Exop
-#define bits word.what.Bits
-
-/* macros for bit input with no checking and for returning unused bytes */
-#define GRABBITS(j) {while(k<(j)){b|=((uLong)NEXTBYTE)<<k;k+=8;}}
-#define UNGRAB {c=z->avail_in-n;c=(k>>3)<c?k>>3:c;n+=c;p-=c;k-=c<<3;}
-
-/* Called with number of bytes left to write in window at least 258
- (the maximum string length) and number of input bytes available
- at least ten. The ten bytes are six bytes for the longest length/
- distance pair plus four bytes for overloading the bit buffer. */
-
-int zlib_inflate_fast(
- uInt bl,
- uInt bd,
- inflate_huft *tl,
- inflate_huft *td, /* need separate declaration for Borland C++ */
- inflate_blocks_statef *s,
- z_streamp z
-)
+#ifndef ASMINF
+
+/* Allow machine dependent optimization for post-increment or pre-increment.
+ Based on testing to date,
+ Pre-increment preferred for:
+ - PowerPC G3 (Adler)
+ - MIPS R5000 (Randers-Pehrson)
+ Post-increment preferred for:
+ - none
+ No measurable difference:
+ - Pentium III (Anderson)
+ - M68060 (Nikl)
+ */
+#ifdef POSTINC
+# define OFF 0
+# define PUP(a) *(a)++
+#else
+# define OFF 1
+# define PUP(a) *++(a)
+#endif
+
+/*
+ Decode literal, length, and distance codes and write out the resulting
+ literal and match bytes until either not enough input or output is
+ available, an end-of-block is encountered, or a data error is encountered.
+ When large enough input and output buffers are supplied to inflate(), for
+ example, a 16K input buffer and a 64K output buffer, more than 95% of the
+ inflate execution time is spent in this routine.
+
+ Entry assumptions:
+
+ state->mode == LEN
+ strm->avail_in >= 6
+ strm->avail_out >= 258
+ start >= strm->avail_out
+ state->bits < 8
+
+ On return, state->mode is one of:
+
+ LEN -- ran out of enough output space or enough available input
+ TYPE -- reached end of block code, inflate() to interpret next block
+ BAD -- error in block data
+
+ Notes:
+
+ - The maximum input bits used by a length/distance pair is 15 bits for the
+ length code, 5 bits for the length extra, 15 bits for the distance code,
+ and 13 bits for the distance extra. This totals 48 bits, or six bytes.
+ Therefore if strm->avail_in >= 6, then there is enough input to avoid
+ checking for available input while decoding.
+
+ - The maximum bytes that a single length/distance pair can output is 258
+ bytes, which is the maximum length that can be coded. inflate_fast()
+ requires strm->avail_out >= 258 for each loop to avoid checking for
+ output space.
+
+ - @start: inflate()'s starting value for strm->avail_out
+ */
+void inflate_fast(z_streamp strm, unsigned start)
{
- inflate_huft *t; /* temporary pointer */
- uInt e; /* extra bits or operation */
- uLong b; /* bit buffer */
- uInt k; /* bits in bit buffer */
- Byte *p; /* input data pointer */
- uInt n; /* bytes available there */
- Byte *q; /* output window write pointer */
- uInt m; /* bytes to end of window or read pointer */
- uInt ml; /* mask for literal/length tree */
- uInt md; /* mask for distance tree */
- uInt c; /* bytes to copy */
- uInt d; /* distance back to copy from */
- Byte *r; /* copy source pointer */
-
- /* load input, output, bit values */
- LOAD
-
- /* initialize masks */
- ml = zlib_inflate_mask[bl];
- md = zlib_inflate_mask[bd];
-
- /* do until not enough input or output space for fast loop */
- do { /* assume called with m >= 258 && n >= 10 */
- /* get literal/length code */
- GRABBITS(20) /* max bits for literal/length code */
- if ((e = (t = tl + ((uInt)b & ml))->exop) == 0)
- {
- DUMPBITS(t->bits)
- *q++ = (Byte)t->base;
- m--;
- continue;
- }
+ struct inflate_state *state;
+ unsigned char *in; /* local strm->next_in */
+ unsigned char *last; /* while in < last, enough input available */
+ unsigned char *out; /* local strm->next_out */
+ unsigned char *beg; /* inflate()'s initial strm->next_out */
+ unsigned char *end; /* while out < end, enough space available */
+#ifdef INFLATE_STRICT
+ unsigned dmax; /* maximum distance from zlib header */
+#endif
+ unsigned wsize; /* window size or zero if not using window */
+ unsigned whave; /* valid bytes in the window */
+ unsigned write; /* window write index */
+ unsigned char *window; /* allocated sliding window, if wsize != 0 */
+ unsigned long hold; /* local strm->hold */
+ unsigned bits; /* local strm->bits */
+ code const *lcode; /* local strm->lencode */
+ code const *dcode; /* local strm->distcode */
+ unsigned lmask; /* mask for first level of length codes */
+ unsigned dmask; /* mask for first level of distance codes */
+ code this; /* retrieved table entry */
+ unsigned op; /* code bits, operation, extra bits, or */
+ /* window position, window bytes to copy */
+ unsigned len; /* match length, unused bytes */
+ unsigned dist; /* match distance */
+ unsigned char *from; /* where to copy match from */
+
+ /* copy state to local variables */
+ state = (struct inflate_state *)strm->state;
+ in = strm->next_in - OFF;
+ last = in + (strm->avail_in - 5);
+ out = strm->next_out - OFF;
+ beg = out - (start - strm->avail_out);
+ end = out + (strm->avail_out - 257);
+#ifdef INFLATE_STRICT
+ dmax = state->dmax;
+#endif
+ wsize = state->wsize;
+ whave = state->whave;
+ write = state->write;
+ window = state->window;
+ hold = state->hold;
+ bits = state->bits;
+ lcode = state->lencode;
+ dcode = state->distcode;
+ lmask = (1U << state->lenbits) - 1;
+ dmask = (1U << state->distbits) - 1;
+
+ /* decode literals and length/distances until end-of-block or not enough
+ input data or output space */
do {
- DUMPBITS(t->bits)
- if (e & 16)
- {
- /* get extra bits for length */
- e &= 15;
- c = t->base + ((uInt)b & zlib_inflate_mask[e]);
- DUMPBITS(e)
-
- /* decode distance base of block to copy */
- GRABBITS(15); /* max bits for distance code */
- e = (t = td + ((uInt)b & md))->exop;
- do {
- DUMPBITS(t->bits)
- if (e & 16)
- {
- /* get extra bits to add to distance base */
- e &= 15;
- GRABBITS(e) /* get extra bits (up to 13) */
- d = t->base + ((uInt)b & zlib_inflate_mask[e]);
- DUMPBITS(e)
-
- /* do the copy */
- m -= c;
- r = q - d;
- if (r < s->window) /* wrap if needed */
- {
- do {
- r += s->end - s->window; /* force pointer in window */
- } while (r < s->window); /* covers invalid distances */
- e = s->end - r;
- if (c > e)
- {
- c -= e; /* wrapped copy */
- do {
- *q++ = *r++;
- } while (--e);
- r = s->window;
- do {
- *q++ = *r++;
- } while (--c);
- }
- else /* normal copy */
- {
- *q++ = *r++; c--;
- *q++ = *r++; c--;
- do {
- *q++ = *r++;
- } while (--c);
- }
+ if (bits < 15) {
+ hold += (unsigned long)(PUP(in)) << bits;
+ bits += 8;
+ hold += (unsigned long)(PUP(in)) << bits;
+ bits += 8;
+ }
+ this = lcode[hold & lmask];
+ dolen:
+ op = (unsigned)(this.bits);
+ hold >>= op;
+ bits -= op;
+ op = (unsigned)(this.op);
+ if (op == 0) { /* literal */
+ PUP(out) = (unsigned char)(this.val);
+ }
+ else if (op & 16) { /* length base */
+ len = (unsigned)(this.val);
+ op &= 15; /* number of extra bits */
+ if (op) {
+ if (bits < op) {
+ hold += (unsigned long)(PUP(in)) << bits;
+ bits += 8;
+ }
+ len += (unsigned)hold & ((1U << op) - 1);
+ hold >>= op;
+ bits -= op;
+ }
+ if (bits < 15) {
+ hold += (unsigned long)(PUP(in)) << bits;
+ bits += 8;
+ hold += (unsigned long)(PUP(in)) << bits;
+ bits += 8;
+ }
+ this = dcode[hold & dmask];
+ dodist:
+ op = (unsigned)(this.bits);
+ hold >>= op;
+ bits -= op;
+ op = (unsigned)(this.op);
+ if (op & 16) { /* distance base */
+ dist = (unsigned)(this.val);
+ op &= 15; /* number of extra bits */
+ if (bits < op) {
+ hold += (unsigned long)(PUP(in)) << bits;
+ bits += 8;
+ if (bits < op) {
+ hold += (unsigned long)(PUP(in)) << bits;
+ bits += 8;
+ }
+ }
+ dist += (unsigned)hold & ((1U << op) - 1);
+#ifdef INFLATE_STRICT
+ if (dist > dmax) {
+ strm->msg = (char *)"invalid distance too far back";
+ state->mode = BAD;
+ break;
+ }
+#endif
+ hold >>= op;
+ bits -= op;
+ op = (unsigned)(out - beg); /* max distance in output */
+ if (dist > op) { /* see if copy from window */
+ op = dist - op; /* distance back in window */
+ if (op > whave) {
+ strm->msg = (char *)"invalid distance too far back";
+ state->mode = BAD;
+ break;
+ }
+ from = window - OFF;
+ if (write == 0) { /* very common case */
+ from += wsize - op;
+ if (op < len) { /* some from window */
+ len -= op;
+ do {
+ PUP(out) = PUP(from);
+ } while (--op);
+ from = out - dist; /* rest from output */
+ }
+ }
+ else if (write < op) { /* wrap around window */
+ from += wsize + write - op;
+ op -= write;
+ if (op < len) { /* some from end of window */
+ len -= op;
+ do {
+ PUP(out) = PUP(from);
+ } while (--op);
+ from = window - OFF;
+ if (write < len) { /* some from start of window */
+ op = write;
+ len -= op;
+ do {
+ PUP(out) = PUP(from);
+ } while (--op);
+ from = out - dist; /* rest from output */
+ }
+ }
+ }
+ else { /* contiguous in window */
+ from += write - op;
+ if (op < len) { /* some from window */
+ len -= op;
+ do {
+ PUP(out) = PUP(from);
+ } while (--op);
+ from = out - dist; /* rest from output */
+ }
+ }
+ while (len > 2) {
+ PUP(out) = PUP(from);
+ PUP(out) = PUP(from);
+ PUP(out) = PUP(from);
+ len -= 3;
+ }
+ if (len) {
+ PUP(out) = PUP(from);
+ if (len > 1)
+ PUP(out) = PUP(from);
+ }
+ }
+ else {
+ from = out - dist; /* copy direct from output */
+ do { /* minimum length is three */
+ PUP(out) = PUP(from);
+ PUP(out) = PUP(from);
+ PUP(out) = PUP(from);
+ len -= 3;
+ } while (len > 2);
+ if (len) {
+ PUP(out) = PUP(from);
+ if (len > 1)
+ PUP(out) = PUP(from);
+ }
+ }
}
- else /* normal copy */
- {
- *q++ = *r++; c--;
- *q++ = *r++; c--;
- do {
- *q++ = *r++;
- } while (--c);
+ else if ((op & 64) == 0) { /* 2nd level distance code */
+ this = dcode[this.val + (hold & ((1U << op) - 1))];
+ goto dodist;
}
+ else {
+ strm->msg = (char *)"invalid distance code";
+ state->mode = BAD;
+ break;
+ }
+ }
+ else if ((op & 64) == 0) { /* 2nd level length code */
+ this = lcode[this.val + (hold & ((1U << op) - 1))];
+ goto dolen;
+ }
+ else if (op & 32) { /* end-of-block */
+ state->mode = TYPE;
break;
- }
- else if ((e & 64) == 0)
- {
- t += t->base;
- e = (t += ((uInt)b & zlib_inflate_mask[e]))->exop;
- }
- else
- {
- z->msg = (char*)"invalid distance code";
- UNGRAB
- UPDATE
- return Z_DATA_ERROR;
- }
- } while (1);
- break;
- }
- if ((e & 64) == 0)
- {
- t += t->base;
- if ((e = (t += ((uInt)b & zlib_inflate_mask[e]))->exop) == 0)
- {
- DUMPBITS(t->bits)
- *q++ = (Byte)t->base;
- m--;
- break;
}
- }
- else if (e & 32)
- {
- UNGRAB
- UPDATE
- return Z_STREAM_END;
- }
- else
- {
- z->msg = (char*)"invalid literal/length code";
- UNGRAB
- UPDATE
- return Z_DATA_ERROR;
- }
- } while (1);
- } while (m >= 258 && n >= 10);
-
- /* not enough input or output--restore pointers and return */
- UNGRAB
- UPDATE
- return Z_OK;
+ else {
+ strm->msg = (char *)"invalid literal/length code";
+ state->mode = BAD;
+ break;
+ }
+ } while (in < last && out < end);
+
+ /* return unused bytes (on entry, bits < 8, so in won't go too far back) */
+ len = bits >> 3;
+ in -= len;
+ bits -= len << 3;
+ hold &= (1U << bits) - 1;
+
+ /* update state and return */
+ strm->next_in = in + OFF;
+ strm->next_out = out + OFF;
+ strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last));
+ strm->avail_out = (unsigned)(out < end ?
+ 257 + (end - out) : 257 - (out - end));
+ state->hold = hold;
+ state->bits = bits;
+ return;
}
+
+/*
+ inflate_fast() speedups that turned out slower (on a PowerPC G3 750CXe):
+ - Using bit fields for code structure
+ - Different op definition to avoid & for extra bits (do & for table bits)
+ - Three separate decoding do-loops for direct, window, and write == 0
+ - Special case for distance > 1 copies to do overlapped load and store copy
+ - Explicit branch predictions (based on measured branch probabilities)
+ - Deferring match copy and interspersed it with decoding subsequent codes
+ - Swapping literal/length else
+ - Swapping window/direct else
+ - Larger unrolled copy loops (three is about right)
+ - Moving len -= 3 statement into middle of loop
+ */
+
+#endif /* !ASMINF */
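The OFF/PUP macros above let the whole fast loop be written once while leaving the choice between post-increment (*p++) and pre-increment (*++p) addressing to the port: pointers are biased back by OFF on entry and un-biased again when state is written back. A standalone toy loop using the same trick (an illustration only, not kernel code):

#include <stddef.h>

#ifdef POSTINC
# define OFF 0
# define PUP(a) *(a)++
#else
# define OFF 1
# define PUP(a) *++(a)
#endif

/* Copy n bytes with the same pointer-biasing scheme as inflate_fast():
 * after the initial "-= OFF", PUP() always yields the next element,
 * whichever increment flavour was selected above. */
static void pup_copy(unsigned char *dst, const unsigned char *src, size_t n)
{
	dst -= OFF;
	src -= OFF;
	while (n--)
		PUP(dst) = PUP(src);
}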
diff --git a/lib/zlib_inflate/inffast.h b/lib/zlib_inflate/inffast.h
index fc720f0fa7f..40315d9fddc 100644
--- a/lib/zlib_inflate/inffast.h
+++ b/lib/zlib_inflate/inffast.h
@@ -1,6 +1,6 @@
/* inffast.h -- header to use inffast.c
- * Copyright (C) 1995-1998 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
+ * Copyright (C) 1995-2003 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
*/
/* WARNING: this file should *not* be used by applications. It is
@@ -8,10 +8,4 @@
subject to change. Applications should only use zlib.h.
*/
-extern int zlib_inflate_fast (
- uInt,
- uInt,
- inflate_huft *,
- inflate_huft *,
- inflate_blocks_statef *,
- z_streamp );
+void inflate_fast (z_streamp strm, unsigned start);
diff --git a/lib/zlib_inflate/inffixed.h b/lib/zlib_inflate/inffixed.h
new file mode 100644
index 00000000000..75ed4b5978d
--- /dev/null
+++ b/lib/zlib_inflate/inffixed.h
@@ -0,0 +1,94 @@
+ /* inffixed.h -- table for decoding fixed codes
+ * Generated automatically by makefixed().
+ */
+
+ /* WARNING: this file should *not* be used by applications. It
+ is part of the implementation of the compression library and
+ is subject to change. Applications should only use zlib.h.
+ */
+
+ static const code lenfix[512] = {
+ {96,7,0},{0,8,80},{0,8,16},{20,8,115},{18,7,31},{0,8,112},{0,8,48},
+ {0,9,192},{16,7,10},{0,8,96},{0,8,32},{0,9,160},{0,8,0},{0,8,128},
+ {0,8,64},{0,9,224},{16,7,6},{0,8,88},{0,8,24},{0,9,144},{19,7,59},
+ {0,8,120},{0,8,56},{0,9,208},{17,7,17},{0,8,104},{0,8,40},{0,9,176},
+ {0,8,8},{0,8,136},{0,8,72},{0,9,240},{16,7,4},{0,8,84},{0,8,20},
+ {21,8,227},{19,7,43},{0,8,116},{0,8,52},{0,9,200},{17,7,13},{0,8,100},
+ {0,8,36},{0,9,168},{0,8,4},{0,8,132},{0,8,68},{0,9,232},{16,7,8},
+ {0,8,92},{0,8,28},{0,9,152},{20,7,83},{0,8,124},{0,8,60},{0,9,216},
+ {18,7,23},{0,8,108},{0,8,44},{0,9,184},{0,8,12},{0,8,140},{0,8,76},
+ {0,9,248},{16,7,3},{0,8,82},{0,8,18},{21,8,163},{19,7,35},{0,8,114},
+ {0,8,50},{0,9,196},{17,7,11},{0,8,98},{0,8,34},{0,9,164},{0,8,2},
+ {0,8,130},{0,8,66},{0,9,228},{16,7,7},{0,8,90},{0,8,26},{0,9,148},
+ {20,7,67},{0,8,122},{0,8,58},{0,9,212},{18,7,19},{0,8,106},{0,8,42},
+ {0,9,180},{0,8,10},{0,8,138},{0,8,74},{0,9,244},{16,7,5},{0,8,86},
+ {0,8,22},{64,8,0},{19,7,51},{0,8,118},{0,8,54},{0,9,204},{17,7,15},
+ {0,8,102},{0,8,38},{0,9,172},{0,8,6},{0,8,134},{0,8,70},{0,9,236},
+ {16,7,9},{0,8,94},{0,8,30},{0,9,156},{20,7,99},{0,8,126},{0,8,62},
+ {0,9,220},{18,7,27},{0,8,110},{0,8,46},{0,9,188},{0,8,14},{0,8,142},
+ {0,8,78},{0,9,252},{96,7,0},{0,8,81},{0,8,17},{21,8,131},{18,7,31},
+ {0,8,113},{0,8,49},{0,9,194},{16,7,10},{0,8,97},{0,8,33},{0,9,162},
+ {0,8,1},{0,8,129},{0,8,65},{0,9,226},{16,7,6},{0,8,89},{0,8,25},
+ {0,9,146},{19,7,59},{0,8,121},{0,8,57},{0,9,210},{17,7,17},{0,8,105},
+ {0,8,41},{0,9,178},{0,8,9},{0,8,137},{0,8,73},{0,9,242},{16,7,4},
+ {0,8,85},{0,8,21},{16,8,258},{19,7,43},{0,8,117},{0,8,53},{0,9,202},
+ {17,7,13},{0,8,101},{0,8,37},{0,9,170},{0,8,5},{0,8,133},{0,8,69},
+ {0,9,234},{16,7,8},{0,8,93},{0,8,29},{0,9,154},{20,7,83},{0,8,125},
+ {0,8,61},{0,9,218},{18,7,23},{0,8,109},{0,8,45},{0,9,186},{0,8,13},
+ {0,8,141},{0,8,77},{0,9,250},{16,7,3},{0,8,83},{0,8,19},{21,8,195},
+ {19,7,35},{0,8,115},{0,8,51},{0,9,198},{17,7,11},{0,8,99},{0,8,35},
+ {0,9,166},{0,8,3},{0,8,131},{0,8,67},{0,9,230},{16,7,7},{0,8,91},
+ {0,8,27},{0,9,150},{20,7,67},{0,8,123},{0,8,59},{0,9,214},{18,7,19},
+ {0,8,107},{0,8,43},{0,9,182},{0,8,11},{0,8,139},{0,8,75},{0,9,246},
+ {16,7,5},{0,8,87},{0,8,23},{64,8,0},{19,7,51},{0,8,119},{0,8,55},
+ {0,9,206},{17,7,15},{0,8,103},{0,8,39},{0,9,174},{0,8,7},{0,8,135},
+ {0,8,71},{0,9,238},{16,7,9},{0,8,95},{0,8,31},{0,9,158},{20,7,99},
+ {0,8,127},{0,8,63},{0,9,222},{18,7,27},{0,8,111},{0,8,47},{0,9,190},
+ {0,8,15},{0,8,143},{0,8,79},{0,9,254},{96,7,0},{0,8,80},{0,8,16},
+ {20,8,115},{18,7,31},{0,8,112},{0,8,48},{0,9,193},{16,7,10},{0,8,96},
+ {0,8,32},{0,9,161},{0,8,0},{0,8,128},{0,8,64},{0,9,225},{16,7,6},
+ {0,8,88},{0,8,24},{0,9,145},{19,7,59},{0,8,120},{0,8,56},{0,9,209},
+ {17,7,17},{0,8,104},{0,8,40},{0,9,177},{0,8,8},{0,8,136},{0,8,72},
+ {0,9,241},{16,7,4},{0,8,84},{0,8,20},{21,8,227},{19,7,43},{0,8,116},
+ {0,8,52},{0,9,201},{17,7,13},{0,8,100},{0,8,36},{0,9,169},{0,8,4},
+ {0,8,132},{0,8,68},{0,9,233},{16,7,8},{0,8,92},{0,8,28},{0,9,153},
+ {20,7,83},{0,8,124},{0,8,60},{0,9,217},{18,7,23},{0,8,108},{0,8,44},
+ {0,9,185},{0,8,12},{0,8,140},{0,8,76},{0,9,249},{16,7,3},{0,8,82},
+ {0,8,18},{21,8,163},{19,7,35},{0,8,114},{0,8,50},{0,9,197},{17,7,11},
+ {0,8,98},{0,8,34},{0,9,165},{0,8,2},{0,8,130},{0,8,66},{0,9,229},
+ {16,7,7},{0,8,90},{0,8,26},{0,9,149},{20,7,67},{0,8,122},{0,8,58},
+ {0,9,213},{18,7,19},{0,8,106},{0,8,42},{0,9,181},{0,8,10},{0,8,138},
+ {0,8,74},{0,9,245},{16,7,5},{0,8,86},{0,8,22},{64,8,0},{19,7,51},
+ {0,8,118},{0,8,54},{0,9,205},{17,7,15},{0,8,102},{0,8,38},{0,9,173},
+ {0,8,6},{0,8,134},{0,8,70},{0,9,237},{16,7,9},{0,8,94},{0,8,30},
+ {0,9,157},{20,7,99},{0,8,126},{0,8,62},{0,9,221},{18,7,27},{0,8,110},
+ {0,8,46},{0,9,189},{0,8,14},{0,8,142},{0,8,78},{0,9,253},{96,7,0},
+ {0,8,81},{0,8,17},{21,8,131},{18,7,31},{0,8,113},{0,8,49},{0,9,195},
+ {16,7,10},{0,8,97},{0,8,33},{0,9,163},{0,8,1},{0,8,129},{0,8,65},
+ {0,9,227},{16,7,6},{0,8,89},{0,8,25},{0,9,147},{19,7,59},{0,8,121},
+ {0,8,57},{0,9,211},{17,7,17},{0,8,105},{0,8,41},{0,9,179},{0,8,9},
+ {0,8,137},{0,8,73},{0,9,243},{16,7,4},{0,8,85},{0,8,21},{16,8,258},
+ {19,7,43},{0,8,117},{0,8,53},{0,9,203},{17,7,13},{0,8,101},{0,8,37},
+ {0,9,171},{0,8,5},{0,8,133},{0,8,69},{0,9,235},{16,7,8},{0,8,93},
+ {0,8,29},{0,9,155},{20,7,83},{0,8,125},{0,8,61},{0,9,219},{18,7,23},
+ {0,8,109},{0,8,45},{0,9,187},{0,8,13},{0,8,141},{0,8,77},{0,9,251},
+ {16,7,3},{0,8,83},{0,8,19},{21,8,195},{19,7,35},{0,8,115},{0,8,51},
+ {0,9,199},{17,7,11},{0,8,99},{0,8,35},{0,9,167},{0,8,3},{0,8,131},
+ {0,8,67},{0,9,231},{16,7,7},{0,8,91},{0,8,27},{0,9,151},{20,7,67},
+ {0,8,123},{0,8,59},{0,9,215},{18,7,19},{0,8,107},{0,8,43},{0,9,183},
+ {0,8,11},{0,8,139},{0,8,75},{0,9,247},{16,7,5},{0,8,87},{0,8,23},
+ {64,8,0},{19,7,51},{0,8,119},{0,8,55},{0,9,207},{17,7,15},{0,8,103},
+ {0,8,39},{0,9,175},{0,8,7},{0,8,135},{0,8,71},{0,9,239},{16,7,9},
+ {0,8,95},{0,8,31},{0,9,159},{20,7,99},{0,8,127},{0,8,63},{0,9,223},
+ {18,7,27},{0,8,111},{0,8,47},{0,9,191},{0,8,15},{0,8,143},{0,8,79},
+ {0,9,255}
+ };
+
+ static const code distfix[32] = {
+ {16,5,1},{23,5,257},{19,5,17},{27,5,4097},{17,5,5},{25,5,1025},
+ {21,5,65},{29,5,16385},{16,5,3},{24,5,513},{20,5,33},{28,5,8193},
+ {18,5,9},{26,5,2049},{22,5,129},{64,5,0},{16,5,2},{23,5,385},
+ {19,5,25},{27,5,6145},{17,5,7},{25,5,1537},{21,5,97},{29,5,24577},
+ {16,5,4},{24,5,769},{20,5,49},{28,5,12289},{18,5,13},{26,5,3073},
+ {22,5,193},{64,5,0}
+ };
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c
index 31b9e9054bf..7f922dccf1a 100644
--- a/lib/zlib_inflate/inflate.c
+++ b/lib/zlib_inflate/inflate.c
@@ -1,89 +1,148 @@
-/* inflate.c -- zlib interface to inflate modules
- * Copyright (C) 1995-1998 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
+/* inflate.c -- zlib decompression
+ * Copyright (C) 1995-2005 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ *
+ * Based on zlib 1.2.3 but modified for the Linux Kernel by
+ * Richard Purdie <richard@openedhand.com>
+ *
+ * Changes mainly for static instead of dynamic memory allocation
+ *
*/
#include <linux/zutil.h>
-#include "infblock.h"
+#include "inftrees.h"
+#include "inflate.h"
+#include "inffast.h"
#include "infutil.h"
int zlib_inflate_workspacesize(void)
{
- return sizeof(struct inflate_workspace);
+ return sizeof(struct inflate_workspace);
}
+int zlib_inflateReset(z_streamp strm)
+{
+ struct inflate_state *state;
+
+ if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
+ state = (struct inflate_state *)strm->state;
+ strm->total_in = strm->total_out = state->total = 0;
+ strm->msg = NULL;
+ strm->adler = 1; /* to support ill-conceived Java test suite */
+ state->mode = HEAD;
+ state->last = 0;
+ state->havedict = 0;
+ state->dmax = 32768U;
+ state->hold = 0;
+ state->bits = 0;
+ state->lencode = state->distcode = state->next = state->codes;
-int zlib_inflateReset(
- z_streamp z
-)
+ /* Initialise Window */
+ state->wsize = 1U << state->wbits;
+ state->write = 0;
+ state->whave = 0;
+
+ return Z_OK;
+}
+
+#if 0
+int zlib_inflatePrime(z_streamp strm, int bits, int value)
{
- if (z == NULL || z->state == NULL || z->workspace == NULL)
- return Z_STREAM_ERROR;
- z->total_in = z->total_out = 0;
- z->msg = NULL;
- z->state->mode = z->state->nowrap ? BLOCKS : METHOD;
- zlib_inflate_blocks_reset(z->state->blocks, z, NULL);
- return Z_OK;
+ struct inflate_state *state;
+
+ if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
+ state = (struct inflate_state *)strm->state;
+ if (bits > 16 || state->bits + bits > 32) return Z_STREAM_ERROR;
+ value &= (1L << bits) - 1;
+ state->hold += value << state->bits;
+ state->bits += bits;
+ return Z_OK;
}
+#endif
+
+int zlib_inflateInit2(z_streamp strm, int windowBits)
+{
+ struct inflate_state *state;
+
+ if (strm == NULL) return Z_STREAM_ERROR;
+ strm->msg = NULL; /* in case we return an error */
+
+ state = &WS(strm)->inflate_state;
+ strm->state = (struct internal_state *)state;
+
+ if (windowBits < 0) {
+ state->wrap = 0;
+ windowBits = -windowBits;
+ }
+ else {
+ state->wrap = (windowBits >> 4) + 1;
+ }
+ if (windowBits < 8 || windowBits > 15) {
+ return Z_STREAM_ERROR;
+ }
+ state->wbits = (unsigned)windowBits;
+ state->window = &WS(strm)->working_window[0];
+ return zlib_inflateReset(strm);
+}
-int zlib_inflateEnd(
- z_streamp z
-)
+/*
+ Return state with length and distance decoding tables and index sizes set to
+ fixed code decoding. This returns fixed tables from inffixed.h.
+ */
+static void zlib_fixedtables(struct inflate_state *state)
{
- if (z == NULL || z->state == NULL || z->workspace == NULL)
- return Z_STREAM_ERROR;
- if (z->state->blocks != NULL)
- zlib_inflate_blocks_free(z->state->blocks, z);
- z->state = NULL;
- return Z_OK;
+# include "inffixed.h"
+ state->lencode = lenfix;
+ state->lenbits = 9;
+ state->distcode = distfix;
+ state->distbits = 5;
}
-int zlib_inflateInit2_(
- z_streamp z,
- int w,
- const char *version,
- int stream_size
-)
+/*
+ Update the window with the last wsize (normally 32K) bytes written before
+ returning. This is only called when a window is already in use, or when
+ output has been written during this inflate call, but the end of the deflate
+ stream has not been reached yet. It is also called to window dictionary data
+ when a dictionary is loaded.
+
+ Providing output buffers larger than 32K to inflate() should provide a speed
+ advantage, since only the last 32K of output is copied to the sliding window
+ upon return from inflate(), and since all distances after the first 32K of
+ output will fall in the output data, making match copies simpler and faster.
+ The advantage may be dependent on the size of the processor's data caches.
+ */
+static void zlib_updatewindow(z_streamp strm, unsigned out)
{
- if (version == NULL || version[0] != ZLIB_VERSION[0] ||
- stream_size != sizeof(z_stream) || z->workspace == NULL)
- return Z_VERSION_ERROR;
-
- /* initialize state */
- z->msg = NULL;
- z->state = &WS(z)->internal_state;
- z->state->blocks = NULL;
-
- /* handle undocumented nowrap option (no zlib header or check) */
- z->state->nowrap = 0;
- if (w < 0)
- {
- w = - w;
- z->state->nowrap = 1;
- }
-
- /* set window size */
- if (w < 8 || w > 15)
- {
- zlib_inflateEnd(z);
- return Z_STREAM_ERROR;
- }
- z->state->wbits = (uInt)w;
-
- /* create inflate_blocks state */
- if ((z->state->blocks =
- zlib_inflate_blocks_new(z, z->state->nowrap ? NULL : zlib_adler32, (uInt)1 << w))
- == NULL)
- {
- zlib_inflateEnd(z);
- return Z_MEM_ERROR;
- }
-
- /* reset state */
- zlib_inflateReset(z);
- return Z_OK;
+ struct inflate_state *state;
+ unsigned copy, dist;
+
+ state = (struct inflate_state *)strm->state;
+
+ /* copy state->wsize or less output bytes into the circular window */
+ copy = out - strm->avail_out;
+ if (copy >= state->wsize) {
+ memcpy(state->window, strm->next_out - state->wsize, state->wsize);
+ state->write = 0;
+ state->whave = state->wsize;
+ }
+ else {
+ dist = state->wsize - state->write;
+ if (dist > copy) dist = copy;
+ memcpy(state->window + state->write, strm->next_out - copy, dist);
+ copy -= dist;
+ if (copy) {
+ memcpy(state->window, strm->next_out - copy, copy);
+ state->write = copy;
+ state->whave = state->wsize;
+ }
+ else {
+ state->write += dist;
+ if (state->write == state->wsize) state->write = 0;
+ if (state->whave < state->wsize) state->whave += dist;
+ }
+ }
}
@@ -91,157 +150,764 @@ int zlib_inflateInit2_(
* At the end of a Deflate-compressed PPP packet, we expect to have seen
* a `stored' block type value but not the (zero) length bytes.
*/
-static int zlib_inflate_packet_flush(inflate_blocks_statef *s)
+/*
+ Returns true if inflate is currently at the end of a block generated by
+ Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP
+ implementation to provide an additional safety check. PPP uses
+ Z_SYNC_FLUSH but removes the length bytes of the resulting empty stored
+ block. When decompressing, PPP checks that at the end of input packet,
+ inflate is waiting for these length bytes.
+ */
+static int zlib_inflateSyncPacket(z_streamp strm)
{
- if (s->mode != LENS)
- return Z_DATA_ERROR;
- s->mode = TYPE;
+ struct inflate_state *state;
+
+ if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
+ state = (struct inflate_state *)strm->state;
+
+ if (state->mode == STORED && state->bits == 0) {
+ state->mode = TYPE;
+ return Z_OK;
+ }
+ return Z_DATA_ERROR;
+}
+
+/* Macros for inflate(): */
+
+/* check function to use adler32() for zlib or crc32() for gzip */
+#define UPDATE(check, buf, len) zlib_adler32(check, buf, len)
+
+/* Load registers with state in inflate() for speed */
+#define LOAD() \
+ do { \
+ put = strm->next_out; \
+ left = strm->avail_out; \
+ next = strm->next_in; \
+ have = strm->avail_in; \
+ hold = state->hold; \
+ bits = state->bits; \
+ } while (0)
+
+/* Restore state from registers in inflate() */
+#define RESTORE() \
+ do { \
+ strm->next_out = put; \
+ strm->avail_out = left; \
+ strm->next_in = next; \
+ strm->avail_in = have; \
+ state->hold = hold; \
+ state->bits = bits; \
+ } while (0)
+
+/* Clear the input bit accumulator */
+#define INITBITS() \
+ do { \
+ hold = 0; \
+ bits = 0; \
+ } while (0)
+
+/* Get a byte of input into the bit accumulator, or return from inflate()
+ if there is no input available. */
+#define PULLBYTE() \
+ do { \
+ if (have == 0) goto inf_leave; \
+ have--; \
+ hold += (unsigned long)(*next++) << bits; \
+ bits += 8; \
+ } while (0)
+
+/* Assure that there are at least n bits in the bit accumulator. If there is
+ not enough available input to do that, then return from inflate(). */
+#define NEEDBITS(n) \
+ do { \
+ while (bits < (unsigned)(n)) \
+ PULLBYTE(); \
+ } while (0)
+
+/* Return the low n bits of the bit accumulator (n < 16) */
+#define BITS(n) \
+ ((unsigned)hold & ((1U << (n)) - 1))
+
+/* Remove n bits from the bit accumulator */
+#define DROPBITS(n) \
+ do { \
+ hold >>= (n); \
+ bits -= (unsigned)(n); \
+ } while (0)
+
+/* Remove zero to seven bits as needed to go to a byte boundary */
+#define BYTEBITS() \
+ do { \
+ hold >>= bits & 7; \
+ bits -= bits & 7; \
+ } while (0)
+
+/* Reverse the bytes in a 32-bit value */
+#define REVERSE(q) \
+ ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \
+ (((q) & 0xff00) << 8) + (((q) & 0xff) << 24))
+
+/*
+ inflate() uses a state machine to process as much input data and generate as
+ much output data as possible before returning. The state machine is
+ structured roughly as follows:
+
+ for (;;) switch (state) {
+ ...
+ case STATEn:
+ if (not enough input data or output space to make progress)
+ return;
+ ... make progress ...
+ state = STATEm;
+ break;
+ ...
+ }
+
+ so when inflate() is called again, the same case is attempted again, and
+ if the appropriate resources are provided, the machine proceeds to the
+ next state. The NEEDBITS() macro is usually the way the state evaluates
+ whether it can proceed or should return. NEEDBITS() does the return if
+ the requested bits are not available. The typical use of the BITS macros
+ is:
+
+ NEEDBITS(n);
+ ... do something with BITS(n) ...
+ DROPBITS(n);
+
+ where NEEDBITS(n) either returns from inflate() if there isn't enough
+ input left to load n bits into the accumulator, or it continues. BITS(n)
+ gives the low n bits in the accumulator. When done, DROPBITS(n) drops
+ the low n bits off the accumulator. INITBITS() clears the accumulator
+ and sets the number of available bits to zero. BYTEBITS() discards just
+ enough bits to put the accumulator on a byte boundary. After BYTEBITS()
+ and a NEEDBITS(8), then BITS(8) would return the next byte in the stream.
+
+ NEEDBITS(n) uses PULLBYTE() to get an available byte of input, or to return
+ if there is no input available. The decoding of variable length codes uses
+ PULLBYTE() directly in order to pull just enough bytes to decode the next
+ code, and no more.
+
+ Some states loop until they get enough input, making sure that enough
+ state information is maintained to continue the loop where it left off
+ if NEEDBITS() returns in the loop. For example, want, need, and keep
+ would all have to actually be part of the saved state in case NEEDBITS()
+ returns:
+
+ case STATEw:
+ while (want < need) {
+ NEEDBITS(n);
+ keep[want++] = BITS(n);
+ DROPBITS(n);
+ }
+ state = STATEx;
+ case STATEx:
+
+ As shown above, if the next state is also the next case, then the break
+ is omitted.
+
+ A state may also return if there is not enough output space available to
+ complete that state. Those states are copying stored data, writing a
+ literal byte, and copying a matching string.
+
+ When returning, a "goto inf_leave" is used to update the total counters,
+ update the check value, and determine whether any progress has been made
+ during that inflate() call in order to return the proper return code.
+ Progress is defined as a change in either strm->avail_in or strm->avail_out.
+ When there is a window, goto inf_leave will update the window with the last
+ output written. If a goto inf_leave occurs in the middle of decompression
+ and there is no window currently, goto inf_leave will create one and copy
+ output to the window for the next call of inflate().
+
+ In this implementation, the flush parameter of inflate() only affects the
+ return code (per zlib.h). inflate() always writes as much as possible to
+ strm->next_out, given the space available and the provided input--the effect
+ documented in zlib.h of Z_SYNC_FLUSH. Furthermore, inflate() always defers
+ the allocation of and copying into a sliding window until necessary, which
+ provides the effect documented in zlib.h for Z_FINISH when the entire input
+ stream available. So the only thing the flush parameter actually does is:
+ when flush is set to Z_FINISH, inflate() cannot return Z_OK. Instead it
+ will return Z_BUF_ERROR if it has not reached the end of the stream.
+ */
+
+int zlib_inflate(z_streamp strm, int flush)
+{
+ struct inflate_state *state;
+ unsigned char *next; /* next input */
+ unsigned char *put; /* next output */
+ unsigned have, left; /* available input and output */
+ unsigned long hold; /* bit buffer */
+ unsigned bits; /* bits in bit buffer */
+ unsigned in, out; /* save starting available input and output */
+ unsigned copy; /* number of stored or match bytes to copy */
+ unsigned char *from; /* where to copy match bytes from */
+ code this; /* current decoding table entry */
+ code last; /* parent table entry */
+ unsigned len; /* length to copy for repeats, bits to drop */
+ int ret; /* return code */
+ static const unsigned short order[19] = /* permutation of code lengths */
+ {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
+
+ if (strm == NULL || strm->state == NULL || strm->next_out == NULL ||
+ (strm->next_in == NULL && strm->avail_in != 0))
+ return Z_STREAM_ERROR;
+
+ state = (struct inflate_state *)strm->state;
+
+ if (state->mode == TYPE) state->mode = TYPEDO; /* skip check */
+ LOAD();
+ in = have;
+ out = left;
+ ret = Z_OK;
+ for (;;)
+ switch (state->mode) {
+ case HEAD:
+ if (state->wrap == 0) {
+ state->mode = TYPEDO;
+ break;
+ }
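+            /* Per RFC 1950 the two zlib header bytes, CMF and FLG, must
+               satisfy (CMF*256 + FLG) % 31 == 0. NEEDBITS(16) loads CMF into
+               the low byte of the accumulator and FLG into the next byte, so
+               BITS(8) is CMF and hold >> 8 is FLG, which is exactly what the
+               check below computes. */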
+ NEEDBITS(16);
+ if (
+ ((BITS(8) << 8) + (hold >> 8)) % 31) {
+ strm->msg = (char *)"incorrect header check";
+ state->mode = BAD;
+ break;
+ }
+ if (BITS(4) != Z_DEFLATED) {
+ strm->msg = (char *)"unknown compression method";
+ state->mode = BAD;
+ break;
+ }
+ DROPBITS(4);
+ len = BITS(4) + 8;
+ if (len > state->wbits) {
+ strm->msg = (char *)"invalid window size";
+ state->mode = BAD;
+ break;
+ }
+ state->dmax = 1U << len;
+ strm->adler = state->check = zlib_adler32(0L, NULL, 0);
+ state->mode = hold & 0x200 ? DICTID : TYPE;
+ INITBITS();
+ break;
+ case DICTID:
+ NEEDBITS(32);
+ strm->adler = state->check = REVERSE(hold);
+ INITBITS();
+ state->mode = DICT;
+ case DICT:
+ if (state->havedict == 0) {
+ RESTORE();
+ return Z_NEED_DICT;
+ }
+ strm->adler = state->check = zlib_adler32(0L, NULL, 0);
+ state->mode = TYPE;
+ case TYPE:
+ if (flush == Z_BLOCK) goto inf_leave;
+ case TYPEDO:
+ if (state->last) {
+ BYTEBITS();
+ state->mode = CHECK;
+ break;
+ }
+ NEEDBITS(3);
+ state->last = BITS(1);
+ DROPBITS(1);
+ switch (BITS(2)) {
+ case 0: /* stored block */
+ state->mode = STORED;
+ break;
+ case 1: /* fixed block */
+ zlib_fixedtables(state);
+ state->mode = LEN; /* decode codes */
+ break;
+ case 2: /* dynamic block */
+ state->mode = TABLE;
+ break;
+ case 3:
+ strm->msg = (char *)"invalid block type";
+ state->mode = BAD;
+ }
+ DROPBITS(2);
+ break;
+ case STORED:
+ BYTEBITS(); /* go to byte boundary */
+ NEEDBITS(32);
+ if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) {
+ strm->msg = (char *)"invalid stored block lengths";
+ state->mode = BAD;
+ break;
+ }
+ state->length = (unsigned)hold & 0xffff;
+ INITBITS();
+ state->mode = COPY;
+ case COPY:
+ copy = state->length;
+ if (copy) {
+ if (copy > have) copy = have;
+ if (copy > left) copy = left;
+ if (copy == 0) goto inf_leave;
+ memcpy(put, next, copy);
+ have -= copy;
+ next += copy;
+ left -= copy;
+ put += copy;
+ state->length -= copy;
+ break;
+ }
+ state->mode = TYPE;
+ break;
+ case TABLE:
+ NEEDBITS(14);
+ state->nlen = BITS(5) + 257;
+ DROPBITS(5);
+ state->ndist = BITS(5) + 1;
+ DROPBITS(5);
+ state->ncode = BITS(4) + 4;
+ DROPBITS(4);
+#ifndef PKZIP_BUG_WORKAROUND
+ if (state->nlen > 286 || state->ndist > 30) {
+ strm->msg = (char *)"too many length or distance symbols";
+ state->mode = BAD;
+ break;
+ }
+#endif
+ state->have = 0;
+ state->mode = LENLENS;
+ case LENLENS:
+ while (state->have < state->ncode) {
+ NEEDBITS(3);
+ state->lens[order[state->have++]] = (unsigned short)BITS(3);
+ DROPBITS(3);
+ }
+ while (state->have < 19)
+ state->lens[order[state->have++]] = 0;
+ state->next = state->codes;
+ state->lencode = (code const *)(state->next);
+ state->lenbits = 7;
+ ret = zlib_inflate_table(CODES, state->lens, 19, &(state->next),
+ &(state->lenbits), state->work);
+ if (ret) {
+ strm->msg = (char *)"invalid code lengths set";
+ state->mode = BAD;
+ break;
+ }
+ state->have = 0;
+ state->mode = CODELENS;
+ case CODELENS:
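+            /* Code length alphabet per RFC 1951: symbols 0..15 are literal
+               code lengths, 16 repeats the previous length 3-6 times
+               (2 extra bits), 17 repeats a zero length 3-10 times (3 extra
+               bits), and 18 repeats a zero length 11-138 times (7 extra
+               bits). That is what the 3 + BITS(2), 3 + BITS(3) and
+               11 + BITS(7) arms below implement. */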
+ while (state->have < state->nlen + state->ndist) {
+ for (;;) {
+ this = state->lencode[BITS(state->lenbits)];
+ if ((unsigned)(this.bits) <= bits) break;
+ PULLBYTE();
+ }
+ if (this.val < 16) {
+ NEEDBITS(this.bits);
+ DROPBITS(this.bits);
+ state->lens[state->have++] = this.val;
+ }
+ else {
+ if (this.val == 16) {
+ NEEDBITS(this.bits + 2);
+ DROPBITS(this.bits);
+ if (state->have == 0) {
+ strm->msg = (char *)"invalid bit length repeat";
+ state->mode = BAD;
+ break;
+ }
+ len = state->lens[state->have - 1];
+ copy = 3 + BITS(2);
+ DROPBITS(2);
+ }
+ else if (this.val == 17) {
+ NEEDBITS(this.bits + 3);
+ DROPBITS(this.bits);
+ len = 0;
+ copy = 3 + BITS(3);
+ DROPBITS(3);
+ }
+ else {
+ NEEDBITS(this.bits + 7);
+ DROPBITS(this.bits);
+ len = 0;
+ copy = 11 + BITS(7);
+ DROPBITS(7);
+ }
+ if (state->have + copy > state->nlen + state->ndist) {
+ strm->msg = (char *)"invalid bit length repeat";
+ state->mode = BAD;
+ break;
+ }
+ while (copy--)
+ state->lens[state->have++] = (unsigned short)len;
+ }
+ }
+
+ /* handle error breaks in while */
+ if (state->mode == BAD) break;
+
+ /* build code tables */
+ state->next = state->codes;
+ state->lencode = (code const *)(state->next);
+ state->lenbits = 9;
+ ret = zlib_inflate_table(LENS, state->lens, state->nlen, &(state->next),
+ &(state->lenbits), state->work);
+ if (ret) {
+ strm->msg = (char *)"invalid literal/lengths set";
+ state->mode = BAD;
+ break;
+ }
+ state->distcode = (code const *)(state->next);
+ state->distbits = 6;
+ ret = zlib_inflate_table(DISTS, state->lens + state->nlen, state->ndist,
+ &(state->next), &(state->distbits), state->work);
+ if (ret) {
+ strm->msg = (char *)"invalid distances set";
+ state->mode = BAD;
+ break;
+ }
+ state->mode = LEN;
+ case LEN:
+ if (have >= 6 && left >= 258) {
+ RESTORE();
+ inflate_fast(strm, out);
+ LOAD();
+ break;
+ }
+ for (;;) {
+ this = state->lencode[BITS(state->lenbits)];
+ if ((unsigned)(this.bits) <= bits) break;
+ PULLBYTE();
+ }
+ if (this.op && (this.op & 0xf0) == 0) {
+ last = this;
+ for (;;) {
+ this = state->lencode[last.val +
+ (BITS(last.bits + last.op) >> last.bits)];
+ if ((unsigned)(last.bits + this.bits) <= bits) break;
+ PULLBYTE();
+ }
+ DROPBITS(last.bits);
+ }
+ DROPBITS(this.bits);
+ state->length = (unsigned)this.val;
+ if ((int)(this.op) == 0) {
+ state->mode = LIT;
+ break;
+ }
+ if (this.op & 32) {
+ state->mode = TYPE;
+ break;
+ }
+ if (this.op & 64) {
+ strm->msg = (char *)"invalid literal/length code";
+ state->mode = BAD;
+ break;
+ }
+ state->extra = (unsigned)(this.op) & 15;
+ state->mode = LENEXT;
+ case LENEXT:
+ if (state->extra) {
+ NEEDBITS(state->extra);
+ state->length += BITS(state->extra);
+ DROPBITS(state->extra);
+ }
+ state->mode = DIST;
+ case DIST:
+ for (;;) {
+ this = state->distcode[BITS(state->distbits)];
+ if ((unsigned)(this.bits) <= bits) break;
+ PULLBYTE();
+ }
+ if ((this.op & 0xf0) == 0) {
+ last = this;
+ for (;;) {
+ this = state->distcode[last.val +
+ (BITS(last.bits + last.op) >> last.bits)];
+ if ((unsigned)(last.bits + this.bits) <= bits) break;
+ PULLBYTE();
+ }
+ DROPBITS(last.bits);
+ }
+ DROPBITS(this.bits);
+ if (this.op & 64) {
+ strm->msg = (char *)"invalid distance code";
+ state->mode = BAD;
+ break;
+ }
+ state->offset = (unsigned)this.val;
+ state->extra = (unsigned)(this.op) & 15;
+ state->mode = DISTEXT;
+ case DISTEXT:
+ if (state->extra) {
+ NEEDBITS(state->extra);
+ state->offset += BITS(state->extra);
+ DROPBITS(state->extra);
+ }
+#ifdef INFLATE_STRICT
+ if (state->offset > state->dmax) {
+ strm->msg = (char *)"invalid distance too far back";
+ state->mode = BAD;
+ break;
+ }
+#endif
+ if (state->offset > state->whave + out - left) {
+ strm->msg = (char *)"invalid distance too far back";
+ state->mode = BAD;
+ break;
+ }
+ state->mode = MATCH;
+ case MATCH:
+ if (left == 0) goto inf_leave;
+ copy = out - left;
+ if (state->offset > copy) { /* copy from window */
+ copy = state->offset - copy;
+ if (copy > state->write) {
+ copy -= state->write;
+ from = state->window + (state->wsize - copy);
+ }
+ else
+ from = state->window + (state->write - copy);
+ if (copy > state->length) copy = state->length;
+ }
+ else { /* copy from output */
+ from = put - state->offset;
+ copy = state->length;
+ }
+ if (copy > left) copy = left;
+ left -= copy;
+ state->length -= copy;
+ do {
+ *put++ = *from++;
+ } while (--copy);
+ if (state->length == 0) state->mode = LEN;
+ break;
+ case LIT:
+ if (left == 0) goto inf_leave;
+ *put++ = (unsigned char)(state->length);
+ left--;
+ state->mode = LEN;
+ break;
+ case CHECK:
+ if (state->wrap) {
+ NEEDBITS(32);
+ out -= left;
+ strm->total_out += out;
+ state->total += out;
+ if (out)
+ strm->adler = state->check =
+ UPDATE(state->check, put - out, out);
+ out = left;
+ if ((
+ REVERSE(hold)) != state->check) {
+ strm->msg = (char *)"incorrect data check";
+ state->mode = BAD;
+ break;
+ }
+ INITBITS();
+ }
+ state->mode = DONE;
+ case DONE:
+ ret = Z_STREAM_END;
+ goto inf_leave;
+ case BAD:
+ ret = Z_DATA_ERROR;
+ goto inf_leave;
+ case MEM:
+ return Z_MEM_ERROR;
+ case SYNC:
+ default:
+ return Z_STREAM_ERROR;
+ }
+
+ /*
+ Return from inflate(), updating the total counts and the check value.
+ If there was no progress during the inflate() call, return a buffer
+ error. Call zlib_updatewindow() to create and/or update the window state.
+ */
+ inf_leave:
+ RESTORE();
+ if (state->wsize || (state->mode < CHECK && out != strm->avail_out))
+ zlib_updatewindow(strm, out);
+
+ in -= strm->avail_in;
+ out -= strm->avail_out;
+ strm->total_in += in;
+ strm->total_out += out;
+ state->total += out;
+ if (state->wrap && out)
+ strm->adler = state->check =
+ UPDATE(state->check, strm->next_out - out, out);
+
+ strm->data_type = state->bits + (state->last ? 64 : 0) +
+ (state->mode == TYPE ? 128 : 0);
+ if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK)
+ ret = Z_BUF_ERROR;
+
+ if (flush == Z_PACKET_FLUSH && ret == Z_OK &&
+ (strm->avail_out != 0 || strm->avail_in == 0))
+ return zlib_inflateSyncPacket(strm);
+ return ret;
+}
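+
+/*
+   Illustrative calling sequence only; this sketch is not taken from this
+   file, and the field and function details should be checked against
+   linux/zlib.h. A one-shot in-kernel caller would drive the decoder roughly
+   like this, using the entry points exported from inflate_syms.c (error
+   handling omitted):
+
+      z_stream strm;
+      strm.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
+      zlib_inflateInit2(&strm, DEF_WBITS);
+      strm.next_in = src;   strm.avail_in = src_len;
+      strm.next_out = dst;  strm.avail_out = dst_len;
+      ret = zlib_inflate(&strm, Z_FINISH);   (expect Z_STREAM_END)
+      zlib_inflateEnd(&strm);
+      kfree(strm.workspace);
+ */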
+
+int zlib_inflateEnd(z_streamp strm)
+{
+ if (strm == NULL || strm->state == NULL)
+ return Z_STREAM_ERROR;
return Z_OK;
}
+#if 0
+int zlib_inflateSetDictionary(z_streamp strm, const Byte *dictionary,
+ uInt dictLength)
+{
+ struct inflate_state *state;
+ unsigned long id;
+
+ /* check state */
+ if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
+ state = (struct inflate_state *)strm->state;
+ if (state->wrap != 0 && state->mode != DICT)
+ return Z_STREAM_ERROR;
+
+ /* check for correct dictionary id */
+ if (state->mode == DICT) {
+ id = zlib_adler32(0L, NULL, 0);
+ id = zlib_adler32(id, dictionary, dictLength);
+ if (id != state->check)
+ return Z_DATA_ERROR;
+ }
+
+ /* copy dictionary to window */
+ zlib_updatewindow(strm, strm->avail_out);
-int zlib_inflateInit_(
- z_streamp z,
- const char *version,
- int stream_size
-)
+ if (dictLength > state->wsize) {
+ memcpy(state->window, dictionary + dictLength - state->wsize,
+ state->wsize);
+ state->whave = state->wsize;
+ }
+ else {
+ memcpy(state->window + state->wsize - dictLength, dictionary,
+ dictLength);
+ state->whave = dictLength;
+ }
+ state->havedict = 1;
+ return Z_OK;
+}
+#endif
+
+#if 0
+/*
+ Search buf[0..len-1] for the pattern: 0, 0, 0xff, 0xff. Return when found
+ or when out of input. When called, *have is the number of pattern bytes
+ found in order so far, in 0..3. On return *have is updated to the new
+ state. If on return *have equals four, then the pattern was found and the
+ return value is how many bytes were read including the last byte of the
+ pattern. If *have is less than four, then the pattern has not been found
+ yet and the return value is len. In the latter case, zlib_syncsearch() can be
+ called again with more data and the *have state. *have is initialized to
+ zero for the first call.
+ */
+static unsigned zlib_syncsearch(unsigned *have, unsigned char *buf,
+ unsigned len)
{
- return zlib_inflateInit2_(z, DEF_WBITS, version, stream_size);
+ unsigned got;
+ unsigned next;
+
+ got = *have;
+ next = 0;
+ while (next < len && got < 4) {
+ if ((int)(buf[next]) == (got < 2 ? 0 : 0xff))
+ got++;
+ else if (buf[next])
+ got = 0;
+ else
+ got = 4 - got;
+ next++;
+ }
+ *have = got;
+ return next;
}
+#endif
-#undef NEEDBYTE
-#undef NEXTBYTE
-#define NEEDBYTE {if(z->avail_in==0)goto empty;r=trv;}
-#define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++)
+#if 0
+int zlib_inflateSync(z_streamp strm)
+{
+ unsigned len; /* number of bytes to look at or looked at */
+ unsigned long in, out; /* temporary to save total_in and total_out */
+ unsigned char buf[4]; /* to restore bit buffer to byte string */
+ struct inflate_state *state;
+
+ /* check parameters */
+ if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
+ state = (struct inflate_state *)strm->state;
+ if (strm->avail_in == 0 && state->bits < 8) return Z_BUF_ERROR;
+
+ /* if first time, start search in bit buffer */
+ if (state->mode != SYNC) {
+ state->mode = SYNC;
+ state->hold <<= state->bits & 7;
+ state->bits -= state->bits & 7;
+ len = 0;
+ while (state->bits >= 8) {
+ buf[len++] = (unsigned char)(state->hold);
+ state->hold >>= 8;
+ state->bits -= 8;
+ }
+ state->have = 0;
+ zlib_syncsearch(&(state->have), buf, len);
+ }
+
+ /* search available input */
+ len = zlib_syncsearch(&(state->have), strm->next_in, strm->avail_in);
+ strm->avail_in -= len;
+ strm->next_in += len;
+ strm->total_in += len;
+
+ /* return no joy or set up to restart inflate() on a new block */
+ if (state->have != 4) return Z_DATA_ERROR;
+ in = strm->total_in; out = strm->total_out;
+ zlib_inflateReset(strm);
+ strm->total_in = in; strm->total_out = out;
+ state->mode = TYPE;
+ return Z_OK;
+}
+#endif
-int zlib_inflate(
- z_streamp z,
- int f
-)
+/*
+ * This subroutine adds the data at next_in/avail_in to the output history
+ * without performing any output. The output buffer must be "caught up",
+ * i.e. there must be no pending output (this should always be the case
+ * here). The state must
+ * be waiting on the start of a block (i.e. mode == TYPE or HEAD). On exit,
+ * the output will also be caught up, and the checksum will have been updated
+ * if need be.
+ */
+int zlib_inflateIncomp(z_stream *z)
{
- int r, trv;
- uInt b;
-
- if (z == NULL || z->state == NULL || z->next_in == NULL)
- return Z_STREAM_ERROR;
- trv = f == Z_FINISH ? Z_BUF_ERROR : Z_OK;
- r = Z_BUF_ERROR;
- while (1) switch (z->state->mode)
- {
- case METHOD:
- NEEDBYTE
- if (((z->state->sub.method = NEXTBYTE) & 0xf) != Z_DEFLATED)
- {
- z->state->mode = I_BAD;
- z->msg = (char*)"unknown compression method";
- z->state->sub.marker = 5; /* can't try inflateSync */
- break;
- }
- if ((z->state->sub.method >> 4) + 8 > z->state->wbits)
- {
- z->state->mode = I_BAD;
- z->msg = (char*)"invalid window size";
- z->state->sub.marker = 5; /* can't try inflateSync */
- break;
- }
- z->state->mode = FLAG;
- case FLAG:
- NEEDBYTE
- b = NEXTBYTE;
- if (((z->state->sub.method << 8) + b) % 31)
- {
- z->state->mode = I_BAD;
- z->msg = (char*)"incorrect header check";
- z->state->sub.marker = 5; /* can't try inflateSync */
- break;
- }
- if (!(b & PRESET_DICT))
- {
- z->state->mode = BLOCKS;
- break;
- }
- z->state->mode = DICT4;
- case DICT4:
- NEEDBYTE
- z->state->sub.check.need = (uLong)NEXTBYTE << 24;
- z->state->mode = DICT3;
- case DICT3:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE << 16;
- z->state->mode = DICT2;
- case DICT2:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE << 8;
- z->state->mode = DICT1;
- case DICT1:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE;
- z->adler = z->state->sub.check.need;
- z->state->mode = DICT0;
- return Z_NEED_DICT;
- case DICT0:
- z->state->mode = I_BAD;
- z->msg = (char*)"need dictionary";
- z->state->sub.marker = 0; /* can try inflateSync */
- return Z_STREAM_ERROR;
- case BLOCKS:
- r = zlib_inflate_blocks(z->state->blocks, z, r);
- if (f == Z_PACKET_FLUSH && z->avail_in == 0 && z->avail_out != 0)
- r = zlib_inflate_packet_flush(z->state->blocks);
- if (r == Z_DATA_ERROR)
- {
- z->state->mode = I_BAD;
- z->state->sub.marker = 0; /* can try inflateSync */
- break;
- }
- if (r == Z_OK)
- r = trv;
- if (r != Z_STREAM_END)
- return r;
- r = trv;
- zlib_inflate_blocks_reset(z->state->blocks, z, &z->state->sub.check.was);
- if (z->state->nowrap)
- {
- z->state->mode = I_DONE;
- break;
- }
- z->state->mode = CHECK4;
- case CHECK4:
- NEEDBYTE
- z->state->sub.check.need = (uLong)NEXTBYTE << 24;
- z->state->mode = CHECK3;
- case CHECK3:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE << 16;
- z->state->mode = CHECK2;
- case CHECK2:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE << 8;
- z->state->mode = CHECK1;
- case CHECK1:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE;
-
- if (z->state->sub.check.was != z->state->sub.check.need)
- {
- z->state->mode = I_BAD;
- z->msg = (char*)"incorrect data check";
- z->state->sub.marker = 5; /* can't try inflateSync */
- break;
- }
- z->state->mode = I_DONE;
- case I_DONE:
- return Z_STREAM_END;
- case I_BAD:
- return Z_DATA_ERROR;
- default:
- return Z_STREAM_ERROR;
- }
- empty:
- if (f != Z_PACKET_FLUSH)
- return r;
- z->state->mode = I_BAD;
- z->msg = (char *)"need more for packet flush";
- z->state->sub.marker = 0; /* can try inflateSync */
- return Z_DATA_ERROR;
+ struct inflate_state *state = (struct inflate_state *)z->state;
+ Byte *saved_no = z->next_out;
+ uInt saved_ao = z->avail_out;
+
+ if (state->mode != TYPE && state->mode != HEAD)
+ return Z_DATA_ERROR;
+
+	/* Set up some variables to allow misuse of zlib_updatewindow() */
+ z->avail_out = 0;
+ z->next_out = z->next_in + z->avail_in;
+
+ zlib_updatewindow(z, z->avail_in);
+
+ /* Restore saved variables */
+ z->avail_out = saved_ao;
+ z->next_out = saved_no;
+
+ z->adler = state->check =
+ UPDATE(state->check, z->next_in, z->avail_in);
+
+ z->total_out += z->avail_in;
+ z->total_in += z->avail_in;
+ z->next_in += z->avail_in;
+ state->total += z->avail_in;
+ z->avail_in = 0;
+
+ return Z_OK;
}
diff --git a/lib/zlib_inflate/inflate.h b/lib/zlib_inflate/inflate.h
new file mode 100644
index 00000000000..df8a6c92052
--- /dev/null
+++ b/lib/zlib_inflate/inflate.h
@@ -0,0 +1,107 @@
+/* inflate.h -- internal inflate state definition
+ * Copyright (C) 1995-2004 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* Possible inflate modes between inflate() calls */
+typedef enum {
+ HEAD, /* i: waiting for magic header */
+ FLAGS, /* i: waiting for method and flags (gzip) */
+ TIME, /* i: waiting for modification time (gzip) */
+ OS, /* i: waiting for extra flags and operating system (gzip) */
+ EXLEN, /* i: waiting for extra length (gzip) */
+ EXTRA, /* i: waiting for extra bytes (gzip) */
+ NAME, /* i: waiting for end of file name (gzip) */
+ COMMENT, /* i: waiting for end of comment (gzip) */
+ HCRC, /* i: waiting for header crc (gzip) */
+ DICTID, /* i: waiting for dictionary check value */
+ DICT, /* waiting for inflateSetDictionary() call */
+ TYPE, /* i: waiting for type bits, including last-flag bit */
+ TYPEDO, /* i: same, but skip check to exit inflate on new block */
+ STORED, /* i: waiting for stored size (length and complement) */
+ COPY, /* i/o: waiting for input or output to copy stored block */
+ TABLE, /* i: waiting for dynamic block table lengths */
+ LENLENS, /* i: waiting for code length code lengths */
+ CODELENS, /* i: waiting for length/lit and distance code lengths */
+ LEN, /* i: waiting for length/lit code */
+ LENEXT, /* i: waiting for length extra bits */
+ DIST, /* i: waiting for distance code */
+ DISTEXT, /* i: waiting for distance extra bits */
+ MATCH, /* o: waiting for output space to copy string */
+ LIT, /* o: waiting for output space to write literal */
+ CHECK, /* i: waiting for 32-bit check value */
+ LENGTH, /* i: waiting for 32-bit length (gzip) */
+ DONE, /* finished check, done -- remain here until reset */
+ BAD, /* got a data error -- remain here until reset */
+ MEM, /* got an inflate() memory error -- remain here until reset */
+ SYNC /* looking for synchronization bytes to restart inflate() */
+} inflate_mode;
+
+/*
+ State transitions between above modes -
+
+ (most modes can go to the BAD or MEM mode -- not shown for clarity)
+
+ Process header:
+ HEAD -> (gzip) or (zlib)
+ (gzip) -> FLAGS -> TIME -> OS -> EXLEN -> EXTRA -> NAME
+ NAME -> COMMENT -> HCRC -> TYPE
+ (zlib) -> DICTID or TYPE
+ DICTID -> DICT -> TYPE
+ Read deflate blocks:
+ TYPE -> STORED or TABLE or LEN or CHECK
+ STORED -> COPY -> TYPE
+ TABLE -> LENLENS -> CODELENS -> LEN
+ Read deflate codes:
+ LEN -> LENEXT or LIT or TYPE
+ LENEXT -> DIST -> DISTEXT -> MATCH -> LEN
+ LIT -> LEN
+ Process trailer:
+ CHECK -> LENGTH -> DONE
+ */
+
+/* state maintained between inflate() calls. Approximately 7K bytes. */
+struct inflate_state {
+ inflate_mode mode; /* current inflate mode */
+ int last; /* true if processing last block */
+ int wrap; /* bit 0 true for zlib, bit 1 true for gzip */
+ int havedict; /* true if dictionary provided */
+ int flags; /* gzip header method and flags (0 if zlib) */
+ unsigned dmax; /* zlib header max distance (INFLATE_STRICT) */
+ unsigned long check; /* protected copy of check value */
+ unsigned long total; /* protected copy of output count */
+ /* gz_headerp head; */ /* where to save gzip header information */
+ /* sliding window */
+ unsigned wbits; /* log base 2 of requested window size */
+ unsigned wsize; /* window size or zero if not using window */
+ unsigned whave; /* valid bytes in the window */
+ unsigned write; /* window write index */
+ unsigned char *window; /* allocated sliding window, if needed */
+ /* bit accumulator */
+ unsigned long hold; /* input bit accumulator */
+ unsigned bits; /* number of bits in "in" */
+ /* for string and stored block copying */
+ unsigned length; /* literal or length of data to copy */
+ unsigned offset; /* distance back to copy string from */
+ /* for table and code decoding */
+ unsigned extra; /* extra bits needed */
+ /* fixed and dynamic code tables */
+ code const *lencode; /* starting table for length/literal codes */
+ code const *distcode; /* starting table for distance codes */
+ unsigned lenbits; /* index bits for lencode */
+ unsigned distbits; /* index bits for distcode */
+ /* dynamic table building */
+ unsigned ncode; /* number of code length code lengths */
+ unsigned nlen; /* number of length code lengths */
+ unsigned ndist; /* number of distance code lengths */
+ unsigned have; /* number of code lengths in lens[] */
+ code *next; /* next available space in codes[] */
+ unsigned short lens[320]; /* temporary storage for code lengths */
+ unsigned short work[288]; /* work area for code table building */
+ code codes[ENOUGH]; /* space for code tables */
+};
diff --git a/lib/zlib_inflate/inflate_syms.c b/lib/zlib_inflate/inflate_syms.c
index ef49738f57e..2061d4f0676 100644
--- a/lib/zlib_inflate/inflate_syms.c
+++ b/lib/zlib_inflate/inflate_syms.c
@@ -12,8 +12,7 @@
EXPORT_SYMBOL(zlib_inflate_workspacesize);
EXPORT_SYMBOL(zlib_inflate);
-EXPORT_SYMBOL(zlib_inflateInit_);
-EXPORT_SYMBOL(zlib_inflateInit2_);
+EXPORT_SYMBOL(zlib_inflateInit2);
EXPORT_SYMBOL(zlib_inflateEnd);
EXPORT_SYMBOL(zlib_inflateReset);
EXPORT_SYMBOL(zlib_inflateIncomp);
diff --git a/lib/zlib_inflate/inflate_sync.c b/lib/zlib_inflate/inflate_sync.c
deleted file mode 100644
index 61411ff89d6..00000000000
--- a/lib/zlib_inflate/inflate_sync.c
+++ /dev/null
@@ -1,152 +0,0 @@
-/* inflate.c -- zlib interface to inflate modules
- * Copyright (C) 1995-1998 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-#include <linux/zutil.h>
-#include "infblock.h"
-#include "infutil.h"
-
-#if 0
-int zlib_inflateSync(
- z_streamp z
-)
-{
- uInt n; /* number of bytes to look at */
- Byte *p; /* pointer to bytes */
- uInt m; /* number of marker bytes found in a row */
- uLong r, w; /* temporaries to save total_in and total_out */
-
- /* set up */
- if (z == NULL || z->state == NULL)
- return Z_STREAM_ERROR;
- if (z->state->mode != I_BAD)
- {
- z->state->mode = I_BAD;
- z->state->sub.marker = 0;
- }
- if ((n = z->avail_in) == 0)
- return Z_BUF_ERROR;
- p = z->next_in;
- m = z->state->sub.marker;
-
- /* search */
- while (n && m < 4)
- {
- static const Byte mark[4] = {0, 0, 0xff, 0xff};
- if (*p == mark[m])
- m++;
- else if (*p)
- m = 0;
- else
- m = 4 - m;
- p++, n--;
- }
-
- /* restore */
- z->total_in += p - z->next_in;
- z->next_in = p;
- z->avail_in = n;
- z->state->sub.marker = m;
-
- /* return no joy or set up to restart on a new block */
- if (m != 4)
- return Z_DATA_ERROR;
- r = z->total_in; w = z->total_out;
- zlib_inflateReset(z);
- z->total_in = r; z->total_out = w;
- z->state->mode = BLOCKS;
- return Z_OK;
-}
-#endif /* 0 */
-
-
-/* Returns true if inflate is currently at the end of a block generated
- * by Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP
- * implementation to provide an additional safety check. PPP uses Z_SYNC_FLUSH
- * but removes the length bytes of the resulting empty stored block. When
- * decompressing, PPP checks that at the end of input packet, inflate is
- * waiting for these length bytes.
- */
-#if 0
-int zlib_inflateSyncPoint(
- z_streamp z
-)
-{
- if (z == NULL || z->state == NULL || z->state->blocks == NULL)
- return Z_STREAM_ERROR;
- return zlib_inflate_blocks_sync_point(z->state->blocks);
-}
-#endif /* 0 */
-
-/*
- * This subroutine adds the data at next_in/avail_in to the output history
- * without performing any output. The output buffer must be "caught up";
- * i.e. no pending output (hence s->read equals s->write), and the state must
- * be BLOCKS (i.e. we should be willing to see the start of a series of
- * BLOCKS). On exit, the output will also be caught up, and the checksum
- * will have been updated if need be.
- */
-static int zlib_inflate_addhistory(inflate_blocks_statef *s,
- z_stream *z)
-{
- uLong b; /* bit buffer */ /* NOT USED HERE */
- uInt k; /* bits in bit buffer */ /* NOT USED HERE */
- uInt t; /* temporary storage */
- Byte *p; /* input data pointer */
- uInt n; /* bytes available there */
- Byte *q; /* output window write pointer */
- uInt m; /* bytes to end of window or read pointer */
-
- if (s->read != s->write)
- return Z_STREAM_ERROR;
- if (s->mode != TYPE)
- return Z_DATA_ERROR;
-
- /* we're ready to rock */
- LOAD
- /* while there is input ready, copy to output buffer, moving
- * pointers as needed.
- */
- while (n) {
- t = n; /* how many to do */
- /* is there room until end of buffer? */
- if (t > m) t = m;
- /* update check information */
- if (s->checkfn != NULL)
- s->check = (*s->checkfn)(s->check, q, t);
- memcpy(q, p, t);
- q += t;
- p += t;
- n -= t;
- z->total_out += t;
- s->read = q; /* drag read pointer forward */
-/* WWRAP */ /* expand WWRAP macro by hand to handle s->read */
- if (q == s->end) {
- s->read = q = s->window;
- m = WAVAIL;
- }
- }
- UPDATE
- return Z_OK;
-}
-
-
-/*
- * This subroutine adds the data at next_in/avail_in to the output history
- * without performing any output. The output buffer must be "caught up";
- * i.e. no pending output (hence s->read equals s->write), and the state must
- * be BLOCKS (i.e. we should be willing to see the start of a series of
- * BLOCKS). On exit, the output will also be caught up, and the checksum
- * will have been updated if need be.
- */
-
-int zlib_inflateIncomp(
- z_stream *z
-
-)
-{
- if (z->state->mode != BLOCKS)
- return Z_DATA_ERROR;
- return zlib_inflate_addhistory(z->state->blocks, z);
-}
diff --git a/lib/zlib_inflate/inftrees.c b/lib/zlib_inflate/inftrees.c
index 874950ec485..3fe6ce5b53e 100644
--- a/lib/zlib_inflate/inftrees.c
+++ b/lib/zlib_inflate/inftrees.c
@@ -1,412 +1,315 @@
/* inftrees.c -- generate Huffman trees for efficient decoding
- * Copyright (C) 1995-1998 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
+ * Copyright (C) 1995-2005 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
*/
#include <linux/zutil.h>
#include "inftrees.h"
-#include "infutil.h"
-static const char inflate_copyright[] __attribute_used__ =
- " inflate 1.1.3 Copyright 1995-1998 Mark Adler ";
+#define MAXBITS 15
+
/*
- If you use the zlib library in a product, an acknowledgment is welcome
- in the documentation of your product. If for some reason you cannot
- include such an acknowledgment, I would appreciate that you keep this
- copyright string in the executable of your product.
+ Build a set of tables to decode the provided canonical Huffman code.
+ The code lengths are lens[0..codes-1]. The result starts at *table,
+ whose indices are 0..2^bits-1. work is a writable array of at least
+ lens shorts, which is used as a work area. type is the type of code
+ to be generated, CODES, LENS, or DISTS. On return, zero is success,
+ -1 is an invalid code, and +1 means that ENOUGH isn't enough. table
+ on return points to the next available entry's address. bits is the
+ requested root table index bits, and on return it is the actual root
+ table index bits. It will differ if the request is greater than the
+ longest code or if it is less than the shortest code.
*/
-struct internal_state;
-
-/* simplify the use of the inflate_huft type with some defines */
-#define exop word.what.Exop
-#define bits word.what.Bits
-
-
-static int huft_build (
- uInt *, /* code lengths in bits */
- uInt, /* number of codes */
- uInt, /* number of "simple" codes */
- const uInt *, /* list of base values for non-simple codes */
- const uInt *, /* list of extra bits for non-simple codes */
- inflate_huft **, /* result: starting table */
- uInt *, /* maximum lookup bits (returns actual) */
- inflate_huft *, /* space for trees */
- uInt *, /* hufts used in space */
- uInt * ); /* space for values */
-
-/* Tables for deflate from PKZIP's appnote.txt. */
-static const uInt cplens[31] = { /* Copy lengths for literal codes 257..285 */
+int zlib_inflate_table(codetype type, unsigned short *lens, unsigned codes,
+ code **table, unsigned *bits, unsigned short *work)
+{
+ unsigned len; /* a code's length in bits */
+ unsigned sym; /* index of code symbols */
+ unsigned min, max; /* minimum and maximum code lengths */
+ unsigned root; /* number of index bits for root table */
+ unsigned curr; /* number of index bits for current table */
+ unsigned drop; /* code bits to drop for sub-table */
+ int left; /* number of prefix codes available */
+ unsigned used; /* code entries in table used */
+ unsigned huff; /* Huffman code */
+ unsigned incr; /* for incrementing code, index */
+ unsigned fill; /* index for replicating entries */
+ unsigned low; /* low bits for current root entry */
+ unsigned mask; /* mask for low root bits */
+ code this; /* table entry for duplication */
+ code *next; /* next available space in table */
+ const unsigned short *base; /* base value table to use */
+ const unsigned short *extra; /* extra bits table to use */
+ int end; /* use base and extra for symbol > end */
+ unsigned short count[MAXBITS+1]; /* number of codes of each length */
+ unsigned short offs[MAXBITS+1]; /* offsets in table for each length */
+ static const unsigned short lbase[31] = { /* Length codes 257..285 base */
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
- /* see note #13 above about 258 */
-static const uInt cplext[31] = { /* Extra bits for literal codes 257..285 */
- 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
- 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112}; /* 112==invalid */
-static const uInt cpdist[30] = { /* Copy offsets for distance codes 0..29 */
+ static const unsigned short lext[31] = { /* Length codes 257..285 extra */
+ 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,
+ 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 201, 196};
+ static const unsigned short dbase[32] = { /* Distance codes 0..29 base */
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
- 8193, 12289, 16385, 24577};
-static const uInt cpdext[30] = { /* Extra bits for distance codes */
- 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
- 7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
- 12, 12, 13, 13};
-
-/*
- Huffman code decoding is performed using a multi-level table lookup.
- The fastest way to decode is to simply build a lookup table whose
- size is determined by the longest code. However, the time it takes
- to build this table can also be a factor if the data being decoded
- is not very long. The most common codes are necessarily the
- shortest codes, so those codes dominate the decoding time, and hence
- the speed. The idea is you can have a shorter table that decodes the
- shorter, more probable codes, and then point to subsidiary tables for
- the longer codes. The time it costs to decode the longer codes is
- then traded against the time it takes to make longer tables.
-
- This results of this trade are in the variables lbits and dbits
- below. lbits is the number of bits the first level table for literal/
- length codes can decode in one step, and dbits is the same thing for
- the distance codes. Subsequent tables are also less than or equal to
- those sizes. These values may be adjusted either when all of the
- codes are shorter than that, in which case the longest code length in
- bits is used, or when the shortest code is *longer* than the requested
- table size, in which case the length of the shortest code in bits is
- used.
-
- There are two different values for the two tables, since they code a
- different number of possibilities each. The literal/length table
- codes 286 possible values, or in a flat code, a little over eight
- bits. The distance table codes 30 possible values, or a little less
- than five bits, flat. The optimum values for speed end up being
- about one bit more than those, so lbits is 8+1 and dbits is 5+1.
- The optimum values may differ though from machine to machine, and
- possibly even between compilers. Your mileage may vary.
- */
-
-
-/* If BMAX needs to be larger than 16, then h and x[] should be uLong. */
-#define BMAX 15 /* maximum bit length of any code */
-
-static int huft_build(
- uInt *b, /* code lengths in bits (all assumed <= BMAX) */
- uInt n, /* number of codes (assumed <= 288) */
- uInt s, /* number of simple-valued codes (0..s-1) */
- const uInt *d, /* list of base values for non-simple codes */
- const uInt *e, /* list of extra bits for non-simple codes */
- inflate_huft **t, /* result: starting table */
- uInt *m, /* maximum lookup bits, returns actual */
- inflate_huft *hp, /* space for trees */
- uInt *hn, /* hufts used in space */
- uInt *v /* working area: values in order of bit length */
-)
-/* Given a list of code lengths and a maximum table size, make a set of
- tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR
- if the given code set is incomplete (the tables are still built in this
- case), Z_DATA_ERROR if the input is invalid (an over-subscribed set of
- lengths), or Z_MEM_ERROR if not enough memory. */
-{
+ 8193, 12289, 16385, 24577, 0, 0};
+ static const unsigned short dext[32] = { /* Distance codes 0..29 extra */
+ 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22,
+ 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
+ 28, 28, 29, 29, 64, 64};
+
+ /*
+ Process a set of code lengths to create a canonical Huffman code. The
+ code lengths are lens[0..codes-1]. Each length corresponds to the
+ symbols 0..codes-1. The Huffman code is generated by first sorting the
+ symbols by length from short to long, and retaining the symbol order
+ for codes with equal lengths. Then the code starts with all zero bits
+ for the first code of the shortest length, and the codes are integer
+ increments for the same length, and zeros are appended as the length
+ increases. For the deflate format, these bits are stored backwards
+ from their more natural integer increment ordering, and so when the
+ decoding tables are built in the large loop below, the integer codes
+ are incremented backwards.
+
+ This routine assumes, but does not check, that all of the entries in
+ lens[] are in the range 0..MAXBITS. The caller must assure this.
+   1..MAXBITS is interpreted as that code length. A zero length means that
+   the symbol does not occur in this code.
+
+ The codes are sorted by computing a count of codes for each length,
+ creating from that a table of starting indices for each length in the
+ sorted table, and then entering the symbols in order in the sorted
+ table. The sorted table is work[], with that space being provided by
+ the caller.
+
+ The length counts are used for other purposes as well, i.e. finding
+ the minimum and maximum length codes, determining if there are any
+ codes at all, checking for a valid set of lengths, and looking ahead
+ at length counts to determine sub-table sizes when building the
+ decoding tables.
+ */
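+
+  /*
+    Worked example, taken from RFC 1951 section 3.2.2 rather than from this
+    file: for eight symbols A..H with code lengths {3,3,3,3,3,2,4,4}, the
+    canonical code assigns, in symbol order,
+
+       F = 00, A = 010, B = 011, C = 100, D = 101, E = 110, G = 1110, H = 1111
+
+    (codes written MSB first as in the RFC; deflate streams store each code's
+    bits in reverse, which is why the table-filling loop below increments the
+    codes "backwards").
+   */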
+
+ /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */
+ for (len = 0; len <= MAXBITS; len++)
+ count[len] = 0;
+ for (sym = 0; sym < codes; sym++)
+ count[lens[sym]]++;
+
+ /* bound code lengths, force root to be within code lengths */
+ root = *bits;
+ for (max = MAXBITS; max >= 1; max--)
+ if (count[max] != 0) break;
+ if (root > max) root = max;
+ if (max == 0) { /* no symbols to code at all */
+ this.op = (unsigned char)64; /* invalid code marker */
+ this.bits = (unsigned char)1;
+ this.val = (unsigned short)0;
+ *(*table)++ = this; /* make a table to force an error */
+ *(*table)++ = this;
+ *bits = 1;
+ return 0; /* no symbols, but wait for decoding to report error */
+ }
+ for (min = 1; min <= MAXBITS; min++)
+ if (count[min] != 0) break;
+ if (root < min) root = min;
+
+ /* check for an over-subscribed or incomplete set of lengths */
+ left = 1;
+ for (len = 1; len <= MAXBITS; len++) {
+ left <<= 1;
+ left -= count[len];
+ if (left < 0) return -1; /* over-subscribed */
+ }
+ if (left > 0 && (type == CODES || max != 1))
+ return -1; /* incomplete set */
+
+ /* generate offsets into symbol table for each length for sorting */
+ offs[1] = 0;
+ for (len = 1; len < MAXBITS; len++)
+ offs[len + 1] = offs[len] + count[len];
+
+ /* sort symbols by length, by symbol order within each length */
+ for (sym = 0; sym < codes; sym++)
+ if (lens[sym] != 0) work[offs[lens[sym]]++] = (unsigned short)sym;
+
+ /*
+ Create and fill in decoding tables. In this loop, the table being
+ filled is at next and has curr index bits. The code being used is huff
+ with length len. That code is converted to an index by dropping drop
+ bits off of the bottom. For codes where len is less than drop + curr,
+ those top drop + curr - len bits are incremented through all values to
+ fill the table with replicated entries.
+
+ root is the number of index bits for the root table. When len exceeds
+ root, sub-tables are created pointed to by the root entry with an index
+ of the low root bits of huff. This is saved in low to check for when a
+ new sub-table should be started. drop is zero when the root table is
+ being filled, and drop is root when sub-tables are being filled.
+
+ When a new sub-table is needed, it is necessary to look ahead in the
+ code lengths to determine what size sub-table is needed. The length
+ counts are used for this, and so count[] is decremented as codes are
+ entered in the tables.
+
+ used keeps track of how many table entries have been allocated from the
+ provided *table space. It is checked when a LENS table is being made
+ against the space in *table, ENOUGH, minus the maximum space needed by
+ the worst case distance code, MAXD. This should never happen, but the
+ sufficiency of ENOUGH has not been proven exhaustively, hence the check.
+ This assumes that when type == LENS, bits == 9.
+
+ sym increments through all symbols, and the loop terminates when
+ all codes of length max, i.e. all codes, have been processed. This
+ routine permits incomplete codes, so another loop after this one fills
+ in the rest of the decoding tables with invalid code markers.
+ */
+
+ /* set up for code type */
+ switch (type) {
+ case CODES:
+ base = extra = work; /* dummy value--not used */
+ end = 19;
+ break;
+ case LENS:
+ base = lbase;
+ base -= 257;
+ extra = lext;
+ extra -= 257;
+ end = 256;
+ break;
+ default: /* DISTS */
+ base = dbase;
+ extra = dext;
+ end = -1;
+ }
- uInt a; /* counter for codes of length k */
- uInt c[BMAX+1]; /* bit length count table */
- uInt f; /* i repeats in table every f entries */
- int g; /* maximum code length */
- int h; /* table level */
- register uInt i; /* counter, current code */
- register uInt j; /* counter */
- register int k; /* number of bits in current code */
- int l; /* bits per table (returned in m) */
- uInt mask; /* (1 << w) - 1, to avoid cc -O bug on HP */
- register uInt *p; /* pointer into c[], b[], or v[] */
- inflate_huft *q; /* points to current table */
- struct inflate_huft_s r; /* table entry for structure assignment */
- inflate_huft *u[BMAX]; /* table stack */
- register int w; /* bits before this table == (l * h) */
- uInt x[BMAX+1]; /* bit offsets, then code stack */
- uInt *xp; /* pointer into x */
- int y; /* number of dummy codes added */
- uInt z; /* number of entries in current table */
-
-
- /* Generate counts for each bit length */
- p = c;
-#define C0 *p++ = 0;
-#define C2 C0 C0 C0 C0
-#define C4 C2 C2 C2 C2
- C4 /* clear c[]--assume BMAX+1 is 16 */
- p = b; i = n;
- do {
- c[*p++]++; /* assume all entries <= BMAX */
- } while (--i);
- if (c[0] == n) /* null input--all zero length codes */
- {
- *t = NULL;
- *m = 0;
- return Z_OK;
- }
-
-
- /* Find minimum and maximum length, bound *m by those */
- l = *m;
- for (j = 1; j <= BMAX; j++)
- if (c[j])
- break;
- k = j; /* minimum code length */
- if ((uInt)l < j)
- l = j;
- for (i = BMAX; i; i--)
- if (c[i])
- break;
- g = i; /* maximum code length */
- if ((uInt)l > i)
- l = i;
- *m = l;
-
-
- /* Adjust last length count to fill out codes, if needed */
- for (y = 1 << j; j < i; j++, y <<= 1)
- if ((y -= c[j]) < 0)
- return Z_DATA_ERROR;
- if ((y -= c[i]) < 0)
- return Z_DATA_ERROR;
- c[i] += y;
-
-
- /* Generate starting offsets into the value table for each length */
- x[1] = j = 0;
- p = c + 1; xp = x + 2;
- while (--i) { /* note that i == g from above */
- *xp++ = (j += *p++);
- }
-
-
- /* Make a table of values in order of bit lengths */
- p = b; i = 0;
- do {
- if ((j = *p++) != 0)
- v[x[j]++] = i;
- } while (++i < n);
- n = x[g]; /* set n to length of v */
-
-
- /* Generate the Huffman codes and for each, make the table entries */
- x[0] = i = 0; /* first Huffman code is zero */
- p = v; /* grab values in bit order */
- h = -1; /* no tables yet--level -1 */
- w = -l; /* bits decoded == (l * h) */
- u[0] = NULL; /* just to keep compilers happy */
- q = NULL; /* ditto */
- z = 0; /* ditto */
-
- /* go through the bit lengths (k already is bits in shortest code) */
- for (; k <= g; k++)
- {
- a = c[k];
- while (a--)
- {
- /* here i is the Huffman code of length k bits for value *p */
- /* make tables up to required level */
- while (k > w + l)
- {
- h++;
- w += l; /* previous table always l bits */
-
- /* compute minimum size table less than or equal to l bits */
- z = g - w;
- z = z > (uInt)l ? l : z; /* table size upper limit */
- if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */
- { /* too few codes for k-w bit table */
- f -= a + 1; /* deduct codes from patterns left */
- xp = c + k;
- if (j < z)
- while (++j < z) /* try smaller tables up to z bits */
- {
- if ((f <<= 1) <= *++xp)
- break; /* enough codes to use up j bits */
- f -= *xp; /* else deduct codes from patterns */
- }
+ /* initialize state for loop */
+ huff = 0; /* starting code */
+ sym = 0; /* starting code symbol */
+ len = min; /* starting code length */
+ next = *table; /* current table to fill in */
+ curr = root; /* current table index bits */
+ drop = 0; /* current bits to drop from code for index */
+ low = (unsigned)(-1); /* trigger new sub-table when len > root */
+ used = 1U << root; /* use root table entries */
+ mask = used - 1; /* mask for comparing low */
+
+ /* check available table space */
+ if (type == LENS && used >= ENOUGH - MAXD)
+ return 1;
+
+ /* process all codes and make table entries */
+ for (;;) {
+ /* create table entry */
+ this.bits = (unsigned char)(len - drop);
+ if ((int)(work[sym]) < end) {
+ this.op = (unsigned char)0;
+ this.val = work[sym];
}
- z = 1 << j; /* table entries for j-bit table */
-
- /* allocate new table */
- if (*hn + z > MANY) /* (note: doesn't matter for fixed) */
- return Z_DATA_ERROR; /* overflow of MANY */
- u[h] = q = hp + *hn;
- *hn += z;
-
- /* connect to last table, if there is one */
- if (h)
- {
- x[h] = i; /* save pattern for backing up */
- r.bits = (Byte)l; /* bits to dump before this table */
- r.exop = (Byte)j; /* bits in this table */
- j = i >> (w - l);
- r.base = (uInt)(q - u[h-1] - j); /* offset to this table */
- u[h-1][j] = r; /* connect to last table */
+ else if ((int)(work[sym]) > end) {
+ this.op = (unsigned char)(extra[work[sym]]);
+ this.val = base[work[sym]];
+ }
+ else {
+ this.op = (unsigned char)(32 + 64); /* end of block */
+ this.val = 0;
}
- else
- *t = q; /* first table is returned result */
- }
-
- /* set up table entry in r */
- r.bits = (Byte)(k - w);
- if (p >= v + n)
- r.exop = 128 + 64; /* out of values--invalid code */
- else if (*p < s)
- {
- r.exop = (Byte)(*p < 256 ? 0 : 32 + 64); /* 256 is end-of-block */
- r.base = *p++; /* simple code is just the value */
- }
- else
- {
- r.exop = (Byte)(e[*p - s] + 16 + 64);/* non-simple--look up in lists */
- r.base = d[*p++ - s];
- }
-
- /* fill code-like entries with r */
- f = 1 << (k - w);
- for (j = i >> w; j < z; j += f)
- q[j] = r;
-
- /* backwards increment the k-bit code i */
- for (j = 1 << (k - 1); i & j; j >>= 1)
- i ^= j;
- i ^= j;
-
- /* backup over finished tables */
- mask = (1 << w) - 1; /* needed on HP, cc -O bug */
- while ((i & mask) != x[h])
- {
- h--; /* don't need to update q */
- w -= l;
- mask = (1 << w) - 1;
- }
- }
- }
+ /* replicate for those indices with low len bits equal to huff */
+ incr = 1U << (len - drop);
+ fill = 1U << curr;
+ min = fill; /* save offset to next table */
+ do {
+ fill -= incr;
+ next[(huff >> drop) + fill] = this;
+ } while (fill != 0);
+
+ /* backwards increment the len-bit code huff */
+ incr = 1U << (len - 1);
+ while (huff & incr)
+ incr >>= 1;
+ if (incr != 0) {
+ huff &= incr - 1;
+ huff += incr;
+ }
+ else
+ huff = 0;
- /* Return Z_BUF_ERROR if we were given an incomplete table */
- return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK;
-}
+ /* go to next symbol, update count, len */
+ sym++;
+ if (--(count[len]) == 0) {
+ if (len == max) break;
+ len = lens[work[sym]];
+ }
+ /* create new sub-table if needed */
+ if (len > root && (huff & mask) != low) {
+ /* if first time, transition to sub-tables */
+ if (drop == 0)
+ drop = root;
+
+ /* increment past last table */
+ next += min; /* here min is 1 << curr */
+
+ /* determine length of next table */
+ curr = len - drop;
+ left = (int)(1 << curr);
+ while (curr + drop < max) {
+ left -= count[curr + drop];
+ if (left <= 0) break;
+ curr++;
+ left <<= 1;
+ }
-int zlib_inflate_trees_bits(
- uInt *c, /* 19 code lengths */
- uInt *bb, /* bits tree desired/actual depth */
- inflate_huft **tb, /* bits tree result */
- inflate_huft *hp, /* space for trees */
- z_streamp z /* for messages */
-)
-{
- int r;
- uInt hn = 0; /* hufts used in space */
- uInt *v; /* work area for huft_build */
-
- v = WS(z)->tree_work_area_1;
- r = huft_build(c, 19, 19, NULL, NULL, tb, bb, hp, &hn, v);
- if (r == Z_DATA_ERROR)
- z->msg = (char*)"oversubscribed dynamic bit lengths tree";
- else if (r == Z_BUF_ERROR || *bb == 0)
- {
- z->msg = (char*)"incomplete dynamic bit lengths tree";
- r = Z_DATA_ERROR;
- }
- return r;
-}
+ /* check for enough space */
+ used += 1U << curr;
+ if (type == LENS && used >= ENOUGH - MAXD)
+ return 1;
-int zlib_inflate_trees_dynamic(
- uInt nl, /* number of literal/length codes */
- uInt nd, /* number of distance codes */
- uInt *c, /* that many (total) code lengths */
- uInt *bl, /* literal desired/actual bit depth */
- uInt *bd, /* distance desired/actual bit depth */
- inflate_huft **tl, /* literal/length tree result */
- inflate_huft **td, /* distance tree result */
- inflate_huft *hp, /* space for trees */
- z_streamp z /* for messages */
-)
-{
- int r;
- uInt hn = 0; /* hufts used in space */
- uInt *v; /* work area for huft_build */
-
- /* allocate work area */
- v = WS(z)->tree_work_area_2;
-
- /* build literal/length tree */
- r = huft_build(c, nl, 257, cplens, cplext, tl, bl, hp, &hn, v);
- if (r != Z_OK || *bl == 0)
- {
- if (r == Z_DATA_ERROR)
- z->msg = (char*)"oversubscribed literal/length tree";
- else if (r != Z_MEM_ERROR)
- {
- z->msg = (char*)"incomplete literal/length tree";
- r = Z_DATA_ERROR;
- }
- return r;
- }
-
- /* build distance tree */
- r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, hp, &hn, v);
- if (r != Z_OK || (*bd == 0 && nl > 257))
- {
- if (r == Z_DATA_ERROR)
- z->msg = (char*)"oversubscribed distance tree";
- else if (r == Z_BUF_ERROR) {
-#ifdef PKZIP_BUG_WORKAROUND
- r = Z_OK;
- }
-#else
- z->msg = (char*)"incomplete distance tree";
- r = Z_DATA_ERROR;
- }
- else if (r != Z_MEM_ERROR)
- {
- z->msg = (char*)"empty distance tree with lengths";
- r = Z_DATA_ERROR;
+ /* point entry in root table to sub-table */
+ low = huff & mask;
+ (*table)[low].op = (unsigned char)curr;
+ (*table)[low].bits = (unsigned char)root;
+ (*table)[low].val = (unsigned short)(next - *table);
+ }
}
- return r;
-#endif
- }
- /* done */
- return Z_OK;
-}
+ /*
+ Fill in rest of table for incomplete codes. This loop is similar to the
+ loop above in incrementing huff for table indices. It is assumed that
+ len is equal to curr + drop, so there is no loop needed to increment
+ through high index bits. When the current sub-table is filled, the loop
+ drops back to the root table to fill in any remaining entries there.
+ */
+ this.op = (unsigned char)64; /* invalid code marker */
+ this.bits = (unsigned char)(len - drop);
+ this.val = (unsigned short)0;
+ while (huff != 0) {
+ /* when done with sub-table, drop back to root table */
+ if (drop != 0 && (huff & mask) != low) {
+ drop = 0;
+ len = root;
+ next = *table;
+ this.bits = (unsigned char)len;
+ }
+ /* put invalid code marker in table */
+ next[huff >> drop] = this;
-int zlib_inflate_trees_fixed(
- uInt *bl, /* literal desired/actual bit depth */
- uInt *bd, /* distance desired/actual bit depth */
- inflate_huft **tl, /* literal/length tree result */
- inflate_huft **td, /* distance tree result */
- inflate_huft *hp, /* space for trees */
- z_streamp z /* for memory allocation */
-)
-{
- int i; /* temporary variable */
- unsigned l[288]; /* length list for huft_build */
- uInt *v; /* work area for huft_build */
-
- /* set up literal table */
- for (i = 0; i < 144; i++)
- l[i] = 8;
- for (; i < 256; i++)
- l[i] = 9;
- for (; i < 280; i++)
- l[i] = 7;
- for (; i < 288; i++) /* make a complete, but wrong code set */
- l[i] = 8;
- *bl = 9;
- v = WS(z)->tree_work_area_1;
- if ((i = huft_build(l, 288, 257, cplens, cplext, tl, bl, hp, &i, v)) != 0)
- return i;
-
- /* set up distance table */
- for (i = 0; i < 30; i++) /* make an incomplete code set */
- l[i] = 5;
- *bd = 5;
- if ((i = huft_build(l, 30, 0, cpdist, cpdext, td, bd, hp, &i, v)) > 1)
- return i;
-
- return Z_OK;
+ /* backwards increment the len-bit code huff */
+ incr = 1U << (len - 1);
+ while (huff & incr)
+ incr >>= 1;
+ if (incr != 0) {
+ huff &= incr - 1;
+ huff += incr;
+ }
+ else
+ huff = 0;
+ }
+
+ /* set return parameters */
+ *table += used;
+ *bits = root;
+ return 0;
}
diff --git a/lib/zlib_inflate/inftrees.h b/lib/zlib_inflate/inftrees.h
index e37705adc00..5f5219b1240 100644
--- a/lib/zlib_inflate/inftrees.h
+++ b/lib/zlib_inflate/inftrees.h
@@ -1,6 +1,6 @@
/* inftrees.h -- header to use inftrees.c
- * Copyright (C) 1995-1998 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
+ * Copyright (C) 1995-2005 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
*/
/* WARNING: this file should *not* be used by applications. It is
@@ -8,57 +8,48 @@
subject to change. Applications should only use zlib.h.
*/
-/* Huffman code lookup table entry--this entry is four bytes for machines
- that have 16-bit pointers (e.g. PC's in the small or medium model). */
-
-#ifndef _INFTREES_H
-#define _INFTREES_H
-
-typedef struct inflate_huft_s inflate_huft;
-
-struct inflate_huft_s {
- union {
- struct {
- Byte Exop; /* number of extra bits or operation */
- Byte Bits; /* number of bits in this code or subcode */
- } what;
- uInt pad; /* pad structure to a power of 2 (4 bytes for */
- } word; /* 16-bit, 8 bytes for 32-bit int's) */
- uInt base; /* literal, length base, distance base,
- or table offset */
-};
+/* Structure for decoding tables. Each entry provides either the
+ information needed to do the operation requested by the code that
+ indexed that table entry, or it provides a pointer to another
+ table that indexes more bits of the code. op indicates whether
+ the entry is a pointer to another table, a literal, a length or
+ distance, an end-of-block, or an invalid code. For a table
+   pointer, the low four bits of op are the number of index bits of
+   that table. For a length or distance, the low four bits of op
+   are the number of extra bits to get after the code. bits is
+ the number of bits in this code or part of the code to drop off
+ of the bit buffer. val is the actual byte to output in the case
+ of a literal, the base length or distance, or the offset from
+ the current table to the next table. Each entry is four bytes. */
+typedef struct {
+ unsigned char op; /* operation, extra bits, table bits */
+ unsigned char bits; /* bits in this part of the code */
+ unsigned short val; /* offset in table or code value */
+} code;
+
+/* op values as set by inflate_table():
+ 00000000 - literal
+ 0000tttt - table link, tttt != 0 is the number of table index bits
+ 0001eeee - length or distance, eeee is the number of extra bits
+ 01100000 - end of block
+ 01000000 - invalid code
+ */
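+
+/* Illustrative reading of the encoding above (an example, not an additional
+   rule): for length symbol 257, whose base length is 3 with no extra bits
+   (see lbase[] and lext[] in inftrees.c), zlib_inflate_table() emits an
+   entry with op = 0x10 (length, 0 extra bits) and val = 3, with bits set to
+   the length of that symbol's Huffman code in the current block. */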
/* Maximum size of dynamic tree. The maximum found in a long but non-
- exhaustive search was 1004 huft structures (850 for length/literals
- and 154 for distances, the latter actually the result of an
- exhaustive search). The actual maximum is not known, but the
- value below is more than safe. */
-#define MANY 1440
-
-extern int zlib_inflate_trees_bits (
- uInt *, /* 19 code lengths */
- uInt *, /* bits tree desired/actual depth */
- inflate_huft **, /* bits tree result */
- inflate_huft *, /* space for trees */
- z_streamp); /* for messages */
-
-extern int zlib_inflate_trees_dynamic (
- uInt, /* number of literal/length codes */
- uInt, /* number of distance codes */
- uInt *, /* that many (total) code lengths */
- uInt *, /* literal desired/actual bit depth */
- uInt *, /* distance desired/actual bit depth */
- inflate_huft **, /* literal/length tree result */
- inflate_huft **, /* distance tree result */
- inflate_huft *, /* space for trees */
- z_streamp); /* for messages */
-
-extern int zlib_inflate_trees_fixed (
- uInt *, /* literal desired/actual bit depth */
- uInt *, /* distance desired/actual bit depth */
- inflate_huft **, /* literal/length tree result */
- inflate_huft **, /* distance tree result */
- inflate_huft *, /* space for trees */
- z_streamp); /* for memory allocation */
-
-#endif /* _INFTREES_H */
+ exhaustive search was 1444 code structures (852 for length/literals
+ and 592 for distances, the latter actually the result of an
+ exhaustive search). The true maximum is not known, but the value
+ below is more than safe. */
+#define ENOUGH 2048
+#define MAXD 592
+
+/* Type of code to build for zlib_inflate_table() */
+typedef enum {
+ CODES,
+ LENS,
+ DISTS
+} codetype;
+
+extern int zlib_inflate_table (codetype type, unsigned short *lens,
+ unsigned codes, code **table,
+ unsigned *bits, unsigned short *work);
diff --git a/lib/zlib_inflate/infutil.c b/lib/zlib_inflate/infutil.c
deleted file mode 100644
index 00202b3438e..00000000000
--- a/lib/zlib_inflate/infutil.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/* inflate_util.c -- data and routines common to blocks and codes
- * Copyright (C) 1995-1998 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-#include <linux/zutil.h>
-#include "infblock.h"
-#include "inftrees.h"
-#include "infcodes.h"
-#include "infutil.h"
-
-struct inflate_codes_state;
-
-/* And'ing with mask[n] masks the lower n bits */
-uInt zlib_inflate_mask[17] = {
- 0x0000,
- 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
- 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
-};
-
-
-/* copy as much as possible from the sliding window to the output area */
-int zlib_inflate_flush(
- inflate_blocks_statef *s,
- z_streamp z,
- int r
-)
-{
- uInt n;
- Byte *p;
- Byte *q;
-
- /* local copies of source and destination pointers */
- p = z->next_out;
- q = s->read;
-
- /* compute number of bytes to copy as far as end of window */
- n = (uInt)((q <= s->write ? s->write : s->end) - q);
- if (n > z->avail_out) n = z->avail_out;
- if (n && r == Z_BUF_ERROR) r = Z_OK;
-
- /* update counters */
- z->avail_out -= n;
- z->total_out += n;
-
- /* update check information */
- if (s->checkfn != NULL)
- z->adler = s->check = (*s->checkfn)(s->check, q, n);
-
- /* copy as far as end of window */
- memcpy(p, q, n);
- p += n;
- q += n;
-
- /* see if more to copy at beginning of window */
- if (q == s->end)
- {
- /* wrap pointers */
- q = s->window;
- if (s->write == s->end)
- s->write = s->window;
-
- /* compute bytes to copy */
- n = (uInt)(s->write - q);
- if (n > z->avail_out) n = z->avail_out;
- if (n && r == Z_BUF_ERROR) r = Z_OK;
-
- /* update counters */
- z->avail_out -= n;
- z->total_out += n;
-
- /* update check information */
- if (s->checkfn != NULL)
- z->adler = s->check = (*s->checkfn)(s->check, q, n);
-
- /* copy */
- memcpy(p, q, n);
- p += n;
- q += n;
- }
-
- /* update pointers */
- z->next_out = p;
- s->read = q;
-
- /* done */
- return r;
-}
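
In case the removed helper is unfamiliar: its job, as its comment says, is to copy as much as possible from a circular sliding window to the output area, with at most one wrap back to the start of the window. A rough, self-contained sketch of that copy pattern follows; all names are hypothetical, and the real routine additionally updates the stream counters and the running check value.

#include <linux/string.h>

static unsigned sketch_window_flush(unsigned char *out, unsigned out_avail,
				    const unsigned char *window, unsigned wsize,
				    unsigned read_pos, unsigned write_pos)
{
	unsigned n, copied;

	if (read_pos <= write_pos) {
		/* no wrap: pending bytes are read_pos..write_pos */
		n = write_pos - read_pos;
		if (n > out_avail)
			n = out_avail;
		memcpy(out, window + read_pos, n);
		return n;
	}

	/* wrapped: copy read_pos..end of window first */
	n = wsize - read_pos;
	if (n > out_avail)
		n = out_avail;
	memcpy(out, window + read_pos, n);
	copied = n;

	/* then the part that wrapped around to the start of the window */
	if (copied < out_avail) {
		n = write_pos;
		if (n > out_avail - copied)
			n = out_avail - copied;
		memcpy(out + copied, window, n);
		copied += n;
	}
	return copied;
}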
diff --git a/lib/zlib_inflate/infutil.h b/lib/zlib_inflate/infutil.h
index a15875fc5f7..eb1a9007bd8 100644
--- a/lib/zlib_inflate/infutil.h
+++ b/lib/zlib_inflate/infutil.h
@@ -11,184 +11,12 @@
#ifndef _INFUTIL_H
#define _INFUTIL_H
-#include <linux/zconf.h>
-#include "inftrees.h"
-#include "infcodes.h"
-
-typedef enum {
- TYPE, /* get type bits (3, including end bit) */
- LENS, /* get lengths for stored */
- STORED, /* processing stored block */
- TABLE, /* get table lengths */
- BTREE, /* get bit lengths tree for a dynamic block */
- DTREE, /* get length, distance trees for a dynamic block */
- CODES, /* processing fixed or dynamic block */
- DRY, /* output remaining window bytes */
- B_DONE, /* finished last block, done */
- B_BAD} /* got a data error--stuck here */
-inflate_block_mode;
-
-/* inflate blocks semi-private state */
-struct inflate_blocks_state {
-
- /* mode */
- inflate_block_mode mode; /* current inflate_block mode */
-
- /* mode dependent information */
- union {
- uInt left; /* if STORED, bytes left to copy */
- struct {
- uInt table; /* table lengths (14 bits) */
- uInt index; /* index into blens (or border) */
- uInt *blens; /* bit lengths of codes */
- uInt bb; /* bit length tree depth */
- inflate_huft *tb; /* bit length decoding tree */
- } trees; /* if DTREE, decoding info for trees */
- struct {
- inflate_codes_statef
- *codes;
- } decode; /* if CODES, current state */
- } sub; /* submode */
- uInt last; /* true if this block is the last block */
-
- /* mode independent information */
- uInt bitk; /* bits in bit buffer */
- uLong bitb; /* bit buffer */
- inflate_huft *hufts; /* single malloc for tree space */
- Byte *window; /* sliding window */
- Byte *end; /* one byte after sliding window */
- Byte *read; /* window read pointer */
- Byte *write; /* window write pointer */
- check_func checkfn; /* check function */
- uLong check; /* check on output */
-
-};
-
-
-/* defines for inflate input/output */
-/* update pointers and return */
-#define UPDBITS {s->bitb=b;s->bitk=k;}
-#define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;}
-#define UPDOUT {s->write=q;}
-#define UPDATE {UPDBITS UPDIN UPDOUT}
-#define LEAVE {UPDATE return zlib_inflate_flush(s,z,r);}
-/* get bytes and bits */
-#define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;}
-#define NEEDBYTE {if(n)r=Z_OK;else LEAVE}
-#define NEXTBYTE (n--,*p++)
-#define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}}
-#define DUMPBITS(j) {b>>=(j);k-=(j);}
-/* output bytes */
-#define WAVAIL (uInt)(q<s->read?s->read-q-1:s->end-q)
-#define LOADOUT {q=s->write;m=(uInt)WAVAIL;}
-#define WRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=(uInt)WAVAIL;}}
-#define FLUSH {UPDOUT r=zlib_inflate_flush(s,z,r); LOADOUT}
-#define NEEDOUT {if(m==0){WRAP if(m==0){FLUSH WRAP if(m==0) LEAVE}}r=Z_OK;}
-#define OUTBYTE(a) {*q++=(Byte)(a);m--;}
-/* load local pointers */
-#define LOAD {LOADIN LOADOUT}
-
-/* masks for lower bits (size given to avoid silly warnings with Visual C++) */
-extern uInt zlib_inflate_mask[17];
-
-/* copy as much as possible from the sliding window to the output area */
-extern int zlib_inflate_flush (
- inflate_blocks_statef *,
- z_streamp ,
- int);
-
-/* inflate private state */
-typedef enum {
- METHOD, /* waiting for method byte */
- FLAG, /* waiting for flag byte */
- DICT4, /* four dictionary check bytes to go */
- DICT3, /* three dictionary check bytes to go */
- DICT2, /* two dictionary check bytes to go */
- DICT1, /* one dictionary check byte to go */
- DICT0, /* waiting for inflateSetDictionary */
- BLOCKS, /* decompressing blocks */
- CHECK4, /* four check bytes to go */
- CHECK3, /* three check bytes to go */
- CHECK2, /* two check bytes to go */
- CHECK1, /* one check byte to go */
- I_DONE, /* finished check, done */
- I_BAD} /* got an error--stay here */
-inflate_mode;
-
-struct internal_state {
-
- /* mode */
- inflate_mode mode; /* current inflate mode */
-
- /* mode dependent information */
- union {
- uInt method; /* if FLAGS, method byte */
- struct {
- uLong was; /* computed check value */
- uLong need; /* stream check value */
- } check; /* if CHECK, check values to compare */
- uInt marker; /* if BAD, inflateSync's marker bytes count */
- } sub; /* submode */
-
- /* mode independent information */
- int nowrap; /* flag for no wrapper */
- uInt wbits; /* log2(window size) (8..15, defaults to 15) */
- inflate_blocks_statef
- *blocks; /* current inflate_blocks state */
-
-};
-
-/* inflate codes private state */
-typedef enum { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
- START, /* x: set up for LEN */
- LEN, /* i: get length/literal/eob next */
- LENEXT, /* i: getting length extra (have base) */
- DIST, /* i: get distance next */
- DISTEXT, /* i: getting distance extra */
- COPY, /* o: copying bytes in window, waiting for space */
- LIT, /* o: got literal, waiting for output space */
- WASH, /* o: got eob, possibly still output waiting */
- END, /* x: got eob and all data flushed */
- BADCODE} /* x: got error */
-inflate_codes_mode;
-
-struct inflate_codes_state {
-
- /* mode */
- inflate_codes_mode mode; /* current inflate_codes mode */
-
- /* mode dependent information */
- uInt len;
- union {
- struct {
- inflate_huft *tree; /* pointer into tree */
- uInt need; /* bits needed */
- } code; /* if LEN or DIST, where in tree */
- uInt lit; /* if LIT, literal */
- struct {
- uInt get; /* bits to get for extra */
- uInt dist; /* distance back to copy from */
- } copy; /* if EXT or COPY, where and how much */
- } sub; /* submode */
-
- /* mode independent information */
- Byte lbits; /* ltree bits decoded per branch */
- Byte dbits; /* dtree bits decoder per branch */
- inflate_huft *ltree; /* literal/length/eob tree */
- inflate_huft *dtree; /* distance tree */
-
-};
+#include <linux/zlib.h>
/* memory allocation for inflation */
struct inflate_workspace {
- inflate_codes_statef working_state;
- struct inflate_blocks_state working_blocks_state;
- struct internal_state internal_state;
- unsigned int tree_work_area_1[19];
- unsigned int tree_work_area_2[288];
- unsigned working_blens[258 + 0x1f + 0x1f];
- inflate_huft working_hufts[MANY];
+ struct inflate_state inflate_state;
unsigned char working_window[1 << MAX_WBITS];
};
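
The new workspace wraps all of inflate's per-stream state plus the 1 << MAX_WBITS (32 KiB) sliding window in a single allocation. As a hedged usage sketch (the function below is hypothetical, but zlib_inflate_workspacesize(), z_stream.workspace and the zlib_inflateInit()/zlib_inflate()/zlib_inflateEnd() calls are the existing kernel zlib API), a caller typically supplies the workspace like this:

#include <linux/zlib.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

static int sketch_inflate_buffer(void *dst, unsigned int dst_len,
				 const void *src, unsigned int src_len)
{
	z_stream strm;
	int ret;

	/* one allocation covers struct inflate_workspace, window included */
	strm.workspace = vmalloc(zlib_inflate_workspacesize());
	if (!strm.workspace)
		return -ENOMEM;

	strm.next_in = (void *)src;
	strm.avail_in = src_len;
	strm.next_out = dst;
	strm.avail_out = dst_len;

	ret = zlib_inflateInit(&strm);
	if (ret == Z_OK) {
		ret = zlib_inflate(&strm, Z_FINISH);
		zlib_inflateEnd(&strm);
	}

	vfree(strm.workspace);
	return ret == Z_STREAM_END ? 0 : -EIO;
}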