Merge commit 'v2.6.34-rc7' into perf/nmi
Merge reason: catch up with latest softlockup detector changes.
lib/Kconfig

@@ -160,6 +160,9 @@ config TEXTSEARCH_BM
config TEXTSEARCH_FSM
	tristate

config BTREE
	boolean

config HAS_IOMEM
	boolean
	depends on !NO_IOMEM

lib/Kconfig.debug

@@ -103,7 +103,8 @@ config HEADERS_CHECK

config DEBUG_SECTION_MISMATCH
	bool "Enable full Section mismatch analysis"
	depends on UNDEFINED
	depends on UNDEFINED || (BLACKFIN)
	default y
	# This option is on purpose disabled for now.
	# It will be enabled when we are down to a reasonable number
	# of section mismatch warnings (< 10 for an allyesconfig build)
@@ -367,7 +368,7 @@ config SLUB_STATS
config DEBUG_KMEMLEAK
	bool "Kernel memory leak detector"
	depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
		(X86 || ARM || PPC || S390)
		(X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE)

	select DEBUG_FS if SYSFS
	select STACKTRACE if STACKTRACE_SUPPORT
@@ -511,6 +512,18 @@ config PROVE_LOCKING

	  For more details, see Documentation/lockdep-design.txt.

config PROVE_RCU
	bool "RCU debugging: prove RCU correctness"
	depends on PROVE_LOCKING
	default n
	help
	  This feature enables lockdep extensions that check for correct
	  use of RCU APIs.  This is currently under development.  Say Y
	  if you want to debug RCU usage or help work on the PROVE_RCU
	  feature.

	  Say N if you are unsure.
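[Editor's note: a minimal sketch of a debug configuration that exercises the new checker; option names are taken from this hunk, and a full lockdep-enabled config would pull in further dependencies.]

	CONFIG_DEBUG_KERNEL=y
	CONFIG_PROVE_LOCKING=y
	# PROVE_RCU extends the lockdep machinery selected above
	CONFIG_PROVE_RCU=y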

config LOCKDEP
	bool
	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -532,6 +545,14 @@ config LOCK_STAT

	  For more details, see Documentation/lockstat.txt

	  This also enables lock events required by "perf lock",
	  a subcommand of perf.
	  If you want to use "perf lock", you also need to turn on
	  CONFIG_EVENT_TRACING.

	  CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
	  (CONFIG_LOCKDEP defines "acquire" and "release" events.)
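[Editor's note: a hedged sketch of the workflow this help text describes; the perf subcommand names are from tooling of this era and exact options may differ by version — check perf-lock(1).]

	CONFIG_LOCK_STAT=y
	CONFIG_EVENT_TRACING=y

	# then, from userspace:
	#   perf lock record -- <workload>
	#   perf lock report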

config DEBUG_LOCKDEP
	bool "Lock dependency engine debugging"
	depends on DEBUG_KERNEL && LOCKDEP
@@ -777,10 +798,22 @@ config RCU_CPU_STALL_DETECTOR
	  CPUs are delaying the current grace period, but only when
	  the grace period extends for excessive time periods.

	  Say Y if you want RCU to perform such checks.
	  Say N if you want to disable such checks.

	  Say Y if you are unsure.

config RCU_CPU_STALL_VERBOSE
	bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
	depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
	default n
	help
	  This option causes RCU to printk detailed per-task information
	  for any tasks that are stalling the current RCU grace period.

	  Say N if you are unsure.

	  Say Y if you want to enable such checks.

config KPROBES_SANITY_TEST
	bool "Kprobes sanity tests"
	depends on DEBUG_KERNEL
@@ -852,8 +885,7 @@ config DEBUG_FORCE_WEAK_PER_CPU

config LKDTM
	tristate "Linux Kernel Dump Test Tool Module"
	depends on DEBUG_KERNEL
	depends on KPROBES
	depends on DEBUG_FS
	depends on BLOCK
	default n
	help
@@ -864,7 +896,7 @@ config LKDTM
	  called lkdtm.

	  Documentation on how to use the module can be found in
	  drivers/misc/lkdtm.c
	  Documentation/fault-injection/provoke-crashes.txt

config FAULT_INJECTION
	bool "Fault-injection framework"

lib/Makefile

@@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o

obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
	 string_helpers.o gcd.o list_sort.o
	 string_helpers.o gcd.o lcm.o list_sort.o

ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -41,6 +41,7 @@ lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
obj-$(CONFIG_BTREE) += btree.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o

lib/bitmap.c | 19

@@ -487,7 +487,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
EXPORT_SYMBOL(__bitmap_parse);

/**
 * bitmap_parse_user()
 * bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap
 *
 * @ubuf: pointer to user buffer containing string.
 * @ulen: buffer size in bytes.  If string is smaller than this
@@ -619,7 +619,7 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
EXPORT_SYMBOL(bitmap_parselist);

/**
 * bitmap_pos_to_ord(buf, pos, bits)
 * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
 * @buf: pointer to a bitmap
 * @pos: a bit position in @buf (0 <= @pos < @bits)
 * @bits: number of valid bit positions in @buf
@@ -655,7 +655,7 @@ static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
}

/**
 * bitmap_ord_to_pos(buf, ord, bits)
 * bitmap_ord_to_pos - find position of n-th set bit in bitmap
 * @buf: pointer to bitmap
 * @ord: ordinal bit position (n-th set bit, n >= 0)
 * @bits: number of valid bit positions in @buf
@@ -733,10 +733,9 @@ void bitmap_remap(unsigned long *dst, const unsigned long *src,
	bitmap_zero(dst, bits);

	w = bitmap_weight(new, bits);
	for (oldbit = find_first_bit(src, bits);
	     oldbit < bits;
	     oldbit = find_next_bit(src, bits, oldbit + 1)) {
	for_each_set_bit(oldbit, src, bits) {
		int n = bitmap_pos_to_ord(old, oldbit, bits);

		if (n < 0 || w == 0)
			set_bit(oldbit, dst);	/* identity map */
		else
@@ -903,9 +902,7 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig,
	 */

	m = 0;
	for (n = find_first_bit(relmap, bits);
	     n < bits;
	     n = find_next_bit(relmap, bits, n + 1)) {
	for_each_set_bit(n, relmap, bits) {
		/* m == bitmap_pos_to_ord(relmap, n, bits) */
		if (test_bit(m, orig))
			set_bit(n, dst);
@@ -934,9 +931,7 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig,
		return;
	bitmap_zero(dst, bits);

	for (oldbit = find_first_bit(orig, bits);
	     oldbit < bits;
	     oldbit = find_next_bit(orig, bits, oldbit + 1))
	for_each_set_bit(oldbit, orig, bits)
		set_bit(oldbit % sz, dst);
}
EXPORT_SYMBOL(bitmap_fold);
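[Editor's note: the change in this file is mechanical — every open-coded find_first_bit()/find_next_bit() walk becomes the for_each_set_bit() helper, which expands to the same loop. A minimal sketch of the equivalence; use_bit() is a hypothetical consumer.]

	/* before: open-coded iteration over every set bit */
	for (bit = find_first_bit(map, nbits);
	     bit < nbits;
	     bit = find_next_bit(map, nbits, bit + 1))
		use_bit(bit);

	/* after: the same iteration, via the helper macro */
	for_each_set_bit(bit, map, nbits)
		use_bit(bit);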

lib/btree.c | 797 (new file)

@@ -0,0 +1,797 @@
/*
 * lib/btree.c - Simple In-memory B+Tree
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
 * Bits and pieces stolen from Peter Zijlstra's code, which is
 * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com>
 * GPLv2
 *
 * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch
 *
 * A relatively simple B+Tree implementation.  I have written it as a learning
 * exercise to understand how B+Trees work.  Turned out to be useful as well.
 *
 * B+Trees can be used similar to Linux radix trees (which don't have anything
 * in common with textbook radix trees, beware).  Prerequisite for them working
 * well is that access to a random tree node is much faster than a large number
 * of operations within each node.
 *
 * Disks have fulfilled the prerequisite for a long time.  More recently DRAM
 * has gained similar properties, as memory access times, when measured in cpu
 * cycles, have increased.  Cacheline sizes have increased as well, which also
 * helps B+Trees.
 *
 * Compared to radix trees, B+Trees are more efficient when dealing with a
 * sparsely populated address space.  Between 25% and 50% of the memory is
 * occupied with valid pointers.  When densely populated, radix trees contain
 * ~98% pointers - hard to beat.  Very sparse radix trees contain only ~2%
 * pointers.
 *
 * This particular implementation stores pointers identified by a long value.
 * Storing NULL pointers is illegal, lookup will return NULL when no entry
 * was found.
 *
 * A trick was used that is not commonly found in textbooks.  The lowest
 * values are to the right, not to the left.  All used slots within a node
 * are on the left, all unused slots contain NUL values.  Most operations
 * simply loop once over all slots and terminate on the first NUL.
 */

#include <linux/btree.h>
#include <linux/cache.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define NODESIZE MAX(L1_CACHE_BYTES, 128)

struct btree_geo {
	int keylen;
	int no_pairs;
	int no_longs;
};

struct btree_geo btree_geo32 = {
	.keylen = 1,
	.no_pairs = NODESIZE / sizeof(long) / 2,
	.no_longs = NODESIZE / sizeof(long) / 2,
};
EXPORT_SYMBOL_GPL(btree_geo32);

#define LONG_PER_U64 (64 / BITS_PER_LONG)
struct btree_geo btree_geo64 = {
	.keylen = LONG_PER_U64,
	.no_pairs = NODESIZE / sizeof(long) / (1 + LONG_PER_U64),
	.no_longs = LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + LONG_PER_U64)),
};
EXPORT_SYMBOL_GPL(btree_geo64);

struct btree_geo btree_geo128 = {
	.keylen = 2 * LONG_PER_U64,
	.no_pairs = NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64),
	.no_longs = 2 * LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64)),
};
EXPORT_SYMBOL_GPL(btree_geo128);

static struct kmem_cache *btree_cachep;

void *btree_alloc(gfp_t gfp_mask, void *pool_data)
{
	return kmem_cache_alloc(btree_cachep, gfp_mask);
}
EXPORT_SYMBOL_GPL(btree_alloc);

void btree_free(void *element, void *pool_data)
{
	kmem_cache_free(btree_cachep, element);
}
EXPORT_SYMBOL_GPL(btree_free);

static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp)
{
	unsigned long *node;

	node = mempool_alloc(head->mempool, gfp);
	memset(node, 0, NODESIZE);
	return node;
}

static int longcmp(const unsigned long *l1, const unsigned long *l2, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (l1[i] < l2[i])
			return -1;
		if (l1[i] > l2[i])
			return 1;
	}
	return 0;
}

static unsigned long *longcpy(unsigned long *dest, const unsigned long *src,
		size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		dest[i] = src[i];
	return dest;
}

static unsigned long *longset(unsigned long *s, unsigned long c, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		s[i] = c;
	return s;
}

static void dec_key(struct btree_geo *geo, unsigned long *key)
{
	unsigned long val;
	int i;

	for (i = geo->keylen - 1; i >= 0; i--) {
		val = key[i];
		key[i] = val - 1;
		if (val)
			break;
	}
}

static unsigned long *bkey(struct btree_geo *geo, unsigned long *node, int n)
{
	return &node[n * geo->keylen];
}

static void *bval(struct btree_geo *geo, unsigned long *node, int n)
{
	return (void *)node[geo->no_longs + n];
}

static void setkey(struct btree_geo *geo, unsigned long *node, int n,
		   unsigned long *key)
{
	longcpy(bkey(geo, node, n), key, geo->keylen);
}

static void setval(struct btree_geo *geo, unsigned long *node, int n,
		   void *val)
{
	node[geo->no_longs + n] = (unsigned long) val;
}

static void clearpair(struct btree_geo *geo, unsigned long *node, int n)
{
	longset(bkey(geo, node, n), 0, geo->keylen);
	node[geo->no_longs + n] = 0;
}

static inline void __btree_init(struct btree_head *head)
{
	head->node = NULL;
	head->height = 0;
}

void btree_init_mempool(struct btree_head *head, mempool_t *mempool)
{
	__btree_init(head);
	head->mempool = mempool;
}
EXPORT_SYMBOL_GPL(btree_init_mempool);

int btree_init(struct btree_head *head)
{
	__btree_init(head);
	head->mempool = mempool_create(0, btree_alloc, btree_free, NULL);
	if (!head->mempool)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL_GPL(btree_init);

void btree_destroy(struct btree_head *head)
{
	mempool_destroy(head->mempool);
	head->mempool = NULL;
}
EXPORT_SYMBOL_GPL(btree_destroy);

void *btree_last(struct btree_head *head, struct btree_geo *geo,
		 unsigned long *key)
{
	int height = head->height;
	unsigned long *node = head->node;

	if (height == 0)
		return NULL;

	for ( ; height > 1; height--)
		node = bval(geo, node, 0);

	longcpy(key, bkey(geo, node, 0), geo->keylen);
	return bval(geo, node, 0);
}
EXPORT_SYMBOL_GPL(btree_last);

static int keycmp(struct btree_geo *geo, unsigned long *node, int pos,
		  unsigned long *key)
{
	return longcmp(bkey(geo, node, pos), key, geo->keylen);
}

static int keyzero(struct btree_geo *geo, unsigned long *key)
{
	int i;

	for (i = 0; i < geo->keylen; i++)
		if (key[i])
			return 0;

	return 1;
}

void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key)
{
	int i, height = head->height;
	unsigned long *node = head->node;

	if (height == 0)
		return NULL;

	for ( ; height > 1; height--) {
		for (i = 0; i < geo->no_pairs; i++)
			if (keycmp(geo, node, i, key) <= 0)
				break;
		if (i == geo->no_pairs)
			return NULL;
		node = bval(geo, node, i);
		if (!node)
			return NULL;
	}

	if (!node)
		return NULL;

	for (i = 0; i < geo->no_pairs; i++)
		if (keycmp(geo, node, i, key) == 0)
			return bval(geo, node, i);
	return NULL;
}
EXPORT_SYMBOL_GPL(btree_lookup);

int btree_update(struct btree_head *head, struct btree_geo *geo,
		 unsigned long *key, void *val)
{
	int i, height = head->height;
	unsigned long *node = head->node;

	if (height == 0)
		return -ENOENT;

	for ( ; height > 1; height--) {
		for (i = 0; i < geo->no_pairs; i++)
			if (keycmp(geo, node, i, key) <= 0)
				break;
		if (i == geo->no_pairs)
			return -ENOENT;
		node = bval(geo, node, i);
		if (!node)
			return -ENOENT;
	}

	if (!node)
		return -ENOENT;

	for (i = 0; i < geo->no_pairs; i++)
		if (keycmp(geo, node, i, key) == 0) {
			setval(geo, node, i, val);
			return 0;
		}
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(btree_update);

/*
 * Usually this function is quite similar to normal lookup.  But the key of
 * a parent node may be smaller than the smallest key of all its siblings.
 * In such a case we cannot just return NULL, as we have only proven that no
 * key smaller than __key, but larger than this parent key exists.
 * So we set __key to the parent key and retry.  We have to use the smallest
 * such parent key, which is the last parent key we encountered.
 */
void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
		     unsigned long *__key)
{
	int i, height;
	unsigned long *node, *oldnode;
	unsigned long *retry_key = NULL, key[geo->keylen];

	if (keyzero(geo, __key))
		return NULL;

	if (head->height == 0)
		return NULL;
retry:
	longcpy(key, __key, geo->keylen);
	dec_key(geo, key);

	node = head->node;
	for (height = head->height ; height > 1; height--) {
		for (i = 0; i < geo->no_pairs; i++)
			if (keycmp(geo, node, i, key) <= 0)
				break;
		if (i == geo->no_pairs)
			goto miss;
		oldnode = node;
		node = bval(geo, node, i);
		if (!node)
			goto miss;
		retry_key = bkey(geo, oldnode, i);
	}

	if (!node)
		goto miss;

	for (i = 0; i < geo->no_pairs; i++) {
		if (keycmp(geo, node, i, key) <= 0) {
			if (bval(geo, node, i)) {
				longcpy(__key, bkey(geo, node, i), geo->keylen);
				return bval(geo, node, i);
			} else
				goto miss;
		}
	}
miss:
	if (retry_key) {
		__key = retry_key;
		retry_key = NULL;
		goto retry;
	}
	return NULL;
}

static int getpos(struct btree_geo *geo, unsigned long *node,
		unsigned long *key)
{
	int i;

	for (i = 0; i < geo->no_pairs; i++) {
		if (keycmp(geo, node, i, key) <= 0)
			break;
	}
	return i;
}

static int getfill(struct btree_geo *geo, unsigned long *node, int start)
{
	int i;

	for (i = start; i < geo->no_pairs; i++)
		if (!bval(geo, node, i))
			break;
	return i;
}

/*
 * locate the correct leaf node in the btree
 */
static unsigned long *find_level(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, int level)
{
	unsigned long *node = head->node;
	int i, height;

	for (height = head->height; height > level; height--) {
		for (i = 0; i < geo->no_pairs; i++)
			if (keycmp(geo, node, i, key) <= 0)
				break;

		if ((i == geo->no_pairs) || !bval(geo, node, i)) {
			/* right-most key is too large, update it */
			/* FIXME: If the right-most key on higher levels is
			 * always zero, this wouldn't be necessary. */
			i--;
			setkey(geo, node, i, key);
		}
		BUG_ON(i < 0);
		node = bval(geo, node, i);
	}
	BUG_ON(!node);
	return node;
}

static int btree_grow(struct btree_head *head, struct btree_geo *geo,
		      gfp_t gfp)
{
	unsigned long *node;
	int fill;

	node = btree_node_alloc(head, gfp);
	if (!node)
		return -ENOMEM;
	if (head->node) {
		fill = getfill(geo, head->node, 0);
		setkey(geo, node, 0, bkey(geo, head->node, fill - 1));
		setval(geo, node, 0, head->node);
	}
	head->node = node;
	head->height++;
	return 0;
}

static void btree_shrink(struct btree_head *head, struct btree_geo *geo)
{
	unsigned long *node;
	int fill;

	if (head->height <= 1)
		return;

	node = head->node;
	fill = getfill(geo, node, 0);
	BUG_ON(fill > 1);
	head->node = bval(geo, node, 0);
	head->height--;
	mempool_free(node, head->mempool);
}

static int btree_insert_level(struct btree_head *head, struct btree_geo *geo,
			      unsigned long *key, void *val, int level,
			      gfp_t gfp)
{
	unsigned long *node;
	int i, pos, fill, err;

	BUG_ON(!val);
	if (head->height < level) {
		err = btree_grow(head, geo, gfp);
		if (err)
			return err;
	}

retry:
	node = find_level(head, geo, key, level);
	pos = getpos(geo, node, key);
	fill = getfill(geo, node, pos);
	/* two identical keys are not allowed */
	BUG_ON(pos < fill && keycmp(geo, node, pos, key) == 0);

	if (fill == geo->no_pairs) {
		/* need to split node */
		unsigned long *new;

		new = btree_node_alloc(head, gfp);
		if (!new)
			return -ENOMEM;
		err = btree_insert_level(head, geo,
				bkey(geo, node, fill / 2 - 1),
				new, level + 1, gfp);
		if (err) {
			mempool_free(new, head->mempool);
			return err;
		}
		for (i = 0; i < fill / 2; i++) {
			setkey(geo, new, i, bkey(geo, node, i));
			setval(geo, new, i, bval(geo, node, i));
			setkey(geo, node, i, bkey(geo, node, i + fill / 2));
			setval(geo, node, i, bval(geo, node, i + fill / 2));
			clearpair(geo, node, i + fill / 2);
		}
		if (fill & 1) {
			setkey(geo, node, i, bkey(geo, node, fill - 1));
			setval(geo, node, i, bval(geo, node, fill - 1));
			clearpair(geo, node, fill - 1);
		}
		goto retry;
	}
	BUG_ON(fill >= geo->no_pairs);

	/* shift and insert */
	for (i = fill; i > pos; i--) {
		setkey(geo, node, i, bkey(geo, node, i - 1));
		setval(geo, node, i, bval(geo, node, i - 1));
	}
	setkey(geo, node, pos, key);
	setval(geo, node, pos, val);

	return 0;
}

int btree_insert(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, void *val, gfp_t gfp)
{
	return btree_insert_level(head, geo, key, val, 1, gfp);
}
EXPORT_SYMBOL_GPL(btree_insert);

static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, int level);
static void merge(struct btree_head *head, struct btree_geo *geo, int level,
		unsigned long *left, int lfill,
		unsigned long *right, int rfill,
		unsigned long *parent, int lpos)
{
	int i;

	for (i = 0; i < rfill; i++) {
		/* Move all keys to the left */
		setkey(geo, left, lfill + i, bkey(geo, right, i));
		setval(geo, left, lfill + i, bval(geo, right, i));
	}
	/* Exchange left and right child in parent */
	setval(geo, parent, lpos, right);
	setval(geo, parent, lpos + 1, left);
	/* Remove left (formerly right) child from parent */
	btree_remove_level(head, geo, bkey(geo, parent, lpos), level + 1);
	mempool_free(right, head->mempool);
}

static void rebalance(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, int level, unsigned long *child, int fill)
{
	unsigned long *parent, *left = NULL, *right = NULL;
	int i, no_left, no_right;

	if (fill == 0) {
		/* Because we don't steal entries from a neighbour, this case
		 * can happen.  Parent node contains a single child, this
		 * node, so merging with a sibling never happens.
		 */
		btree_remove_level(head, geo, key, level + 1);
		mempool_free(child, head->mempool);
		return;
	}

	parent = find_level(head, geo, key, level + 1);
	i = getpos(geo, parent, key);
	BUG_ON(bval(geo, parent, i) != child);

	if (i > 0) {
		left = bval(geo, parent, i - 1);
		no_left = getfill(geo, left, 0);
		if (fill + no_left <= geo->no_pairs) {
			merge(head, geo, level,
					left, no_left,
					child, fill,
					parent, i - 1);
			return;
		}
	}
	if (i + 1 < getfill(geo, parent, i)) {
		right = bval(geo, parent, i + 1);
		no_right = getfill(geo, right, 0);
		if (fill + no_right <= geo->no_pairs) {
			merge(head, geo, level,
					child, fill,
					right, no_right,
					parent, i);
			return;
		}
	}
	/*
	 * We could also try to steal one entry from the left or right
	 * neighbor.  By not doing so we changed the invariant from
	 * "all nodes are at least half full" to "no two neighboring
	 * nodes can be merged".  Which means that the average fill of
	 * all nodes is still half or better.
	 */
}

static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, int level)
{
	unsigned long *node;
	int i, pos, fill;
	void *ret;

	if (level > head->height) {
		/* we recursed all the way up */
		head->height = 0;
		head->node = NULL;
		return NULL;
	}

	node = find_level(head, geo, key, level);
	pos = getpos(geo, node, key);
	fill = getfill(geo, node, pos);
	if ((level == 1) && (keycmp(geo, node, pos, key) != 0))
		return NULL;
	ret = bval(geo, node, pos);

	/* remove and shift */
	for (i = pos; i < fill - 1; i++) {
		setkey(geo, node, i, bkey(geo, node, i + 1));
		setval(geo, node, i, bval(geo, node, i + 1));
	}
	clearpair(geo, node, fill - 1);

	if (fill - 1 < geo->no_pairs / 2) {
		if (level < head->height)
			rebalance(head, geo, key, level, node, fill - 1);
		else if (fill - 1 == 1)
			btree_shrink(head, geo);
	}

	return ret;
}

void *btree_remove(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key)
{
	if (head->height == 0)
		return NULL;

	return btree_remove_level(head, geo, key, 1);
}
EXPORT_SYMBOL_GPL(btree_remove);

int btree_merge(struct btree_head *target, struct btree_head *victim,
		struct btree_geo *geo, gfp_t gfp)
{
	unsigned long key[geo->keylen];
	unsigned long dup[geo->keylen];
	void *val;
	int err;

	BUG_ON(target == victim);

	if (!(target->node)) {
		/* target is empty, just copy fields over */
		target->node = victim->node;
		target->height = victim->height;
		__btree_init(victim);
		return 0;
	}

	/* TODO: This needs some optimizations.  Currently we do three tree
	 * walks to remove a single object from the victim.
	 */
	for (;;) {
		if (!btree_last(victim, geo, key))
			break;
		val = btree_lookup(victim, geo, key);
		err = btree_insert(target, geo, key, val, gfp);
		if (err)
			return err;
		/* We must make a copy of the key, as the original will get
		 * mangled inside btree_remove. */
		longcpy(dup, key, geo->keylen);
		btree_remove(victim, geo, dup);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(btree_merge);

static size_t __btree_for_each(struct btree_head *head, struct btree_geo *geo,
			       unsigned long *node, unsigned long opaque,
			       void (*func)(void *elem, unsigned long opaque,
					    unsigned long *key, size_t index,
					    void *func2),
			       void *func2, int reap, int height, size_t count)
{
	int i;
	unsigned long *child;

	for (i = 0; i < geo->no_pairs; i++) {
		child = bval(geo, node, i);
		if (!child)
			break;
		if (height > 1)
			count = __btree_for_each(head, geo, child, opaque,
					func, func2, reap, height - 1, count);
		else
			func(child, opaque, bkey(geo, node, i), count++,
					func2);
	}
	if (reap)
		mempool_free(node, head->mempool);
	return count;
}

static void empty(void *elem, unsigned long opaque, unsigned long *key,
		  size_t index, void *func2)
{
}

void visitorl(void *elem, unsigned long opaque, unsigned long *key,
	      size_t index, void *__func)
{
	visitorl_t func = __func;

	func(elem, opaque, *key, index);
}
EXPORT_SYMBOL_GPL(visitorl);

void visitor32(void *elem, unsigned long opaque, unsigned long *__key,
	       size_t index, void *__func)
{
	visitor32_t func = __func;
	u32 *key = (void *)__key;

	func(elem, opaque, *key, index);
}
EXPORT_SYMBOL_GPL(visitor32);

void visitor64(void *elem, unsigned long opaque, unsigned long *__key,
	       size_t index, void *__func)
{
	visitor64_t func = __func;
	u64 *key = (void *)__key;

	func(elem, opaque, *key, index);
}
EXPORT_SYMBOL_GPL(visitor64);

void visitor128(void *elem, unsigned long opaque, unsigned long *__key,
		size_t index, void *__func)
{
	visitor128_t func = __func;
	u64 *key = (void *)__key;

	func(elem, opaque, key[0], key[1], index);
}
EXPORT_SYMBOL_GPL(visitor128);

size_t btree_visitor(struct btree_head *head, struct btree_geo *geo,
		     unsigned long opaque,
		     void (*func)(void *elem, unsigned long opaque,
				  unsigned long *key,
				  size_t index, void *func2),
		     void *func2)
{
	size_t count = 0;

	if (!func2)
		func = empty;
	if (head->node)
		count = __btree_for_each(head, geo, head->node, opaque, func,
				func2, 0, head->height, 0);
	return count;
}
EXPORT_SYMBOL_GPL(btree_visitor);

size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo,
			  unsigned long opaque,
			  void (*func)(void *elem, unsigned long opaque,
				       unsigned long *key,
				       size_t index, void *func2),
			  void *func2)
{
	size_t count = 0;

	if (!func2)
		func = empty;
	if (head->node)
		count = __btree_for_each(head, geo, head->node, opaque, func,
				func2, 1, head->height, 0);
	__btree_init(head);
	return count;
}
EXPORT_SYMBOL_GPL(btree_grim_visitor);

static int __init btree_module_init(void)
{
	btree_cachep = kmem_cache_create("btree_node", NODESIZE, 0,
			SLAB_HWCACHE_ALIGN, NULL);
	return 0;
}

static void __exit btree_module_exit(void)
{
	kmem_cache_destroy(btree_cachep);
}

/* If core code starts using btree, initialization should happen even earlier */
module_init(btree_module_init);
module_exit(btree_module_exit);

MODULE_AUTHOR("Joern Engel <joern@logfs.org>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_LICENSE("GPL");
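[Editor's note: since this file is entirely new in the merge, a caller-side sketch may help. It uses only the functions and geometries exported above; error handling is abbreviated and the payload pointer is hypothetical.]

	#include <linux/btree.h>

	static struct btree_head head;

	static int example(void *payload)
	{
		unsigned long key = 42;
		void *val;
		int err;

		err = btree_init(&head);	/* sets up the node mempool */
		if (err)
			return err;

		/* keys for btree_geo32 are a single unsigned long */
		err = btree_insert(&head, &btree_geo32, &key, payload, GFP_KERNEL);
		if (err)
			goto out;

		val = btree_lookup(&head, &btree_geo32, &key);	/* -> payload */
		btree_remove(&head, &btree_geo32, &key);
	out:
		btree_destroy(&head);
		return err;
	}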

lib/cpumask.c

@@ -1,3 +1,4 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>

lib/crc32.c | 31

@@ -25,16 +25,19 @@
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <asm/atomic.h>
#include "crc32defs.h"
#if CRC_LE_BITS == 8
#define tole(x) __constant_cpu_to_le32(x)
#define tobe(x) __constant_cpu_to_be32(x)
# define tole(x) __constant_cpu_to_le32(x)
#else
#define tole(x) (x)
#define tobe(x) (x)
# define tole(x) (x)
#endif

#if CRC_BE_BITS == 8
# define tobe(x) __constant_cpu_to_be32(x)
#else
# define tobe(x) (x)
#endif
#include "crc32table.h"

@@ -52,20 +55,19 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab)
# else
#  define DO_CRC(x) crc = tab[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
# endif
	const u32 *b = (const u32 *)buf;
	const u32 *b;
	size_t rem_len;

	/* Align it */
	if (unlikely((long)b & 3 && len)) {
		u8 *p = (u8 *)b;
	if (unlikely((long)buf & 3 && len)) {
		do {
			DO_CRC(*p++);
		} while ((--len) && ((long)p)&3);
		b = (u32 *)p;
			DO_CRC(*buf++);
		} while ((--len) && ((long)buf)&3);
	}
	rem_len = len & 3;
	/* load data 32 bits wide, xor data 32 bits wide. */
	len = len >> 2;
	b = (const u32 *)buf;
	for (--b; len; --len) {
		crc ^= *++b; /* use pre increment for speed */
		DO_CRC(0);
@@ -82,6 +84,7 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab)
	} while (--len);
	}
	return crc;
#undef DO_CRC
}
#endif
/**
@@ -119,9 +122,6 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
	crc = __cpu_to_le32(crc);
	crc = crc32_body(crc, p, len, tab);
	return __le32_to_cpu(crc);
#undef ENDIAN_SHIFT
#undef DO_CRC

# elif CRC_LE_BITS == 4
	while (len--) {
		crc ^= *p++;
@@ -179,9 +179,6 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
	crc = __cpu_to_be32(crc);
	crc = crc32_body(crc, p, len, tab);
	return __be32_to_cpu(crc);
#undef ENDIAN_SHIFT
#undef DO_CRC

# elif CRC_BE_BITS == 4
	while (len--) {
		crc ^= *p++ << 24;
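[Editor's note: nothing in this refactor changes the caller-visible interface — the alignment loop now walks buf directly before the pointer is cast to u32 *. For reference, a caller-side sketch; the ~0 seed with final inversion is the standard CRC-32 convention, though individual users pick their own seeds.]

	#include <linux/crc32.h>

	u32 csum = crc32_le(~0U, data, len) ^ ~0U;	/* data/len: caller's buffer */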

lib/debug_locks.c

@@ -23,6 +23,7 @@
 * shut up after that.
 */
int debug_locks = 1;
EXPORT_SYMBOL_GPL(debug_locks);

/*
 * The locking-testsuite uses <debug_locks_silent> to get a

lib/debugobjects.c

@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>

#define ODEBUG_HASH_BITS	14

lib/decompress_unlzo.c

@@ -97,7 +97,7 @@ STATIC inline int INIT unlzo(u8 *input, int in_len,
	u32 src_len, dst_len;
	size_t tmp;
	u8 *in_buf, *in_buf_save, *out_buf;
	int obytes_processed = 0;
	int ret = -1;

	set_error_fn(error_fn);

@@ -174,15 +174,22 @@ STATIC inline int INIT unlzo(u8 *input, int in_len,

		/* decompress */
		tmp = dst_len;
		r = lzo1x_decompress_safe((u8 *) in_buf, src_len,

		/* When the input data is not compressed at all,
		 * lzo1x_decompress_safe will fail, so call memcpy()
		 * instead */
		if (unlikely(dst_len == src_len))
			memcpy(out_buf, in_buf, src_len);
		else {
			r = lzo1x_decompress_safe((u8 *) in_buf, src_len,
						out_buf, &tmp);

		if (r != LZO_E_OK || dst_len != tmp) {
			error("Compressed data violation");
			goto exit_2;
			if (r != LZO_E_OK || dst_len != tmp) {
				error("Compressed data violation");
				goto exit_2;
			}
		}

		obytes_processed += dst_len;
		if (flush)
			flush(out_buf, dst_len);
		if (output)
@@ -196,6 +203,7 @@ STATIC inline int INIT unlzo(u8 *input, int in_len,
		in_buf += src_len;
	}

	ret = 0;
exit_2:
	if (!input)
		free(in_buf);
@@ -203,7 +211,7 @@ exit_1:
	if (!output)
		free(out_buf);
exit:
	return obytes_processed;
	return ret;
}

#define decompress unlzo

lib/devres.c

@@ -1,5 +1,6 @@
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/module.h>

void devm_ioremap_release(struct device *dev, void *res)

lib/dma-debug.c

@@ -570,7 +570,7 @@ static ssize_t filter_write(struct file *file, const char __user *userbuf,
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN; ++i) {
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
			break;
@@ -587,7 +587,7 @@ out_unlock:
	return count;
}

const struct file_operations filter_fops = {
static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
};

lib/dynamic_debug.c

@@ -25,6 +25,7 @@
#include <linux/uaccess.h>
#include <linux/dynamic_debug.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

extern struct _ddebug __start___verbose[];
extern struct _ddebug __stop___verbose[];

lib/flex_array.c

@@ -99,7 +99,7 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
	ret->element_size = element_size;
	ret->total_nr_elements = total;
	if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
		memset(ret->parts[0], FLEX_ARRAY_FREE,
		memset(&ret->parts[0], FLEX_ARRAY_FREE,
		       FLEX_ARRAY_BASE_BYTES_LEFT);
	return ret;
}

lib/genalloc.c

@@ -10,6 +10,7 @@
 * Version 2. See the file COPYING for more details.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitmap.h>
#include <linux/genalloc.h>

lib/hweight.c

@@ -11,11 +11,18 @@

unsigned int hweight32(unsigned int w)
{
#ifdef ARCH_HAS_FAST_MULTIPLIER
	w -= (w >> 1) & 0x55555555;
	w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
	w =  (w + (w >> 4)) & 0x0f0f0f0f;
	return (w * 0x01010101) >> 24;
#else
	unsigned int res = w - ((w >> 1) & 0x55555555);
	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
	res = (res + (res >> 4)) & 0x0F0F0F0F;
	res = res + (res >> 8);
	return (res + (res >> 16)) & 0x000000FF;
#endif
}
EXPORT_SYMBOL(hweight32);
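[Editor's note: the ARCH_HAS_FAST_MULTIPLIER branch added above is the classic parallel popcount — each step folds neighbouring bit-fields, and the final multiply sums all four byte counts into the top byte. A worked trace for one input, as a comment-level sketch:]

	/* w = 0xF0F0F0F0 (16 bits set)                                      */
	w -= (w >> 1) & 0x55555555;	/* 2-bit fields hold pair counts: 0xA0A0A0A0 */
	w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
					/* 4-bit fields hold nibble counts: 0x40404040 */
	w =  (w + (w >> 4)) & 0x0f0f0f0f;
					/* bytes hold byte counts: 0x04040404 */
	return (w * 0x01010101) >> 24;	/* top byte = 4+4+4+4 = 16 */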

lib/idr.c | 12

@@ -156,10 +156,12 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (!(p = pa[l])) {
			if (id >= 1 << (idp->layers * IDR_BITS)) {
				*starting_id = id;
				return IDR_NEED_TO_GROW;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
@@ -502,7 +504,7 @@ void *idr_find(struct idr *idp, int id)
	int n;
	struct idr_layer *p;

	p = rcu_dereference(idp->top);
	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;
@@ -517,7 +519,7 @@ void *idr_find(struct idr *idp, int id)
	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return((void *)p);
}
@@ -550,7 +552,7 @@ int idr_for_each(struct idr *idp,
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference(idp->top);
	p = rcu_dereference_raw(idp->top);
	max = 1 << n;

	id = 0;
@@ -558,7 +560,7 @@ int idr_for_each(struct idr *idp,
	while (n > 0 && p) {
		n -= IDR_BITS;
		*paa++ = p;
		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}

	if (p) {

lib/inflate.c

@@ -103,6 +103,7 @@
   the two sets of lengths.
 */
#include <linux/compiler.h>
#include <linux/slab.h>

#ifdef RCSID
static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #";

lib/kasprintf.c

@@ -6,6 +6,7 @@

#include <stdarg.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>

lib/kobject.c

@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
	return ret;
}

struct sysfs_ops kobj_sysfs_ops = {
const struct sysfs_ops kobj_sysfs_ops = {
	.show	= kobj_attr_show,
	.store	= kobj_attr_store,
};
@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
 * If the kset was not able to be created, NULL will be returned.
 */
static struct kset *kset_create(const char *name,
				struct kset_uevent_ops *uevent_ops,
				const struct kset_uevent_ops *uevent_ops,
				struct kobject *parent_kobj)
{
	struct kset *kset;
@@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
 * If the kset was not able to be created, NULL will be returned.
 */
struct kset *kset_create_and_add(const char *name,
				 struct kset_uevent_ops *uevent_ops,
				 const struct kset_uevent_ops *uevent_ops,
				 struct kobject *parent_kobj)
{
	struct kset *kset;

lib/kobject_uevent.c

@@ -18,6 +18,7 @@
#include <linux/string.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/skbuff.h>
@@ -95,7 +96,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
	const char *subsystem;
	struct kobject *top_kobj;
	struct kset *kset;
	struct kset_uevent_ops *uevent_ops;
	const struct kset_uevent_ops *uevent_ops;
	u64 seq;
	int i = 0;
	int retval = 0;

lib/kref.c

@@ -13,6 +13,7 @@

#include <linux/kref.h>
#include <linux/module.h>
#include <linux/slab.h>

/**
 * kref_set - initialize object and set refcount to requested number.

lib/lcm.c | 15 (new file)

@@ -0,0 +1,15 @@
#include <linux/kernel.h>
#include <linux/gcd.h>
#include <linux/module.h>

/* Lowest common multiple */
unsigned long lcm(unsigned long a, unsigned long b)
{
	if (a && b)
		return (a * b) / gcd(a, b);
	else if (b)
		return b;

	return a;
}
EXPORT_SYMBOL_GPL(lcm);
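[Editor's note: a worked example of the identity the helper uses, lcm(a,b) = a*b / gcd(a,b). Note the a*b product can overflow unsigned long for large inputs; the helper mirrors the textbook identity rather than guarding against that.]

	/* lcm(4, 6): gcd(4, 6) == 2, so lcm == (4 * 6) / 2 == 12 */
	unsigned long x = lcm(4, 6);	/* x == 12 */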

lib/list_sort.c | 267

@@ -4,99 +4,214 @@
#include <linux/slab.h>
#include <linux/list.h>

#define MAX_LIST_LENGTH_BITS 20

/*
 * Returns a list organized in an intermediate format suited
 * to chaining of merge() calls: null-terminated, no reserved or
 * sentinel head node, "prev" links not maintained.
 */
static struct list_head *merge(void *priv,
				int (*cmp)(void *priv, struct list_head *a,
					struct list_head *b),
				struct list_head *a, struct list_head *b)
{
	struct list_head head, *tail = &head;

	while (a && b) {
		/* if equal, take 'a' -- important for sort stability */
		if ((*cmp)(priv, a, b) <= 0) {
			tail->next = a;
			a = a->next;
		} else {
			tail->next = b;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a?:b;
	return head.next;
}

/*
 * Combine final list merge with restoration of standard doubly-linked
 * list structure.  This approach duplicates code from merge(), but
 * runs faster than the tidier alternatives of either a separate final
 * prev-link restoration pass, or maintaining the prev links
 * throughout.
 */
static void merge_and_restore_back_links(void *priv,
				int (*cmp)(void *priv, struct list_head *a,
					struct list_head *b),
				struct list_head *head,
				struct list_head *a, struct list_head *b)
{
	struct list_head *tail = head;

	while (a && b) {
		/* if equal, take 'a' -- important for sort stability */
		if ((*cmp)(priv, a, b) <= 0) {
			tail->next = a;
			a->prev = tail;
			a = a->next;
		} else {
			tail->next = b;
			b->prev = tail;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a ? : b;

	do {
		/*
		 * In worst cases this loop may run many iterations.
		 * Continue callbacks to the client even though no
		 * element comparison is needed, so the client's cmp()
		 * routine can invoke cond_resched() periodically.
		 */
		(*cmp)(priv, tail, tail);

		tail->next->prev = tail;
		tail = tail->next;
	} while (tail->next);

	tail->next = head;
	head->prev = tail;
}

/**
 * list_sort - sort a list.
 * @priv: private data, passed to @cmp
 * list_sort - sort a list
 * @priv: private data, opaque to list_sort(), passed to @cmp
 * @head: the list to sort
 * @cmp: the elements comparison function
 *
 * This function has been implemented by Mark J Roberts <mjr@znex.org>. It
 * implements "merge sort" which has O(nlog(n)) complexity. The list is sorted
 * in ascending order.
 * This function implements "merge sort", which has O(nlog(n))
 * complexity.
 *
 * The comparison function @cmp is supposed to return a negative value if @a is
 * less than @b, and a positive value if @a is greater than @b. If @a and @b
 * are equivalent, then it does not matter what this function returns.
 * The comparison function @cmp must return a negative value if @a
 * should sort before @b, and a positive value if @a should sort after
 * @b. If @a and @b are equivalent, and their original relative
 * ordering is to be preserved, @cmp must return 0.
 */
void list_sort(void *priv, struct list_head *head,
		int (*cmp)(void *priv, struct list_head *a,
			struct list_head *b))
	       int (*cmp)(void *priv, struct list_head *a,
			  struct list_head *b))
{
	struct list_head *p, *q, *e, *list, *tail, *oldhead;
	int insize, nmerges, psize, qsize, i;
	struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists
						-- last slot is a sentinel */
	int lev;  /* index into part[] */
	int max_lev = 0;
	struct list_head *list;

	if (list_empty(head))
		return;

	memset(part, 0, sizeof(part));

	head->prev->next = NULL;
	list = head->next;
	list_del(head);
	insize = 1;
	for (;;) {
		p = oldhead = list;
		list = tail = NULL;
		nmerges = 0;

		while (p) {
			nmerges++;
			q = p;
			psize = 0;
			for (i = 0; i < insize; i++) {
				psize++;
				q = q->next == oldhead ? NULL : q->next;
				if (!q)
					break;
			}
	while (list) {
		struct list_head *cur = list;
		list = list->next;
		cur->next = NULL;

			qsize = insize;
			while (psize > 0 || (qsize > 0 && q)) {
				if (!psize) {
					e = q;
					q = q->next;
					qsize--;
					if (q == oldhead)
						q = NULL;
				} else if (!qsize || !q) {
					e = p;
					p = p->next;
					psize--;
					if (p == oldhead)
						p = NULL;
				} else if (cmp(priv, p, q) <= 0) {
					e = p;
					p = p->next;
					psize--;
					if (p == oldhead)
						p = NULL;
				} else {
					e = q;
					q = q->next;
					qsize--;
					if (q == oldhead)
						q = NULL;
				}
				if (tail)
					tail->next = e;
				else
					list = e;
				e->prev = tail;
				tail = e;
			}
			p = q;
		for (lev = 0; part[lev]; lev++) {
			cur = merge(priv, cmp, part[lev], cur);
			part[lev] = NULL;
		}

		tail->next = list;
		list->prev = tail;

		if (nmerges <= 1)
			break;

		insize *= 2;
		if (lev > max_lev) {
			if (unlikely(lev >= ARRAY_SIZE(part)-1)) {
				printk_once(KERN_DEBUG "list passed to"
					" list_sort() too long for"
					" efficiency\n");
				lev--;
			}
			max_lev = lev;
		}
		part[lev] = cur;
	}

	head->next = list;
	head->prev = list->prev;
	list->prev->next = head;
	list->prev = head;
	for (lev = 0; lev < max_lev; lev++)
		if (part[lev])
			list = merge(priv, cmp, part[lev], list);

	merge_and_restore_back_links(priv, cmp, head, part[max_lev], list);
}
EXPORT_SYMBOL(list_sort);

#ifdef DEBUG_LIST_SORT
struct debug_el {
	struct list_head l_h;
	int value;
	unsigned serial;
};

static int cmp(void *priv, struct list_head *a, struct list_head *b)
{
	return container_of(a, struct debug_el, l_h)->value
	     - container_of(b, struct debug_el, l_h)->value;
}

EXPORT_SYMBOL(list_sort);
/*
 * The pattern of set bits in the list length determines which cases
 * are hit in list_sort().
 */
#define LIST_SORT_TEST_LENGTH (512+128+2) /* not including head */

static int __init list_sort_test(void)
{
	int i, r = 1, count;
	struct list_head *head = kmalloc(sizeof(*head), GFP_KERNEL);
	struct list_head *cur;

	printk(KERN_WARNING "testing list_sort()\n");

	cur = head;
	for (i = 0; i < LIST_SORT_TEST_LENGTH; i++) {
		struct debug_el *el = kmalloc(sizeof(*el), GFP_KERNEL);
		BUG_ON(!el);
		/* force some equivalencies */
		el->value = (r = (r * 725861) % 6599) % (LIST_SORT_TEST_LENGTH/3);
		el->serial = i;

		el->l_h.prev = cur;
		cur->next = &el->l_h;
		cur = cur->next;
	}
	head->prev = cur;

	list_sort(NULL, head, cmp);

	count = 1;
	for (cur = head->next; cur->next != head; cur = cur->next) {
		struct debug_el *el = container_of(cur, struct debug_el, l_h);
		int cmp_result = cmp(NULL, cur, cur->next);
		if (cur->next->prev != cur) {
			printk(KERN_EMERG "list_sort() returned "
						"a corrupted list!\n");
			return 1;
		} else if (cmp_result > 0) {
			printk(KERN_EMERG "list_sort() failed to sort!\n");
			return 1;
		} else if (cmp_result == 0 &&
				el->serial >= container_of(cur->next,
					struct debug_el, l_h)->serial) {
			printk(KERN_EMERG "list_sort() failed to preserve order"
						" of equivalent elements!\n");
			return 1;
		}
		kfree(cur->prev);
		count++;
	}
	kfree(cur);
	if (count != LIST_SORT_TEST_LENGTH) {
		printk(KERN_EMERG "list_sort() returned list of "
						"different length!\n");
		return 1;
	}
	return 0;
}
module_init(list_sort_test);
#endif
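[Editor's note: a caller-side sketch of the rewritten interface, with a hypothetical element type; it assumes the usual list_sort() declaration is visible to the caller. The cmp() contract is exactly as documented above — negative, zero, positive — with 0 preserving the original relative order.]

	struct item {			/* hypothetical element */
		struct list_head list;
		int key;
	};

	static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
	{
		struct item *ia = container_of(a, struct item, list);
		struct item *ib = container_of(b, struct item, list);

		return ia->key - ib->key;
	}

	/* my_list is a LIST_HEAD of struct item */
	list_sort(NULL, &my_list, item_cmp);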

lib/lmb.c | 13

@@ -205,9 +205,8 @@ long lmb_add(u64 base, u64 size)

}

long lmb_remove(u64 base, u64 size)
static long __lmb_remove(struct lmb_region *rgn, u64 base, u64 size)
{
	struct lmb_region *rgn = &(lmb.memory);
	u64 rgnbegin, rgnend;
	u64 end = base + size;
	int i;
@@ -254,6 +253,16 @@ long lmb_remove(u64 base, u64 size)
	return lmb_add_region(rgn, end, rgnend - end);
}

long lmb_remove(u64 base, u64 size)
{
	return __lmb_remove(&lmb.memory, base, size);
}

long __init lmb_free(u64 base, u64 size)
{
	return __lmb_remove(&lmb.reserved, base, size);
}

long __init lmb_reserve(u64 base, u64 size)
{
	struct lmb_region *_rgn = &lmb.reserved;

lib/radix-tree.c

@@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/rcupdate.h>
@@ -364,7 +363,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
	unsigned int height, shift;
	struct radix_tree_node *node, **slot;

	node = rcu_dereference(root->rnode);
	node = rcu_dereference_raw(root->rnode);
	if (node == NULL)
		return NULL;

@@ -384,7 +383,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
	do {
		slot = (struct radix_tree_node **)
			(node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
		node = rcu_dereference(*slot);
		node = rcu_dereference_raw(*slot);
		if (node == NULL)
			return NULL;

@@ -556,6 +555,10 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
 *
 * 0: tag not present or not set
 * 1: tag set
 *
 * Note that the return value of this function may not be relied on, even if
 * the RCU lock is held, unless tag modification and node deletion are excluded
 * from concurrency.
 */
int radix_tree_tag_get(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
@@ -568,7 +571,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
	if (!root_tag_get(root, tag))
		return 0;

	node = rcu_dereference(root->rnode);
	node = rcu_dereference_raw(root->rnode);
	if (node == NULL)
		return 0;

@@ -596,13 +599,9 @@ int radix_tree_tag_get(struct radix_tree_root *root,
		 */
		if (!tag_get(node, tag, offset))
			saw_unset_tag = 1;
		if (height == 1) {
			int ret = tag_get(node, tag, offset);

			BUG_ON(ret && saw_unset_tag);
			return !!ret;
		}
		node = rcu_dereference(node->slots[offset]);
		if (height == 1)
			return !!tag_get(node, tag, offset);
		node = rcu_dereference_raw(node->slots[offset]);
		shift -= RADIX_TREE_MAP_SHIFT;
		height--;
	}
@@ -711,7 +710,7 @@ __lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
		}

		shift -= RADIX_TREE_MAP_SHIFT;
		slot = rcu_dereference(slot->slots[i]);
		slot = rcu_dereference_raw(slot->slots[i]);
		if (slot == NULL)
			goto out;
	}
@@ -758,7 +757,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
	unsigned long cur_index = first_index;
	unsigned int ret;

	node = rcu_dereference(root->rnode);
	node = rcu_dereference_raw(root->rnode);
	if (!node)
		return 0;

@@ -787,7 +786,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
		slot = *(((void ***)results)[ret + i]);
		if (!slot)
			continue;
		results[ret + nr_found] = rcu_dereference(slot);
		results[ret + nr_found] = rcu_dereference_raw(slot);
		nr_found++;
	}
	ret += nr_found;
@@ -826,7 +825,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
	unsigned long cur_index = first_index;
	unsigned int ret;

	node = rcu_dereference(root->rnode);
	node = rcu_dereference_raw(root->rnode);
	if (!node)
		return 0;

@@ -915,7 +914,7 @@ __lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index,
		}
	}
	shift -= RADIX_TREE_MAP_SHIFT;
	slot = rcu_dereference(slot->slots[i]);
	slot = rcu_dereference_raw(slot->slots[i]);
	if (slot == NULL)
		break;
	}
@@ -951,7 +950,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
	if (!root_tag_get(root, tag))
		return 0;

	node = rcu_dereference(root->rnode);
	node = rcu_dereference_raw(root->rnode);
	if (!node)
		return 0;

@@ -980,7 +979,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
		slot = *(((void ***)results)[ret + i]);
		if (!slot)
			continue;
		results[ret + nr_found] = rcu_dereference(slot);
		results[ret + nr_found] = rcu_dereference_raw(slot);
		nr_found++;
	}
	ret += nr_found;
@@ -1020,7 +1019,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
	if (!root_tag_get(root, tag))
		return 0;

	node = rcu_dereference(root->rnode);
	node = rcu_dereference_raw(root->rnode);
	if (!node)
		return 0;
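[Editor's note: the rcu_dereference() -> rcu_dereference_raw() conversions above are what keep these lookups quiet under the PROVE_RCU checks introduced in this merge: the checked variant asserts that an RCU read-side section (or another documented protection) is held, while the _raw variant performs only the memory-ordering part. A sketch of the distinction, assuming PROVE_RCU=y:]

	rcu_read_lock();
	p = rcu_dereference(root->rnode);	/* checked: must be in RCU read side */
	rcu_read_unlock();

	/* caller guarantees protection by other means (e.g. a lock held),
	 * so the lockdep check is deliberately bypassed: */
	p = rcu_dereference_raw(root->rnode);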

lib/ratelimit.c

@@ -16,9 +16,14 @@
/*
 * __ratelimit - rate limiting
 * @rs: ratelimit_state data
 * @func: name of calling function
 *
 * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks
 * in every @rs->ratelimit_jiffies
 * This enforces a rate limit: not more than @rs->burst callbacks
 * in every @rs->interval
 *
 * RETURNS:
 * 0 means callbacks will be suppressed.
 * 1 means go ahead and do it.
 */
int ___ratelimit(struct ratelimit_state *rs, const char *func)
{
@@ -35,7 +40,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
	 * the entity that is holding the lock already:
	 */
	if (!spin_trylock_irqsave(&rs->lock, flags))
		return 1;
		return 0;

	if (!rs->begin)
		rs->begin = jiffies;
|
||||
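Callers normally reach ___ratelimit() through the __ratelimit() wrapper macro, which supplies __func__ automatically. A short usage sketch, with a hypothetical my_rs state allowing at most 10 messages per 5*HZ window:

	#include <linux/ratelimit.h>

	static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 10);

	void my_handler(void)
	{
		if (__ratelimit(&my_rs))	/* 1: go ahead, 0: suppressed */
			printk(KERN_WARNING "event flood\n");
	}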
lib/rwsem-spinlock.c
@@ -143,13 +143,14 @@ void __sched __down_read(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
+	unsigned long flags;

-	spin_lock_irq(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);

 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity++;
-		spin_unlock_irq(&sem->wait_lock);
+		spin_unlock_irqrestore(&sem->wait_lock, flags);
 		goto out;
 	}

@@ -164,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem)
 	list_add_tail(&waiter.list, &sem->wait_list);

 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irq(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);

 	/* wait to be given the lock */
 	for (;;) {
@@ -209,13 +210,14 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
+	unsigned long flags;

-	spin_lock_irq(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);

 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity = -1;
-		spin_unlock_irq(&sem->wait_lock);
+		spin_unlock_irqrestore(&sem->wait_lock, flags);
 		goto out;
 	}

@@ -230,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 	list_add_tail(&waiter.list, &sem->wait_list);

 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irq(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);

 	/* wait to be given the lock */
 	for (;;) {
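The point of the irqsave conversion above: spin_unlock_irq() re-enables interrupts unconditionally, which is wrong for callers that enter with interrupts already disabled, while the irqsave/irqrestore pair preserves the caller's interrupt state. A minimal sketch of the difference (generic spinlock usage, not tied to this file):

	spin_lock_irq(&lock);			/* disables IRQs...                 */
	spin_unlock_irq(&lock);			/* ...then re-enables them, always  */

	spin_lock_irqsave(&lock, flags);	/* disables IRQs, saves prior state */
	spin_unlock_irqrestore(&lock, flags);	/* restores whatever the caller had */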
lib/scatterlist.c
@@ -7,6 +7,7 @@
  * Version 2. See the file COPYING for more details.
  */
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/scatterlist.h>
 #include <linux/highmem.h>
lib/show_mem.c
@@ -15,7 +15,7 @@ void show_mem(void)
 	unsigned long total = 0, reserved = 0, shared = 0,
 		nonshared = 0, highmem = 0;

-	printk(KERN_INFO "Mem-Info:\n");
+	printk("Mem-Info:\n");
 	show_free_areas();

 	for_each_online_pgdat(pgdat) {
@@ -49,15 +49,15 @@ void show_mem(void)
 		pgdat_resize_unlock(pgdat, &flags);
 	}

-	printk(KERN_INFO "%lu pages RAM\n", total);
+	printk("%lu pages RAM\n", total);
 #ifdef CONFIG_HIGHMEM
-	printk(KERN_INFO "%lu pages HighMem\n", highmem);
+	printk("%lu pages HighMem\n", highmem);
 #endif
-	printk(KERN_INFO "%lu pages reserved\n", reserved);
-	printk(KERN_INFO "%lu pages shared\n", shared);
-	printk(KERN_INFO "%lu pages non-shared\n", nonshared);
+	printk("%lu pages reserved\n", reserved);
+	printk("%lu pages shared\n", shared);
+	printk("%lu pages non-shared\n", nonshared);
 #ifdef CONFIG_QUICKLIST
-	printk(KERN_INFO "%lu pages in pagetable cache\n",
+	printk("%lu pages in pagetable cache\n",
 		quicklist_total_size());
 #endif
 }
40  lib/string.c
@@ -36,25 +36,21 @@ int strnicmp(const char *s1, const char *s2, size_t len)
 	/* Yes, Virginia, it had better be unsigned */
 	unsigned char c1, c2;

-	c1 = c2 = 0;
-	if (len) {
-		do {
-			c1 = *s1;
-			c2 = *s2;
-			s1++;
-			s2++;
-			if (!c1)
-				break;
-			if (!c2)
-				break;
-			if (c1 == c2)
-				continue;
-			c1 = tolower(c1);
-			c2 = tolower(c2);
-			if (c1 != c2)
-				break;
-		} while (--len);
-	}
+	if (!len)
+		return 0;
+
+	do {
+		c1 = *s1++;
+		c2 = *s2++;
+		if (!c1 || !c2)
+			break;
+		if (c1 == c2)
+			continue;
+		c1 = tolower(c1);
+		c2 = tolower(c2);
+		if (c1 != c2)
+			break;
+	} while (--len);
 	return (int)c1 - (int)c2;
 }
 EXPORT_SYMBOL(strnicmp);
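strnicmp() compares at most len characters while ignoring case, and the rewrite makes the zero-length case an explicit early return. A few hypothetical calls:

	strnicmp("KERNEL", "kernel", 6);	/* 0: equal, ignoring case        */
	strnicmp("KERNEL", "kernfs", 4);	/* 0: only 4 characters compared  */
	strnicmp("KERNEL", "kernfs", 6);	/* < 0: 'e' sorts before 'f'      */
	strnicmp("a", "b", 0);			/* 0: zero length compares equal  */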
@@ -693,13 +689,13 @@ EXPORT_SYMBOL(strstr);
  */
 char *strnstr(const char *s1, const char *s2, size_t len)
 {
-	size_t l1 = len, l2;
+	size_t l2;

 	l2 = strlen(s2);
 	if (!l2)
 		return (char *)s1;
-	while (l1 >= l2) {
-		l1--;
+	while (len >= l2) {
+		len--;
 		if (!memcmp(s1, s2, l2))
 			return (char *)s1;
 		s1++;
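strnstr() finds the first occurrence of s2 within the first len characters of s1; the rewrite counts down the len parameter directly instead of keeping a shadow copy in l1. Hypothetical calls:

	strnstr("foobar", "bar", 6);	/* pointer to "bar"                   */
	strnstr("foobar", "bar", 4);	/* NULL: no match within 4 characters */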
lib/swiotlb.c
@@ -28,6 +28,7 @@
 #include <linux/types.h>
 #include <linux/ctype.h>
 #include <linux/highmem.h>
+#include <linux/gfp.h>

 #include <asm/io.h>
 #include <asm/dma.h>
lib/textsearch.c
@@ -103,6 +103,7 @@
 #include <linux/rcupdate.h>
 #include <linux/err.h>
 #include <linux/textsearch.h>
+#include <linux/slab.h>

 static LIST_HEAD(ts_ops);
 static DEFINE_SPINLOCK(ts_mod_lock);
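The one-line include additions above (scatterlist.c, swiotlb.c, textsearch.c) come from the tree-wide gfp.h/slab.h split picked up by this merge: code that allocates memory must now include <linux/slab.h> or <linux/gfp.h> explicitly rather than inheriting them through other headers. A minimal sketch of the dependency:

	#include <linux/slab.h>		/* now required for kmalloc()/kfree() */

	void *buf = kmalloc(64, GFP_KERNEL);
	if (buf)
		kfree(buf);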
131  lib/vsprintf.c
@@ -118,6 +118,7 @@ long long simple_strtoll(const char *cp, char **endp, unsigned int base)

 	return simple_strtoull(cp, endp, base);
 }
+EXPORT_SYMBOL(simple_strtoll);

 /**
  * strict_strtoul - convert a string to an unsigned long strictly
@@ -381,8 +382,8 @@ static noinline char *put_dec(char *buf, unsigned long long num)
 #define PLUS	4		/* show plus */
 #define SPACE	8		/* space if plus */
 #define LEFT	16		/* left justified */
-#define SMALL	32		/* Must be 32 == 0x20 */
-#define SPECIAL	64		/* 0x */
+#define SMALL	32		/* use lowercase in hex (must be 32 == 0x20) */
+#define SPECIAL	64		/* prefix hex with "0x", octal with "0" */

 enum format_type {
 	FORMAT_TYPE_NONE, /* Just a string part */
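The clarified SMALL comment points at an ASCII trick: upper- and lowercase letters differ by exactly 0x20, so number() can lowercase hex digits with a single OR. A standalone sketch, assuming a local flags variable carrying the bits above:

	int locase = flags & SMALL;	/* 0 or 0x20 */
	char c = 'A' | locase;		/* 'a' when SMALL is set, 'A' otherwise */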
@@ -408,12 +409,12 @@ enum format_type {
 };

 struct printf_spec {
-	enum format_type	type;
-	int			flags;		/* flags to number() */
-	int			field_width;	/* width of output field */
-	int			base;
-	int			precision;	/* # of digits/chars */
-	int			qualifier;
+	u8	type;		/* format_type enum */
+	u8	flags;		/* flags to number() */
+	u8	base;		/* number base, 8, 10 or 16 only */
+	u8	qualifier;	/* number qualifier, one of 'hHlLtzZ' */
+	s16	field_width;	/* width of output field */
+	s16	precision;	/* # of digits/chars */
 };

 static char *number(char *buf, char *end, unsigned long long num,
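Narrowing the members packs struct printf_spec into eight bytes, which matters because the spec is passed around by value on every conversion. A hedged compile-time check (hypothetical, not part of the patch as shown):

	BUILD_BUG_ON(sizeof(struct printf_spec) > 8);	/* 4 x u8 + 2 x s16 */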
@@ -597,22 +598,35 @@ static char *resource_string(char *buf, char *end, struct resource *res,
 #ifndef MEM_RSRC_PRINTK_SIZE
 #define MEM_RSRC_PRINTK_SIZE	10
 #endif
-	struct printf_spec hex_spec = {
+	static const struct printf_spec io_spec = {
 		.base = 16,
+		.field_width = IO_RSRC_PRINTK_SIZE,
 		.precision = -1,
 		.flags = SPECIAL | SMALL | ZEROPAD,
 	};
-	struct printf_spec dec_spec = {
+	static const struct printf_spec mem_spec = {
+		.base = 16,
+		.field_width = MEM_RSRC_PRINTK_SIZE,
+		.precision = -1,
+		.flags = SPECIAL | SMALL | ZEROPAD,
+	};
+	static const struct printf_spec bus_spec = {
+		.base = 16,
+		.field_width = 2,
+		.precision = -1,
+		.flags = SMALL | ZEROPAD,
+	};
+	static const struct printf_spec dec_spec = {
 		.base = 10,
 		.precision = -1,
 		.flags = 0,
 	};
-	struct printf_spec str_spec = {
+	static const struct printf_spec str_spec = {
 		.field_width = -1,
 		.precision = 10,
 		.flags = LEFT,
 	};
-	struct printf_spec flag_spec = {
+	static const struct printf_spec flag_spec = {
 		.base = 16,
 		.precision = -1,
 		.flags = SPECIAL | SMALL,
@@ -622,47 +636,48 @@ static char *resource_string(char *buf, char *end, struct resource *res,
 	 *	64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */
 #define RSRC_BUF_SIZE		((2 * sizeof(resource_size_t)) + 4)
 #define FLAG_BUF_SIZE		(2 * sizeof(res->flags))
-#define DECODED_BUF_SIZE	sizeof("[mem - 64bit pref disabled]")
+#define DECODED_BUF_SIZE	sizeof("[mem - 64bit pref window disabled]")
 #define RAW_BUF_SIZE		sizeof("[mem - flags 0x]")
 	char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE,
 		     2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)];

 	char *p = sym, *pend = sym + sizeof(sym);
-	int size = -1, addr = 0;
 	int decode = (fmt[0] == 'R') ? 1 : 0;
-
-	if (res->flags & IORESOURCE_IO) {
-		size = IO_RSRC_PRINTK_SIZE;
-		addr = 1;
-	} else if (res->flags & IORESOURCE_MEM) {
-		size = MEM_RSRC_PRINTK_SIZE;
-		addr = 1;
-	}
+	const struct printf_spec *specp;

 	*p++ = '[';
-	if (res->flags & IORESOURCE_IO)
+	if (res->flags & IORESOURCE_IO) {
 		p = string(p, pend, "io  ", str_spec);
-	else if (res->flags & IORESOURCE_MEM)
+		specp = &io_spec;
+	} else if (res->flags & IORESOURCE_MEM) {
 		p = string(p, pend, "mem ", str_spec);
-	else if (res->flags & IORESOURCE_IRQ)
+		specp = &mem_spec;
+	} else if (res->flags & IORESOURCE_IRQ) {
 		p = string(p, pend, "irq ", str_spec);
-	else if (res->flags & IORESOURCE_DMA)
+		specp = &dec_spec;
+	} else if (res->flags & IORESOURCE_DMA) {
 		p = string(p, pend, "dma ", str_spec);
-	else {
+		specp = &dec_spec;
+	} else if (res->flags & IORESOURCE_BUS) {
+		p = string(p, pend, "bus ", str_spec);
+		specp = &bus_spec;
+	} else {
 		p = string(p, pend, "??? ", str_spec);
+		specp = &mem_spec;
 		decode = 0;
 	}
-	hex_spec.field_width = size;
-	p = number(p, pend, res->start, addr ? hex_spec : dec_spec);
+	p = number(p, pend, res->start, *specp);
 	if (res->start != res->end) {
 		*p++ = '-';
-		p = number(p, pend, res->end, addr ? hex_spec : dec_spec);
+		p = number(p, pend, res->end, *specp);
 	}
 	if (decode) {
 		if (res->flags & IORESOURCE_MEM_64)
 			p = string(p, pend, " 64bit", str_spec);
 		if (res->flags & IORESOURCE_PREFETCH)
 			p = string(p, pend, " pref", str_spec);
+		if (res->flags & IORESOURCE_WINDOW)
+			p = string(p, pend, " window", str_spec);
 		if (res->flags & IORESOURCE_DISABLED)
 			p = string(p, pend, " disabled", str_spec);
 	} else {
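resource_string() backs the %pR/%pr format specifiers; with the per-type specs and the new window/bus handling, a decoded resource now prints its extra attributes. A hedged example with hypothetical values:

	struct resource *res = &pdev->resource[0];	/* hypothetical PCI BAR */
	printk("%pR\n", res);	/* e.g. "[mem 0xf0000000-0xf001ffff 64bit pref]" */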
@@ -681,24 +696,55 @@ static char *mac_address_string(char *buf, char *end, u8 *addr,
 	char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")];
 	char *p = mac_addr;
 	int i;
+	char separator;
+
+	if (fmt[1] == 'F') {		/* FDDI canonical format */
+		separator = '-';
+	} else {
+		separator = ':';
+	}

 	for (i = 0; i < 6; i++) {
 		p = pack_hex_byte(p, addr[i]);
 		if (fmt[0] == 'M' && i != 5)
-			*p++ = ':';
+			*p++ = separator;
 	}
 	*p = '\0';

 	return string(buf, end, mac_addr, spec);
 }

-static char *ip4_string(char *p, const u8 *addr, bool leading_zeros)
+static char *ip4_string(char *p, const u8 *addr, const char *fmt)
 {
 	int i;
+	bool leading_zeros = (fmt[0] == 'i');
+	int index;
+	int step;

+	switch (fmt[2]) {
+	case 'h':
+#ifdef __BIG_ENDIAN
+		index = 0;
+		step = 1;
+#else
+		index = 3;
+		step = -1;
+#endif
+		break;
+	case 'l':
+		index = 3;
+		step = -1;
+		break;
+	case 'n':
+	case 'b':
+	default:
+		index = 0;
+		step = 1;
+		break;
+	}
 	for (i = 0; i < 4; i++) {
 		char temp[3];	/* hold each IP quad in reverse order */
-		int digits = put_dec_trunc(temp, addr[i]) - temp;
+		int digits = put_dec_trunc(temp, addr[index]) - temp;
 		if (leading_zeros) {
 			if (digits < 3)
 				*p++ = '0';
@@ -710,6 +756,7 @@ static char *ip4_string(char *p, const u8 *addr, bool leading_zeros)
 		*p++ = temp[digits];
 		if (i < 3)
 			*p++ = '.';
+		index += step;
 	}
 	*p = '\0';
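With the new endianness modifiers, the %pI4 family can print an address held in any byte order. A hedged usage sketch (hypothetical values):

	__be32 nip = htonl(0xc0a80001);	/* 192.168.0.1, network order */
	u32 hip = 0xc0a80001;		/* same address, host order   */

	printk("%pI4\n", &nip);		/* "192.168.0.1" */
	printk("%pI4h\n", &hip);	/* "192.168.0.1" */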
@@ -789,7 +836,7 @@ static char *ip6_compressed_string(char *p, const char *addr)
 	if (useIPv4) {
 		if (needcolon)
 			*p++ = ':';
-		p = ip4_string(p, &in6.s6_addr[12], false);
+		p = ip4_string(p, &in6.s6_addr[12], "I4");
 	}
 	*p = '\0';
@@ -829,7 +876,7 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
 {
 	char ip4_addr[sizeof("255.255.255.255")];

-	ip4_string(ip4_addr, addr, fmt[0] == 'i');
+	ip4_string(ip4_addr, addr, fmt);

 	return string(buf, end, ip4_addr, spec);
 }
@@ -896,12 +943,15 @@ static char *uuid_string(char *buf, char *end, const u8 *addr,
  * - 'M' For a 6-byte MAC address, it prints the address in the
  *       usual colon-separated hex notation
  * - 'm' For a 6-byte MAC address, it prints the hex address without colons
+ * - 'MF' For a 6-byte MAC FDDI address, it prints the address
+ *       with a dash-separated hex notation
  * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
  *       IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
  *       IPv6 uses colon separated network-order 16 bit hex with leading 0's
  * - 'i' [46] for 'raw' IPv4/IPv6 addresses
  *       IPv6 omits the colons (01020304...0f)
  *       IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
+ * - '[Ii]4[hnbl]' IPv4 addresses in host, network, big or little endian order
  * - 'I6c' for IPv6 addresses printed as specified by
  *       http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00
  * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
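A hedged sketch of the three MAC forms documented above (hypothetical address):

	u8 mac[6] = { 0x00, 0x16, 0x3e, 0x11, 0x22, 0x33 };

	printk("%pM\n", mac);	/* 00:16:3e:11:22:33                  */
	printk("%pMF\n", mac);	/* 00-16-3e-11-22-33 (FDDI canonical) */
	printk("%pm\n", mac);	/* 00163e112233                       */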
@@ -939,6 +989,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 		return resource_string(buf, end, ptr, spec, fmt);
 	case 'M':			/* Colon separated: 00:01:02:03:04:05 */
 	case 'm':			/* Contiguous: 000102030405 */
+					/* [mM]F (FDDI, bit reversed) */
 		return mac_address_string(buf, end, ptr, spec, fmt);
 	case 'I':			/* Formatted IP supported
 					 * 4:	1.2.3.4
@@ -1297,7 +1348,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 			break;

 		case FORMAT_TYPE_NRCHARS: {
-			int qualifier = spec.qualifier;
+			u8 qualifier = spec.qualifier;

 			if (qualifier == 'l') {
 				long *ip = va_arg(args, long *);
@@ -1583,7 +1634,7 @@ do { \

 		case FORMAT_TYPE_NRCHARS: {
 			/* skip %n 's argument */
-			int qualifier = spec.qualifier;
+			u8 qualifier = spec.qualifier;
 			void *skip_arg;
 			if (qualifier == 'l')
 				skip_arg = va_arg(args, long *);
@@ -1849,7 +1900,9 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 	char *next;
 	char digit;
 	int num = 0;
-	int qualifier, base, field_width;
+	u8 qualifier;
+	u8 base;
+	s16 field_width;
 	bool is_sign;

 	while (*fmt && *str) {
@@ -1927,7 +1980,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 		{
 			char *s = (char *)va_arg(args, char *);
 			if (field_width == -1)
-				field_width = INT_MAX;
+				field_width = SHORT_MAX;
 			/* first, skip leading white space in buffer */
 			str = skip_spaces(str);
lib/zlib_inflate/inffast.c
@@ -8,21 +8,6 @@
 #include "inflate.h"
 #include "inffast.h"

-/* Only do the unaligned "Faster" variant when
- * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set
- *
- * On powerpc, it won't be as we don't include autoconf.h
- * automatically for the boot wrapper, which is intended as
- * we run in an environment where we may not be able to deal
- * with (even rare) alignment faults. In addition, we do not
- * define __KERNEL__ for arch/powerpc/boot unlike x86
- */
-
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#include <asm/unaligned.h>
-#include <asm/byteorder.h>
-#endif
-
 #ifndef ASMINF

 /* Allow machine dependent optimization for post-increment or pre-increment.
@@ -36,14 +21,31 @@
    - Pentium III (Anderson)
    - M68060 (Nikl)
  */
+union uu {
+	unsigned short us;
+	unsigned char b[2];
+};
+
+/* Endian independed version */
+static inline unsigned short
+get_unaligned16(const unsigned short *p)
+{
+	union uu mm;
+	unsigned char *b = (unsigned char *)p;
+
+	mm.b[0] = b[0];
+	mm.b[1] = b[1];
+	return mm.us;
+}
+
 #ifdef POSTINC
 #  define OFF 0
 #  define PUP(a) *(a)++
-#  define UP_UNALIGNED(a) get_unaligned((a)++)
+#  define UP_UNALIGNED(a) get_unaligned16((a)++)
 #else
 #  define OFF 1
 #  define PUP(a) *++(a)
-#  define UP_UNALIGNED(a) get_unaligned(++(a))
+#  define UP_UNALIGNED(a) get_unaligned16(++(a))
 #endif

 /*
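get_unaligned16() assembles the halfword one byte at a time through the union, so it is safe on any alignment and needs no endianness test: the bytes land in native memory order. A hedged standalone sketch:

	unsigned char raw[3] = { 0xAA, 0xBB, 0xCC };
	/* reads the two bytes at raw + 1 regardless of alignment */
	unsigned short v = get_unaligned16((const unsigned short *)(raw + 1));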
@@ -256,7 +258,6 @@ void inflate_fast(z_streamp strm, unsigned start)
                 }
             }
             else {
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
                 unsigned short *sout;
                 unsigned long loops;
@@ -274,22 +275,25 @@ void inflate_fast(z_streamp strm, unsigned start)
                     sfrom = (unsigned short *)(from - OFF);
                     loops = len >> 1;
                     do
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+                        PUP(sout) = PUP(sfrom);
+#else
                         PUP(sout) = UP_UNALIGNED(sfrom);
+#endif
                     while (--loops);
                     out = (unsigned char *)sout + OFF;
                     from = (unsigned char *)sfrom + OFF;
                 } else { /* dist == 1 or dist == 2 */
                     unsigned short pat16;

-                    pat16 = *(sout-2+2*OFF);
-                    if (dist == 1)
-#if defined(__BIG_ENDIAN)
-                        pat16 = (pat16 & 0xff) | ((pat16 & 0xff) << 8);
-#elif defined(__LITTLE_ENDIAN)
-                        pat16 = (pat16 & 0xff00) | ((pat16 & 0xff00) >> 8);
-#else
-#error __BIG_ENDIAN nor __LITTLE_ENDIAN is defined
-#endif
+                    pat16 = *(sout-1+OFF);
+                    if (dist == 1) {
+                        union uu mm;
+                        /* copy one char pattern to both bytes */
+                        mm.us = pat16;
+                        mm.b[0] = mm.b[1];
+                        pat16 = mm.us;
+                    }
                     loops = len >> 1;
                     do
                         PUP(sout) = pat16;
@@ -298,20 +302,6 @@ void inflate_fast(z_streamp strm, unsigned start)
                 }
                 if (len & 1)
                     PUP(out) = PUP(from);
-#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
-                from = out - dist;          /* copy direct from output */
-                do {                        /* minimum length is three */
-                    PUP(out) = PUP(from);
-                    PUP(out) = PUP(from);
-                    PUP(out) = PUP(from);
-                    len -= 3;
-                } while (len > 2);
-                if (len) {
-                    PUP(out) = PUP(from);
-                    if (len > 1)
-                        PUP(out) = PUP(from);
-                }
-#endif /* !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
             }
         }
         else if ((op & 64) == 0) {          /* 2nd level distance code */