Merge branch 'akpm' (final batch from Andrew)
Merge third patch-bomb from Andrew Morton:
 "This wraps me up for -rc1.
   - Lots of misc stuff and things which were deferred/missed from
     patchbombings 1 & 2.
   - ocfs2 things
   - lib/scatterlist
   - hfsplus
   - fatfs
   - documentation
   - signals
   - procfs
   - lockdep
   - coredump
   - seqfile core
   - kexec
   - Tejun's large IDR tree reworkings
   - ipmi
   - partitions
   - nbd
   - random() things
   - kfifo
   - tools/testing/selftests updates
   - Sasha's large and pointless hlist cleanup"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (163 commits)
  hlist: drop the node parameter from iterators
  kcmp: make it depend on CHECKPOINT_RESTORE
  selftests: add a simple doc
  tools/testing/selftests/Makefile: rearrange targets
  selftests/efivarfs: add create-read test
  selftests/efivarfs: add empty file creation test
  selftests: add tests for efivarfs
  kfifo: fix kfifo_alloc() and kfifo_init()
  kfifo: move kfifo.c from kernel/ to lib/
  arch Kconfig: centralise CONFIG_ARCH_NO_VIRT_TO_BUS
  w1: add support for DS2413 Dual Channel Addressable Switch
  memstick: move the dereference below the NULL test
  drivers/pps/clients/pps-gpio.c: use devm_kzalloc
  Documentation/DMA-API-HOWTO.txt: fix typo
  include/linux/eventfd.h: fix incorrect filename is a comment
  mtd: mtd_stresstest: use prandom_bytes()
  mtd: mtd_subpagetest: convert to use prandom library
  mtd: mtd_speedtest: use prandom_bytes
  mtd: mtd_pagetest: convert to use prandom library
  mtd: mtd_oobtest: convert to use prandom library
  ...
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -51,7 +51,7 @@ struct task_struct;
 extern void debug_show_all_locks(void);
 extern void debug_show_held_locks(struct task_struct *task);
 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
-extern void debug_check_no_locks_held(struct task_struct *task);
+extern void debug_check_no_locks_held(void);
 #else
 static inline void debug_show_all_locks(void)
 {
@@ -67,7 +67,7 @@ debug_check_no_locks_freed(const void *from, unsigned long len)
 }
 
 static inline void
-debug_check_no_locks_held(struct task_struct *task)
+debug_check_no_locks_held(void)
 {
 }
 #endif
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -13,7 +13,7 @@
 #include <linux/wait.h>
 
 /*
- * CAREFUL: Check include/asm-generic/fcntl.h when defining
+ * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
  * new flags, since they might collide with O_* ones. We want
  * to re-use O_* flags that couldn't possibly have a meaning
  * from eventfd, in order to leave a free define-space for
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -3,6 +3,7 @@
 #ifndef FREEZER_H_INCLUDED
 #define FREEZER_H_INCLUDED
 
+#include <linux/debug_locks.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/atomic.h>
@@ -48,6 +49,8 @@ extern void thaw_kernel_threads(void);
 
 static inline bool try_to_freeze(void)
 {
+	if (!(current->flags & PF_NOFREEZE))
+		debug_check_no_locks_held();
 	might_sleep();
 	if (likely(!freezing(current)))
 		return false;
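The freezer hook above is the consumer of the debug_locks change: for freezable tasks, try_to_freeze() now asserts via the argument-less debug_check_no_locks_held() that the current task holds no locks when it enters the freezer. A minimal sketch of a freezable kthread that exercises this path (the worker function and timing are illustrative, not from this patch):

	#include <linux/freezer.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>

	static int worker_fn(void *data)
	{
		set_freezable();		/* opt in to freezing */
		while (!kthread_should_stop()) {
			/* lockdep now warns here if we freeze holding a lock */
			try_to_freeze();
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}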
--- a/include/linux/hashtable.h
+++ b/include/linux/hashtable.h
@@ -115,51 +115,50 @@ static inline void hash_del_rcu(struct hlist_node *node)
  * hash_for_each - iterate over a hashtable
  * @name: hashtable to iterate
  * @bkt: integer to use as bucket loop cursor
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @obj: the type * to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
  */
-#define hash_for_each(name, bkt, node, obj, member)			\
-	for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
-		hlist_for_each_entry(obj, node, &name[bkt], member)
+#define hash_for_each(name, bkt, obj, member)				\
+	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+			(bkt)++)\
+		hlist_for_each_entry(obj, &name[bkt], member)
 
 /**
  * hash_for_each_rcu - iterate over a rcu enabled hashtable
  * @name: hashtable to iterate
  * @bkt: integer to use as bucket loop cursor
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @obj: the type * to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
  */
-#define hash_for_each_rcu(name, bkt, node, obj, member)			\
-	for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
-		hlist_for_each_entry_rcu(obj, node, &name[bkt], member)
+#define hash_for_each_rcu(name, bkt, obj, member)			\
+	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+			(bkt)++)\
+		hlist_for_each_entry_rcu(obj, &name[bkt], member)
 
 /**
  * hash_for_each_safe - iterate over a hashtable safe against removal of
  * hash entry
  * @name: hashtable to iterate
  * @bkt: integer to use as bucket loop cursor
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @tmp: a &struct used for temporary storage
  * @obj: the type * to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
  */
-#define hash_for_each_safe(name, bkt, node, tmp, obj, member)		\
-	for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
-		hlist_for_each_entry_safe(obj, node, tmp, &name[bkt], member)
+#define hash_for_each_safe(name, bkt, tmp, obj, member)			\
+	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+			(bkt)++)\
+		hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
 
 /**
  * hash_for_each_possible - iterate over all possible objects hashing to the
  * same bucket
  * @name: hashtable to iterate
  * @obj: the type * to use as a loop cursor for each entry
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
  * @key: the key of the objects to iterate over
  */
-#define hash_for_each_possible(name, obj, node, member, key)		\
-	hlist_for_each_entry(obj, node, &name[hash_min(key, HASH_BITS(name))], member)
+#define hash_for_each_possible(name, obj, member, key)			\
+	hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
 
 /**
  * hash_for_each_possible_rcu - iterate over all possible objects hashing to the
@@ -167,25 +166,24 @@ static inline void hash_del_rcu(struct hlist_node *node)
  * in a rcu enabled hashtable
  * @name: hashtable to iterate
  * @obj: the type * to use as a loop cursor for each entry
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
  * @key: the key of the objects to iterate over
  */
-#define hash_for_each_possible_rcu(name, obj, node, member, key)	\
-	hlist_for_each_entry_rcu(obj, node, &name[hash_min(key, HASH_BITS(name))], member)
+#define hash_for_each_possible_rcu(name, obj, member, key)		\
+	hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\
+		member)
 
 /**
  * hash_for_each_possible_safe - iterate over all possible objects hashing to the
  * same bucket safe against removals
  * @name: hashtable to iterate
  * @obj: the type * to use as a loop cursor for each entry
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @tmp: a &struct used for temporary storage
  * @member: the name of the hlist_node within the struct
  * @key: the key of the objects to iterate over
  */
-#define hash_for_each_possible_safe(name, obj, node, tmp, member, key)	\
-	hlist_for_each_entry_safe(obj, node, tmp,\
+#define hash_for_each_possible_safe(name, obj, tmp, member, key)	\
+	hlist_for_each_entry_safe(obj, tmp,\
		&name[hash_min(key, HASH_BITS(name))], member)
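From the caller side, the hashtable conversion removes the struct hlist_node cursor from every iterator. A minimal sketch of the new usage (the entry type, table size, and key are illustrative, not from this patch):

	#include <linux/hashtable.h>
	#include <linux/printk.h>

	struct entry {
		int key;
		struct hlist_node node;	/* the member named in the iterators */
	};

	static DEFINE_HASHTABLE(table, 3);	/* 2^3 buckets */

	static void walk_table(void)
	{
		struct entry *e;
		int bkt;

		/* Before: hash_for_each(table, bkt, pos, e, node) with an
		 * extra struct hlist_node *pos cursor.  After: */
		hash_for_each(table, bkt, e, node)
			pr_info("key=%d\n", e->key);

		/* Bucket-local lookup loses the cursor the same way. */
		hash_for_each_possible(table, e, node, 42)
			if (e->key == 42)
				pr_info("found\n");
	}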
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -17,69 +17,40 @@
 #include <linux/init.h>
 #include <linux/rcupdate.h>
 
-#if BITS_PER_LONG == 32
-# define IDR_BITS 5
-# define IDR_FULL 0xfffffffful
-/* We can only use two of the bits in the top level because there is
-   only one possible bit in the top level (5 bits * 7 levels = 35
-   bits, but you only use 31 bits in the id). */
-# define TOP_LEVEL_FULL (IDR_FULL >> 30)
-#elif BITS_PER_LONG == 64
-# define IDR_BITS 6
-# define IDR_FULL 0xfffffffffffffffful
-/* We can only use two of the bits in the top level because there is
-   only one possible bit in the top level (6 bits * 6 levels = 36
-   bits, but you only use 31 bits in the id). */
-# define TOP_LEVEL_FULL (IDR_FULL >> 62)
-#else
-# error "BITS_PER_LONG is not 32 or 64"
-#endif
-
+/*
+ * We want shallower trees and thus more bits covered at each layer.  8
+ * bits gives us large enough first layer for most use cases and maximum
+ * tree depth of 4.  Each idr_layer is slightly larger than 2k on 64bit and
+ * 1k on 32bit.
+ */
+#define IDR_BITS 8
 #define IDR_SIZE (1 << IDR_BITS)
 #define IDR_MASK ((1 << IDR_BITS)-1)
 
-#define MAX_IDR_SHIFT (sizeof(int)*8 - 1)
-#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
-#define MAX_IDR_MASK (MAX_IDR_BIT - 1)
-
-/* Leave the possibility of an incomplete final layer */
-#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
-
-/* Number of id_layer structs to leave in free list */
-#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
-
 struct idr_layer {
-	unsigned long		bitmap;	/* A zero bit means "space here" */
+	int			prefix;	/* the ID prefix of this idr_layer */
+	DECLARE_BITMAP(bitmap, IDR_SIZE); /* A zero bit means "space here" */
 	struct idr_layer __rcu	*ary[1<<IDR_BITS];
-	int			count;	 /* When zero, we can release it */
-	int			layer;	 /* distance from leaf */
-	struct rcu_head		rcu_head;
+	int			count;	/* When zero, we can release it */
+	int			layer;	/* distance from leaf */
+	struct rcu_head		rcu_head;
 };
 
 struct idr {
-	struct idr_layer __rcu	*top;
-	struct idr_layer	*id_free;
-	int			layers;	/* only valid without concurrent changes */
-	int			id_free_cnt;
-	spinlock_t		lock;
+	struct idr_layer __rcu	*hint;	/* the last layer allocated from */
+	struct idr_layer __rcu	*top;
+	struct idr_layer	*id_free;
+	int			layers;	/* only valid w/o concurrent changes */
+	int			id_free_cnt;
+	spinlock_t		lock;
 };
 
-#define IDR_INIT(name)						\
-{								\
-	.top		= NULL,					\
-	.id_free	= NULL,					\
-	.layers		= 0,					\
-	.id_free_cnt	= 0,					\
-	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),	\
+#define IDR_INIT(name)						\
+{								\
+	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),	\
 }
 #define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)
 
-/* Actions to be taken after a call to _idr_sub_alloc */
-#define IDR_NEED_TO_GROW -2
-#define IDR_NOMORE_SPACE -3
-
-#define _idr_rc_to_errno(rc) ((rc) == -1 ? -EAGAIN : -ENOSPC)
-
 /**
  * DOC: idr sync
  * idr synchronization (stolen from radix-tree.h)
@@ -101,19 +72,90 @@ struct idr {
  * This is what we export.
  */
 
-void *idr_find(struct idr *idp, int id);
+void *idr_find_slowpath(struct idr *idp, int id);
 int idr_pre_get(struct idr *idp, gfp_t gfp_mask);
-int idr_get_new(struct idr *idp, void *ptr, int *id);
 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
+void idr_preload(gfp_t gfp_mask);
+int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
 int idr_for_each(struct idr *idp,
 		 int (*fn)(int id, void *p, void *data), void *data);
 void *idr_get_next(struct idr *idp, int *nextid);
 void *idr_replace(struct idr *idp, void *ptr, int id);
 void idr_remove(struct idr *idp, int id);
-void idr_remove_all(struct idr *idp);
+void idr_free(struct idr *idp, int id);
 void idr_destroy(struct idr *idp);
 void idr_init(struct idr *idp);
 
+/**
+ * idr_preload_end - end preload section started with idr_preload()
+ *
+ * Each idr_preload() should be matched with an invocation of this
+ * function.  See idr_preload() for details.
+ */
+static inline void idr_preload_end(void)
+{
+	preempt_enable();
+}
+
+/**
+ * idr_find - return pointer for given id
+ * @idp: idr handle
+ * @id: lookup key
+ *
+ * Return the pointer given the id it has been registered with.  A %NULL
+ * return indicates that @id is not valid or you passed %NULL in
+ * idr_get_new().
+ *
+ * This function can be called under rcu_read_lock(), given that the leaf
+ * pointers lifetimes are correctly managed.
+ */
+static inline void *idr_find(struct idr *idr, int id)
+{
+	struct idr_layer *hint = rcu_dereference_raw(idr->hint);
+
+	if (hint && (id & ~IDR_MASK) == hint->prefix)
+		return rcu_dereference_raw(hint->ary[id & IDR_MASK]);
+
+	return idr_find_slowpath(idr, id);
+}
+
+/**
+ * idr_get_new - allocate new idr entry
+ * @idp: idr handle
+ * @ptr: pointer you want associated with the id
+ * @id: pointer to the allocated handle
+ *
+ * Simple wrapper around idr_get_new_above() w/ @starting_id of zero.
+ */
+static inline int idr_get_new(struct idr *idp, void *ptr, int *id)
+{
+	return idr_get_new_above(idp, ptr, 0, id);
+}
+
+/**
+ * idr_for_each_entry - iterate over an idr's elements of a given type
+ * @idp: idr handle
+ * @entry: the type * to use as cursor
+ * @id: id entry's key
+ */
+#define idr_for_each_entry(idp, entry, id)				\
+	for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \
+	     entry != NULL;						\
+	     ++id, entry = (typeof(entry))idr_get_next((idp), &(id)))
+
+void __idr_remove_all(struct idr *idp);	/* don't use */
+
+/**
+ * idr_remove_all - remove all ids from the given idr tree
+ * @idp: idr handle
+ *
+ * If you're trying to destroy @idp, calling idr_destroy() is enough.
+ * This is going away.  Don't use.
+ */
+static inline void __deprecated idr_remove_all(struct idr *idp)
+{
+	__idr_remove_all(idp);
+}
 
 /*
  * IDA - IDR based id allocator, use when translation from id to
@@ -141,7 +183,6 @@ struct ida {
 
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
-int ida_get_new(struct ida *ida, int *p_id);
 void ida_remove(struct ida *ida, int id);
 void ida_destroy(struct ida *ida);
 void ida_init(struct ida *ida);
@@ -150,17 +191,18 @@ int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
 		   gfp_t gfp_mask);
 void ida_simple_remove(struct ida *ida, unsigned int id);
 
+/**
+ * ida_get_new - allocate new ID
+ * @ida: idr handle
+ * @p_id: pointer to the allocated handle
+ *
+ * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
+ */
+static inline int ida_get_new(struct ida *ida, int *p_id)
+{
+	return ida_get_new_above(ida, 0, p_id);
+}
+
 void __init idr_init_cache(void);
 
-/**
- * idr_for_each_entry - iterate over an idr's elements of a given type
- * @idp: idr handle
- * @entry: the type * to use as cursor
- * @id: id entry's key
- */
-#define idr_for_each_entry(idp, entry, id)				\
-	for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \
-	     entry != NULL;						\
-	     ++id, entry = (typeof(entry))idr_get_next((idp), &(id)))
-
 #endif /* __IDR_H__ */
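The exported list above is the heart of Tejun's rework: idr_find() becomes an inline fastpath keyed on the new ->hint layer, and the old two-step idr_pre_get()/idr_get_new() pair is superseded by the preload + idr_alloc() pattern. A minimal allocation sketch under the new API (the lock and the surrounding function are illustrative, not from this patch):

	#include <linux/idr.h>
	#include <linux/spinlock.h>

	static DEFINE_IDR(obj_idr);
	static DEFINE_SPINLOCK(obj_lock);	/* hypothetical user lock */

	static int obj_register(void *obj)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* may sleep; preallocates layers */
		spin_lock(&obj_lock);
		/* lowest free id >= 0; end == 0 means no upper limit */
		id = idr_alloc(&obj_idr, obj, 0, 0, GFP_NOWAIT);
		spin_unlock(&obj_lock);
		idr_preload_end();		/* pairs with idr_preload() */

		return id;			/* the id, or a negative errno */
	}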
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -216,11 +216,10 @@ static inline struct hlist_head *team_port_index_hash(struct team *team,
 static inline struct team_port *team_get_port_by_index(struct team *team,
 						       int port_index)
 {
-	struct hlist_node *p;
 	struct team_port *port;
 	struct hlist_head *head = team_port_index_hash(team, port_index);
 
-	hlist_for_each_entry(port, p, head, hlist)
+	hlist_for_each_entry(port, head, hlist)
 		if (port->index == port_index)
 			return port;
 	return NULL;
@@ -228,11 +227,10 @@ static inline struct team_port *team_get_port_by_index(struct team *team,
 static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
 							    int port_index)
 {
-	struct hlist_node *p;
 	struct team_port *port;
 	struct hlist_head *head = team_port_index_hash(team, port_index);
 
-	hlist_for_each_entry_rcu(port, p, head, hlist)
+	hlist_for_each_entry_rcu(port, head, hlist)
 		if (port->index == port_index)
 			return port;
 	return NULL;
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -35,10 +35,6 @@
 
 #include <uapi/linux/ipmi.h>
 
-
-/*
- * The in-kernel interface.
- */
 #include <linux/list.h>
 #include <linux/proc_fs.h>
 
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -666,54 +666,49 @@ static inline void hlist_move_list(struct hlist_head *old,
 	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
 	     pos = n)
 
+#define hlist_entry_safe(ptr, type, member) \
+	(ptr) ? hlist_entry(ptr, type, member) : NULL
+
 /**
  * hlist_for_each_entry - iterate over list of given type
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
+ * @pos: the type * to use as a loop cursor.
  * @head: the head for your list.
  * @member: the name of the hlist_node within the struct.
  */
-#define hlist_for_each_entry(tpos, pos, head, member)			\
-	for (pos = (head)->first;					\
-	     pos &&							\
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
-	     pos = pos->next)
+#define hlist_for_each_entry(pos, head, member)				\
+	for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
+	     pos;							\
+	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
+ * @pos: the type * to use as a loop cursor.
  * @member: the name of the hlist_node within the struct.
  */
-#define hlist_for_each_entry_continue(tpos, pos, member)		\
-	for (pos = (pos)->next;						\
-	     pos &&							\
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
-	     pos = pos->next)
+#define hlist_for_each_entry_continue(pos, member)			\
+	for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
+	     pos;							\
+	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_from - iterate over a hlist continuing from current point
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
+ * @pos: the type * to use as a loop cursor.
  * @member: the name of the hlist_node within the struct.
  */
-#define hlist_for_each_entry_from(tpos, pos, member)			\
-	for (; pos &&							\
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
-	     pos = pos->next)
+#define hlist_for_each_entry_from(pos, member)				\
+	for (; pos;							\
+	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
+ * @pos: the type * to use as a loop cursor.
  * @n: another &struct hlist_node to use as temporary storage
  * @head: the head for your list.
  * @member: the name of the hlist_node within the struct.
  */
-#define hlist_for_each_entry_safe(tpos, pos, n, head, member)		\
-	for (pos = (head)->first;					\
-	     pos && ({ n = pos->next; 1; }) &&				\
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
-	     pos = n)
+#define hlist_for_each_entry_safe(pos, n, head, member)			\
+	for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
+	     pos && ({ n = pos->member.next; 1; });			\
+	     pos = hlist_entry_safe(n, typeof(*pos), member))
 
 #endif
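The hlist_entry_safe() helper is what lets the new iterators drop the node cursor: each step converts the next hlist_node pointer straight to the containing object, or to NULL at the end of the chain. Caller-side, the conversion looks roughly like this (types and list head are illustrative, not from this patch):

	#include <linux/list.h>
	#include <linux/printk.h>
	#include <linux/slab.h>

	struct item {
		int val;
		struct hlist_node link;
	};

	static HLIST_HEAD(items);

	static void walk_items(void)
	{
		struct item *it;
		struct hlist_node *n;

		/* Old: hlist_for_each_entry(it, pos, &items, link) with an
		 * extra struct hlist_node *pos.  New: */
		hlist_for_each_entry(it, &items, link)
			pr_info("val=%d\n", it->val);

		/* The _safe variant keeps only the temporary node pointer. */
		hlist_for_each_entry_safe(it, n, &items, link) {
			hlist_del(&it->link);
			kfree(it);
		}
	}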
--- a/include/linux/mfd/lp8788.h
+++ b/include/linux/mfd/lp8788.h
@@ -16,6 +16,7 @@
 
 #include <linux/gpio.h>
 #include <linux/irqdomain.h>
+#include <linux/pwm.h>
 #include <linux/regmap.h>
 
 #define LP8788_DEV_BUCK		"lp8788-buck"
@@ -124,11 +125,6 @@ enum lp8788_bl_ramp_step {
 	LP8788_RAMP_65538us,
 };
 
-enum lp8788_bl_pwm_polarity {
-	LP8788_PWM_ACTIVE_HIGH,
-	LP8788_PWM_ACTIVE_LOW,
-};
-
 enum lp8788_isink_scale {
 	LP8788_ISINK_SCALE_100mA,
 	LP8788_ISINK_SCALE_120mA,
@@ -228,16 +224,6 @@ struct lp8788_charger_platform_data {
 					enum lp8788_charger_event event);
 };
 
-/*
- * struct lp8788_bl_pwm_data
- * @pwm_set_intensity : set duty of pwm
- * @pwm_get_intensity : get current duty of pwm
- */
-struct lp8788_bl_pwm_data {
-	void (*pwm_set_intensity) (int brightness, int max_brightness);
-	int (*pwm_get_intensity) (int max_brightness);
-};
-
 /*
  * struct lp8788_backlight_platform_data
  * @name : backlight driver name. (default: "lcd-backlight")
@@ -248,8 +234,8 @@ struct lp8788_bl_pwm_data {
  * @rise_time : brightness ramp up step time
  * @fall_time : brightness ramp down step time
  * @pwm_pol : pwm polarity setting when bl_mode is pwm based
- * @pwm_data : platform specific pwm generation functions
- *             only valid when bl_mode is pwm based
+ * @period_ns : platform specific pwm period value. unit is nano.
+ *              Only valid when bl_mode is LP8788_BL_COMB_PWM_BASED
  */
 struct lp8788_backlight_platform_data {
 	char *name;
@@ -259,8 +245,8 @@ struct lp8788_backlight_platform_data {
 	enum lp8788_bl_full_scale_current full_scale;
 	enum lp8788_bl_ramp_step rise_time;
 	enum lp8788_bl_ramp_step fall_time;
-	enum lp8788_bl_pwm_polarity pwm_pol;
-	struct lp8788_bl_pwm_data pwm_data;
+	enum pwm_polarity pwm_pol;
+	unsigned int period_ns;
 };
 
 /*
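With lp8788_bl_pwm_data gone, a board now describes its PWM-driven backlight using the generic enum pwm_polarity from <linux/pwm.h> plus a period, instead of supplying set/get callbacks. A sketch of the resulting platform data (values are illustrative and other fields of the struct are omitted):

	#include <linux/mfd/lp8788.h>

	static struct lp8788_backlight_platform_data bl_pdata = {
		.name      = "lcd-backlight",
		.pwm_pol   = PWM_POLARITY_NORMAL,	/* from <linux/pwm.h> */
		.period_ns = 5000000,			/* 5 ms period = 200 Hz */
	};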
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -176,9 +176,8 @@ pid_t pid_vnr(struct pid *pid);
 
 #define do_each_pid_task(pid, type, task)				\
 	do {								\
-		struct hlist_node *pos___;				\
 		if ((pid) != NULL)					\
-			hlist_for_each_entry_rcu((task), pos___,	\
+			hlist_for_each_entry_rcu((task),		\
 				&(pid)->tasks[type], pids[type].node) {
 
 /*
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -445,8 +445,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
 
 /**
  * hlist_for_each_entry_rcu - iterate over rcu list of given type
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
+ * @pos: the type * to use as a loop cursor.
  * @head: the head for your list.
  * @member: the name of the hlist_node within the struct.
  *
@@ -454,16 +453,16 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
  * the _rcu list-mutation primitives such as hlist_add_head_rcu()
  * as long as the traversal is guarded by rcu_read_lock().
  */
-#define hlist_for_each_entry_rcu(tpos, pos, head, member)		\
-	for (pos = rcu_dereference_raw(hlist_first_rcu(head));		\
-		pos &&							\
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
-		pos = rcu_dereference_raw(hlist_next_rcu(pos)))
+#define hlist_for_each_entry_rcu(pos, head, member)			\
+	for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
+			typeof(*(pos)), member);			\
+		pos;							\
+		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
+			&(pos)->member)), typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
+ * @pos: the type * to use as a loop cursor.
  * @head: the head for your list.
  * @member: the name of the hlist_node within the struct.
  *
@@ -471,35 +470,36 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
  * the _rcu list-mutation primitives such as hlist_add_head_rcu()
  * as long as the traversal is guarded by rcu_read_lock().
  */
-#define hlist_for_each_entry_rcu_bh(tpos, pos, head, member)		\
-	for (pos = rcu_dereference_bh((head)->first);			\
-		pos &&							\
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
-		pos = rcu_dereference_bh(pos->next))
+#define hlist_for_each_entry_rcu_bh(pos, head, member)			\
+	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
+			typeof(*(pos)), member);			\
+		pos;							\
+		pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
+			&(pos)->member)), typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
+ * @pos: the type * to use as a loop cursor.
  * @member: the name of the hlist_node within the struct.
  */
-#define hlist_for_each_entry_continue_rcu(tpos, pos, member)		\
-	for (pos = rcu_dereference((pos)->next);			\
-		pos &&							\
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
-		pos = rcu_dereference(pos->next))
+#define hlist_for_each_entry_continue_rcu(pos, member)			\
+	for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
+			typeof(*(pos)), member);			\
+	     pos;							\
+	     pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
+			typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
+ * @pos: the type * to use as a loop cursor.
  * @member: the name of the hlist_node within the struct.
  */
-#define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member)	\
-	for (pos = rcu_dereference_bh((pos)->next);			\
-		pos &&							\
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
-		pos = rcu_dereference_bh(pos->next))
+#define hlist_for_each_entry_continue_rcu_bh(pos, member)		\
+	for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
+			typeof(*(pos)), member);			\
+	     pos;							\
+	     pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
+			typeof(*(pos)), member))
 
 
 #endif	/* __KERNEL__ */
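The RCU variants follow the same pattern, folding the rcu_dereference of the next pointer into hlist_entry_safe() at each step. A minimal read-side lookup under the new signature (struct and list head are illustrative, not from this patch):

	#include <linux/rculist.h>
	#include <linux/rcupdate.h>
	#include <linux/types.h>

	struct peer {
		int id;
		struct hlist_node link;
	};

	static HLIST_HEAD(peers);	/* updated under some writer-side lock */

	static bool peer_exists(int id)
	{
		struct peer *p;
		bool found = false;

		rcu_read_lock();
		hlist_for_each_entry_rcu(p, &peers, link)	/* no node cursor */
			if (p->id == id) {
				found = true;
				break;
			}
		rcu_read_unlock();
		return found;
	}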
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -231,6 +231,41 @@ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
  */
 #define SG_MAX_SINGLE_ALLOC	(PAGE_SIZE / sizeof(struct scatterlist))
 
+/*
+ * sg page iterator
+ *
+ * Iterates over sg entries page-by-page.  On each successful iteration,
+ * @piter->page points to the current page, @piter->sg to the sg holding this
+ * page and @piter->sg_pgoffset to the page's page offset within the sg. The
+ * iteration will stop either when a maximum number of sg entries was reached
+ * or a terminating sg (sg_last(sg) == true) was reached.
+ */
+struct sg_page_iter {
+	struct page		*page;		/* current page */
+	struct scatterlist	*sg;		/* sg holding the page */
+	unsigned int		sg_pgoffset;	/* page offset within the sg */
+
+	/* these are internal states, keep away */
+	unsigned int		__nents;	/* remaining sg entries */
+	int			__pg_advance;	/* nr pages to advance at the
+						 * next step */
+};
+
+bool __sg_page_iter_next(struct sg_page_iter *piter);
+void __sg_page_iter_start(struct sg_page_iter *piter,
+			  struct scatterlist *sglist, unsigned int nents,
+			  unsigned long pgoffset);
+
+/**
+ * for_each_sg_page - iterate over the pages of the given sg list
+ * @sglist: sglist to iterate over
+ * @piter: page iterator to hold current page, sg, sg_pgoffset
+ * @nents: maximum number of sg entries to iterate over
+ * @pgoffset: starting page offset
+ */
+#define for_each_sg_page(sglist, piter, nents, pgoffset)		   \
+	for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
+	     __sg_page_iter_next(piter);)
+
 /*
  * Mapping sg iterator
@@ -258,11 +293,11 @@ struct sg_mapping_iter {
 	void			*addr;		/* pointer to the mapped area */
 	size_t			length;		/* length of the mapped area */
 	size_t			consumed;	/* number of consumed bytes */
+	struct sg_page_iter	piter;		/* page iterator */
 
 	/* these are internal states, keep away */
-	struct scatterlist	*__sg;		/* current entry */
-	unsigned int		__nents;	/* nr of remaining entries */
-	unsigned int		__offset;	/* offset within sg */
+	unsigned int		__offset;	/* offset within page */
 	unsigned int		__remaining;	/* remaining bytes on page */
 	unsigned int		__flags;
 };
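Note that in this first version of the iterator the current page is read directly from piter.page (later kernels wrap it in an accessor). A sketch that counts the pages spanned by an sg list (function name is illustrative, not from this patch):

	#include <linux/scatterlist.h>

	static unsigned int count_sg_pages(struct scatterlist *sgl,
					   unsigned int nents)
	{
		struct sg_page_iter piter;
		unsigned int n = 0;

		/* start at page offset 0 of the first entry */
		for_each_sg_page(sgl, &piter, nents, 0)
			n++;	/* piter.page and piter.sg are valid here */

		return n;
	}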
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -346,11 +346,6 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
 
-/* get/set_dumpable() values */
-#define SUID_DUMPABLE_DISABLED	0
-#define SUID_DUMPABLE_ENABLED	1
-#define SUID_DUMPABLE_SAFE	2
-
 /* mm flags */
 /* dumpable bits */
 #define MMF_DUMPABLE      0  /* core dump is permitted */