Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6

Rusty Russell committed 2008-12-30 08:02:35 +10:30
2974 changed files with 145857 additions and 82592 deletions

View File

@@ -231,10 +231,21 @@ static __inline__ int atmpvc_addr_in_use(struct sockaddr_atmpvc addr)
*/
struct atmif_sioc {
int number;
int length;
void __user *arg;
int number;
int length;
void __user *arg;
};
#ifdef __KERNEL__
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
struct compat_atmif_sioc {
int number;
int length;
compat_uptr_t arg;
};
#endif
#endif
typedef unsigned short atm_backend_t;
#endif
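The hunk above adds compat_atmif_sioc, whose pointer member is a compat_uptr_t rather than a void __user *. As a minimal, hedged sketch (the helper name and the copy_from_user/compat_ptr pattern are illustrative, not taken from this commit), a 32-bit ioctl path could translate it into the native layout like this:

#include <linux/compat.h>
#include <linux/uaccess.h>

/* Hypothetical helper: convert a userspace compat_atmif_sioc into the native form. */
static int example_get_atmif_sioc(struct atmif_sioc *dst, void __user *arg)
{
	struct compat_atmif_sioc csioc;

	if (copy_from_user(&csioc, arg, sizeof(csioc)))
		return -EFAULT;

	dst->number = csioc.number;
	dst->length = csioc.length;
	dst->arg    = compat_ptr(csioc.arg);	/* widen the 32-bit user pointer */
	return 0;
}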

View File

@@ -100,6 +100,10 @@ struct atm_dev_stats {
/* use backend to make new if */
#define ATM_ADDPARTY _IOW('a', ATMIOC_SPECIAL+4,struct atm_iobuf)
/* add party to p2mp call */
#ifdef CONFIG_COMPAT
/* It actually takes struct sockaddr_atmsvc, not struct atm_iobuf */
#define COMPAT_ATM_ADDPARTY _IOW('a', ATMIOC_SPECIAL+4,struct compat_atm_iobuf)
#endif
#define ATM_DROPPARTY _IOW('a', ATMIOC_SPECIAL+5,int)
/* drop party from p2mp call */
@@ -224,6 +228,13 @@ struct atm_cirange {
extern struct proc_dir_entry *atm_proc_root;
#endif
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
struct compat_atm_iobuf {
int length;
compat_uptr_t buffer;
};
#endif
struct k_atm_aal_stats {
#define __HANDLE_ITEM(i) atomic_t i
@@ -379,6 +390,10 @@ struct atmdev_ops { /* only send is required */
int (*open)(struct atm_vcc *vcc);
void (*close)(struct atm_vcc *vcc);
int (*ioctl)(struct atm_dev *dev,unsigned int cmd,void __user *arg);
#ifdef CONFIG_COMPAT
int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd,
void __user *arg);
#endif
int (*getsockopt)(struct atm_vcc *vcc,int level,int optname,
void __user *optval,int optlen);
int (*setsockopt)(struct atm_vcc *vcc,int level,int optname,

View File

@@ -99,6 +99,8 @@
#define AUDIT_OBJ_PID 1318 /* ptrace target */
#define AUDIT_TTY 1319 /* Input on an administrative TTY */
#define AUDIT_EOE 1320 /* End of multi-record event */
#define AUDIT_BPRM_FCAPS 1321 /* Information about fcaps increasing perms */
#define AUDIT_CAPSET 1322 /* Record showing argument to sys_capset */
#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */
@@ -453,6 +455,10 @@ extern int __audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_pr
extern int __audit_mq_timedreceive(mqd_t mqdes, size_t msg_len, unsigned int __user *u_msg_prio, const struct timespec __user *u_abs_timeout);
extern int __audit_mq_notify(mqd_t mqdes, const struct sigevent __user *u_notification);
extern int __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat);
extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
const struct cred *new,
const struct cred *old);
extern int __audit_log_capset(pid_t pid, const struct cred *new, const struct cred *old);
static inline int audit_ipc_obj(struct kern_ipc_perm *ipcp)
{
@@ -502,6 +508,24 @@ static inline int audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
return __audit_mq_getsetattr(mqdes, mqstat);
return 0;
}
static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm,
const struct cred *new,
const struct cred *old)
{
if (unlikely(!audit_dummy_context()))
return __audit_log_bprm_fcaps(bprm, new, old);
return 0;
}
static inline int audit_log_capset(pid_t pid, const struct cred *new,
const struct cred *old)
{
if (unlikely(!audit_dummy_context()))
return __audit_log_capset(pid, new, old);
return 0;
}
extern int audit_n_rules;
extern int audit_signals;
#else
@@ -534,6 +558,8 @@ extern int audit_signals;
#define audit_mq_timedreceive(d,l,p,t) ({ 0; })
#define audit_mq_notify(d,n) ({ 0; })
#define audit_mq_getsetattr(d,s) ({ 0; })
#define audit_log_bprm_fcaps(b, ncr, ocr) ({ 0; })
#define audit_log_capset(pid, ncr, ocr) ({ 0; })
#define audit_ptrace(t) ((void)0)
#define audit_n_rules 0
#define audit_signals 0

View File

@@ -35,16 +35,20 @@ struct linux_binprm{
struct mm_struct *mm;
unsigned long p; /* current top of mem */
unsigned int sh_bang:1,
misc_bang:1;
misc_bang:1,
cred_prepared:1,/* true if creds already prepared (multiple
* preps happen for interpreters) */
cap_effective:1;/* true if has elevated effective capabilities,
* false if not; except for init which inherits
* its parent's caps anyway */
#ifdef __alpha__
unsigned int taso:1;
#endif
unsigned int recursion_depth;
struct file * file;
int e_uid, e_gid;
kernel_cap_t cap_post_exec_permitted;
bool cap_effective;
void *security;
struct cred *cred; /* new credentials */
int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */
unsigned int per_clear; /* bits to clear in current->personality */
int argc, envc;
char * filename; /* Name of binary as seen by procps */
char * interp; /* Name of the binary really executed. Most
@@ -101,7 +105,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
int executable_stack);
extern int bprm_mm_init(struct linux_binprm *bprm);
extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm);
extern void compute_creds(struct linux_binprm *binprm);
extern void install_exec_creds(struct linux_binprm *bprm);
extern int do_coredump(long signr, int exit_code, struct pt_regs * regs);
extern int set_binfmt(struct linux_binfmt *new);
extern void free_bprm(struct linux_binprm *);

View File

@@ -160,7 +160,6 @@ struct blk_trace {
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
extern int do_blk_trace_setup(struct request_queue *q,
char *name, dev_t dev, struct blk_user_trace_setup *buts);
extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
@@ -186,168 +185,8 @@ extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
} while (0)
#define BLK_TN_MAX_MSG 128
/**
* blk_add_trace_rq - Add a trace for a request oriented action
* @q: queue the io is for
* @rq: the source request
* @what: the action
*
* Description:
* Records an action against a request. Will log the bio offset + size.
*
**/
static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
u32 what)
{
struct blk_trace *bt = q->blk_trace;
int rw = rq->cmd_flags & 0x03;
if (likely(!bt))
return;
if (blk_discard_rq(rq))
rw |= (1 << BIO_RW_DISCARD);
if (blk_pc_request(rq)) {
what |= BLK_TC_ACT(BLK_TC_PC);
__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
} else {
what |= BLK_TC_ACT(BLK_TC_FS);
__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
}
}
/**
* blk_add_trace_bio - Add a trace for a bio oriented action
* @q: queue the io is for
* @bio: the source bio
* @what: the action
*
* Description:
* Records an action against a bio. Will log the bio offset + size.
*
**/
static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
u32 what)
{
struct blk_trace *bt = q->blk_trace;
if (likely(!bt))
return;
__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}
/**
* blk_add_trace_generic - Add a trace for a generic action
* @q: queue the io is for
* @bio: the source bio
* @rw: the data direction
* @what: the action
*
* Description:
* Records a simple trace
*
**/
static inline void blk_add_trace_generic(struct request_queue *q,
struct bio *bio, int rw, u32 what)
{
struct blk_trace *bt = q->blk_trace;
if (likely(!bt))
return;
if (bio)
blk_add_trace_bio(q, bio, what);
else
__blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
}
/**
* blk_add_trace_pdu_int - Add a trace for a bio with an integer payload
* @q: queue the io is for
* @what: the action
* @bio: the source bio
* @pdu: the integer payload
*
* Description:
* Adds a trace with some integer payload. This might be an unplug
* option given as the action, with the depth at unplug time given
* as the payload
*
**/
static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
struct bio *bio, unsigned int pdu)
{
struct blk_trace *bt = q->blk_trace;
__be64 rpdu = cpu_to_be64(pdu);
if (likely(!bt))
return;
if (bio)
__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu);
else
__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
}
/**
* blk_add_trace_remap - Add a trace for a remap operation
* @q: queue the io is for
* @bio: the source bio
* @dev: target device
* @from: source sector
* @to: target sector
*
* Description:
* Device mapper or raid target sometimes need to split a bio because
* it spans a stripe (or similar). Add a trace for that action.
*
**/
static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
dev_t dev, sector_t from, sector_t to)
{
struct blk_trace *bt = q->blk_trace;
struct blk_io_trace_remap r;
if (likely(!bt))
return;
r.device = cpu_to_be32(dev);
r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
r.sector = cpu_to_be64(to);
__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}
/**
* blk_add_driver_data - Add binary message with driver-specific data
* @q: queue the io is for
* @rq: io request
* @data: driver-specific data
* @len: length of driver-specific data
*
* Description:
* Some drivers might want to write driver-specific data per request.
*
**/
static inline void blk_add_driver_data(struct request_queue *q,
struct request *rq,
void *data, size_t len)
{
struct blk_trace *bt = q->blk_trace;
if (likely(!bt))
return;
if (blk_pc_request(rq))
__blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
rq->errors, len, data);
else
__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
0, BLK_TA_DRV_DATA, rq->errors, len, data);
}
extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
void *data, size_t len);
extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
char __user *arg);
extern int blk_trace_startstop(struct request_queue *q, int start);
@@ -356,13 +195,8 @@ extern int blk_trace_remove(struct request_queue *q);
#else /* !CONFIG_BLK_DEV_IO_TRACE */
#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
#define blk_trace_shutdown(q) do { } while (0)
#define blk_add_trace_rq(q, rq, what) do { } while (0)
#define blk_add_trace_bio(q, rq, what) do { } while (0)
#define blk_add_trace_generic(q, rq, rw, what) do { } while (0)
#define blk_add_trace_pdu_int(q, what, bio, pdu) do { } while (0)
#define blk_add_trace_remap(q, bio, dev, f, t) do {} while (0)
#define blk_add_driver_data(q, rq, data, len) do {} while (0)
#define do_blk_trace_setup(q, name, dev, buts) (-ENOTTY)
#define blk_add_driver_data(q, rq, data, len) do {} while (0)
#define blk_trace_setup(q, name, dev, arg) (-ENOTTY)
#define blk_trace_startstop(q, start) (-ENOTTY)
#define blk_trace_remove(q) (-ENOTTY)

View File

@@ -53,6 +53,7 @@ typedef struct __user_cap_data_struct {
#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX
#define VFS_CAP_REVISION_MASK 0xFF000000
#define VFS_CAP_REVISION_SHIFT 24
#define VFS_CAP_FLAGS_MASK ~VFS_CAP_REVISION_MASK
#define VFS_CAP_FLAGS_EFFECTIVE 0x000001
@@ -68,6 +69,9 @@ typedef struct __user_cap_data_struct {
#define VFS_CAP_U32 VFS_CAP_U32_2
#define VFS_CAP_REVISION VFS_CAP_REVISION_2
#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
extern int file_caps_enabled;
#endif
struct vfs_cap_data {
__le32 magic_etc; /* Little endian */
@@ -96,6 +100,13 @@ typedef struct kernel_cap_struct {
__u32 cap[_KERNEL_CAPABILITY_U32S];
} kernel_cap_t;
/* exact same as vfs_cap_data but in cpu endian and always filled completely */
struct cpu_vfs_cap_data {
__u32 magic_etc;
kernel_cap_t permitted;
kernel_cap_t inheritable;
};
#define _USER_CAP_HEADER_SIZE (sizeof(struct __user_cap_header_struct))
#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t))
@@ -454,6 +465,13 @@ static inline int cap_isclear(const kernel_cap_t a)
return 1;
}
/*
* Check if "a" is a subset of "set".
* return 1 if ALL of the capabilities in "a" are also in "set"
* cap_issubset(0101, 1111) will return 1
* return 0 if ANY of the capabilities in "a" are not in "set"
* cap_issubset(1111, 0101) will return 0
*/
static inline int cap_issubset(const kernel_cap_t a, const kernel_cap_t set)
{
kernel_cap_t dest;
@@ -501,8 +519,6 @@ extern const kernel_cap_t __cap_empty_set;
extern const kernel_cap_t __cap_full_set;
extern const kernel_cap_t __cap_init_eff_set;
kernel_cap_t cap_set_effective(const kernel_cap_t pE_new);
/**
* has_capability - Determine if a task has a superior capability available
* @t: The task in question
@@ -514,9 +530,14 @@ kernel_cap_t cap_set_effective(const kernel_cap_t pE_new);
* Note that this does not set PF_SUPERPRIV on the task.
*/
#define has_capability(t, cap) (security_capable((t), (cap)) == 0)
#define has_capability_noaudit(t, cap) (security_capable_noaudit((t), (cap)) == 0)
extern int capable(int cap);
/* audit system wants to get cap info from files as well */
struct dentry;
extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
#endif /* __KERNEL__ */
#endif /* !_LINUX_CAPABILITY_H */
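get_vfs_caps_from_disk() above fills the new cpu-endian cpu_vfs_cap_data from a file's capability xattr, so callers no longer parse the little-endian on-disk layout themselves. A hedged sketch of a caller (the helper name is illustrative):

#include <linux/capability.h>
#include <linux/dcache.h>

/* Hypothetical helper: report whether a file grants any permitted capabilities. */
static int example_file_has_fcaps(const struct dentry *dentry)
{
	struct cpu_vfs_cap_data caps;
	int err = get_vfs_caps_from_disk(dentry, &caps);

	if (err)
		return err;
	return !cap_isclear(caps.permitted);	/* non-zero if any capability bit is set */
}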

View File

@@ -54,3 +54,9 @@ SUBSYS(freezer)
#endif
/* */
#ifdef CONFIG_NET_CLS_CGROUP
SUBSYS(net_cls)
#endif
/* */

View File

@@ -59,8 +59,88 @@ extern void __chk_io_ptr(const volatile void __iomem *);
* specific implementations come from the above header files
*/
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
struct ftrace_branch_data {
const char *func;
const char *file;
unsigned line;
union {
struct {
unsigned long correct;
unsigned long incorrect;
};
struct {
unsigned long miss;
unsigned long hit;
};
};
};
/*
* Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
* to disable branch tracing on a per file basis.
*/
#if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING)
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#define likely_notrace(x) __builtin_expect(!!(x), 1)
#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
#define __branch_check__(x, expect) ({ \
int ______r; \
static struct ftrace_branch_data \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_annotated_branch"))) \
______f = { \
.func = __func__, \
.file = __FILE__, \
.line = __LINE__, \
}; \
______r = likely_notrace(x); \
ftrace_likely_update(&______f, ______r, expect); \
______r; \
})
/*
* Using __builtin_constant_p(x) to ignore cases where the return
* value is always the same. This idea is taken from a similar patch
* written by Daniel Walker.
*/
# ifndef likely
# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
# endif
# ifndef unlikely
# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
# endif
#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
* "Define 'is'", Bill Clinton
* "Define 'if'", Steven Rostedt
*/
#define if(cond) if (__builtin_constant_p((cond)) ? !!(cond) : \
({ \
int ______r; \
static struct ftrace_branch_data \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_branch"))) \
______f = { \
.func = __func__, \
.file = __FILE__, \
.line = __LINE__, \
}; \
______r = !!(cond); \
if (______r) \
______f.hit++; \
else \
______f.miss++; \
______r; \
}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
#else
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
#endif
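The comment above notes that DISABLE_BRANCH_PROFILING lets special low-level code opt out of branch tracing on a per-file basis. A minimal hedged sketch of such an opt-out (where it lives is illustrative; the define must appear before any header that pulls in linux/compiler.h):

/* At the very top of a low-level source file: */
#define DISABLE_BRANCH_PROFILING
#include <linux/kernel.h>

/* likely()/unlikely() in this file now expand to the plain __builtin_expect() form. */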
/* Optimization barrier */
#ifndef barrier

View File

@@ -3,9 +3,9 @@
#include <linux/types.h>
extern u32 crc32c_le(u32 crc, unsigned char const *address, size_t length);
extern u32 crc32c_be(u32 crc, unsigned char const *address, size_t length);
extern u32 crc32c(u32 crc, const void *address, unsigned int length);
#define crc32c(seed, data, length) crc32c_le(seed, (unsigned char const *)data, length)
/* This macro exists for backwards-compatibility. */
#define crc32c_le crc32c
#endif /* _LINUX_CRC32C_H */
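After this change crc32c() is an ordinary exported function and crc32c_le survives only as a backwards-compatibility alias. A hedged sketch of a call site (helper name illustrative):

#include <linux/crc32c.h>

/* Hypothetical helper: checksum a buffer, seeding with all-ones as many users do. */
static u32 example_crc32c(const void *buf, unsigned int len)
{
	return crc32c(~(u32)0, buf, len);
}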

View File

@@ -1,4 +1,4 @@
/* Credentials management
/* Credentials management - see Documentation/credentials.txt
*
* Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -12,39 +12,335 @@
#ifndef _LINUX_CRED_H
#define _LINUX_CRED_H
#define get_current_user() (get_uid(current->user))
#include <linux/capability.h>
#include <linux/key.h>
#include <asm/atomic.h>
#define task_uid(task) ((task)->uid)
#define task_gid(task) ((task)->gid)
#define task_euid(task) ((task)->euid)
#define task_egid(task) ((task)->egid)
struct user_struct;
struct cred;
struct inode;
#define current_uid() (current->uid)
#define current_gid() (current->gid)
#define current_euid() (current->euid)
#define current_egid() (current->egid)
#define current_suid() (current->suid)
#define current_sgid() (current->sgid)
#define current_fsuid() (current->fsuid)
#define current_fsgid() (current->fsgid)
#define current_cap() (current->cap_effective)
/*
* COW Supplementary groups list
*/
#define NGROUPS_SMALL 32
#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
struct group_info {
atomic_t usage;
int ngroups;
int nblocks;
gid_t small_block[NGROUPS_SMALL];
gid_t *blocks[0];
};
/**
* get_group_info - Get a reference to a group info structure
* @group_info: The group info to reference
*
* This gets a reference to a set of supplementary groups.
*
* If the caller is accessing a task's credentials, they must hold the RCU read
* lock when reading.
*/
static inline struct group_info *get_group_info(struct group_info *gi)
{
atomic_inc(&gi->usage);
return gi;
}
/**
* put_group_info - Release a reference to a group info structure
* @group_info: The group info to release
*/
#define put_group_info(group_info) \
do { \
if (atomic_dec_and_test(&(group_info)->usage)) \
groups_free(group_info); \
} while (0)
extern struct group_info *groups_alloc(int);
extern struct group_info init_groups;
extern void groups_free(struct group_info *);
extern int set_current_groups(struct group_info *);
extern int set_groups(struct cred *, struct group_info *);
extern int groups_search(const struct group_info *, gid_t);
/* access the groups "array" with this macro */
#define GROUP_AT(gi, i) \
((gi)->blocks[(i) / NGROUPS_PER_BLOCK][(i) % NGROUPS_PER_BLOCK])
extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);
/*
* The common credentials for a thread group
* - shared by CLONE_THREAD
*/
#ifdef CONFIG_KEYS
struct thread_group_cred {
atomic_t usage;
pid_t tgid; /* thread group process ID */
spinlock_t lock;
struct key *session_keyring; /* keyring inherited over fork */
struct key *process_keyring; /* keyring private to this process */
struct rcu_head rcu; /* RCU deletion hook */
};
#endif
/*
* The security context of a task
*
* The parts of the context break down into two categories:
*
* (1) The objective context of a task. These parts are used when some other
* task is attempting to affect this one.
*
* (2) The subjective context. These details are used when the task is acting
* upon another object, be that a file, a task, a key or whatever.
*
* Note that some members of this structure belong to both categories - the
* LSM security pointer for instance.
*
* A task has two security pointers. task->real_cred points to the objective
* context that defines that task's actual details. The objective part of this
* context is used whenever that task is acted upon.
*
* task->cred points to the subjective context that defines the details of how
* that task is going to act upon another object. This may be overridden
* temporarily to point to another security context, but normally points to the
* same context as task->real_cred.
*/
struct cred {
atomic_t usage;
uid_t uid; /* real UID of the task */
gid_t gid; /* real GID of the task */
uid_t suid; /* saved UID of the task */
gid_t sgid; /* saved GID of the task */
uid_t euid; /* effective UID of the task */
gid_t egid; /* effective GID of the task */
uid_t fsuid; /* UID for VFS ops */
gid_t fsgid; /* GID for VFS ops */
unsigned securebits; /* SUID-less security management */
kernel_cap_t cap_inheritable; /* caps our children can inherit */
kernel_cap_t cap_permitted; /* caps we're permitted */
kernel_cap_t cap_effective; /* caps we can actually use */
kernel_cap_t cap_bset; /* capability bounding set */
#ifdef CONFIG_KEYS
unsigned char jit_keyring; /* default keyring to attach requested
* keys to */
struct key *thread_keyring; /* keyring private to this thread */
struct key *request_key_auth; /* assumed request_key authority */
struct thread_group_cred *tgcred; /* thread-group shared credentials */
#endif
#ifdef CONFIG_SECURITY
void *security; /* subjective LSM security */
#endif
struct user_struct *user; /* real user ID subscription */
struct group_info *group_info; /* supplementary groups for euid/fsgid */
struct rcu_head rcu; /* RCU deletion hook */
};
extern void __put_cred(struct cred *);
extern int copy_creds(struct task_struct *, unsigned long);
extern struct cred *prepare_creds(void);
extern struct cred *prepare_exec_creds(void);
extern struct cred *prepare_usermodehelper_creds(void);
extern int commit_creds(struct cred *);
extern void abort_creds(struct cred *);
extern const struct cred *override_creds(const struct cred *);
extern void revert_creds(const struct cred *);
extern struct cred *prepare_kernel_cred(struct task_struct *);
extern int change_create_files_as(struct cred *, struct inode *);
extern int set_security_override(struct cred *, u32);
extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
extern void __init cred_init(void);
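Taken together, the declarations above implement the copy-modify-commit model described in Documentation/credentials.txt: credentials are never edited in place. A minimal hedged sketch of the intended usage (the helper and the field being changed are illustrative):

#include <linux/cred.h>
#include <linux/errno.h>

/* Hypothetical helper: change the current task's filesystem UID via the new API. */
static int example_change_fsuid(uid_t fsuid)
{
	struct cred *new = prepare_creds();	/* private copy of current->cred */

	if (!new)
		return -ENOMEM;
	new->fsuid = fsuid;			/* modify only the copy */
	return commit_creds(new);		/* install the new credentials */
}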
/**
* get_new_cred - Get a reference on a new set of credentials
* @cred: The new credentials to reference
*
* Get a reference on the specified set of new credentials. The caller must
* release the reference.
*/
static inline struct cred *get_new_cred(struct cred *cred)
{
atomic_inc(&cred->usage);
return cred;
}
/**
* get_cred - Get a reference on a set of credentials
* @cred: The credentials to reference
*
* Get a reference on the specified set of credentials. The caller must
* release the reference.
*
* This is used to deal with a committed set of credentials. Although the
* pointer is const, this will temporarily discard the const and increment the
* usage count. The purpose of this is to attempt to catch at compile time the
* accidental alteration of a set of credentials that should be considered
* immutable.
*/
static inline const struct cred *get_cred(const struct cred *cred)
{
return get_new_cred((struct cred *) cred);
}
/**
* put_cred - Release a reference to a set of credentials
* @cred: The credentials to release
*
* Release a reference to a set of credentials, deleting them when the last ref
* is released.
*
* This takes a const pointer to a set of credentials because the credentials
* on task_struct are attached by const pointers to prevent accidental
* alteration of otherwise immutable credential sets.
*/
static inline void put_cred(const struct cred *_cred)
{
struct cred *cred = (struct cred *) _cred;
BUG_ON(atomic_read(&(cred)->usage) <= 0);
if (atomic_dec_and_test(&(cred)->usage))
__put_cred(cred);
}
/**
* current_cred - Access the current task's subjective credentials
*
* Access the subjective credentials of the current task.
*/
#define current_cred() \
(current->cred)
/**
* __task_cred - Access a task's objective credentials
* @task: The task to query
*
* Access the objective credentials of a task. The caller must hold the RCU
* readlock.
*
* The caller must make sure task doesn't go away, either by holding a ref on
* task or by holding tasklist_lock to prevent it from being unlinked.
*/
#define __task_cred(task) \
((const struct cred *)(rcu_dereference((task)->real_cred)))
/**
* get_task_cred - Get another task's objective credentials
* @task: The task to query
*
* Get the objective credentials of a task, pinning them so that they can't go
* away. Accessing a task's credentials directly is not permitted.
*
* The caller must make sure task doesn't go away, either by holding a ref on
* task or by holding tasklist_lock to prevent it from being unlinked.
*/
#define get_task_cred(task) \
({ \
struct cred *__cred; \
rcu_read_lock(); \
__cred = (struct cred *) __task_cred((task)); \
get_cred(__cred); \
rcu_read_unlock(); \
__cred; \
})
/**
* get_current_cred - Get the current task's subjective credentials
*
* Get the subjective credentials of the current task, pinning them so that
* they can't go away. Accessing the current task's credentials directly is
* not permitted.
*/
#define get_current_cred() \
(get_cred(current_cred()))
/**
* get_current_user - Get the current task's user_struct
*
* Get the user record of the current task, pinning it so that it can't go
* away.
*/
#define get_current_user() \
({ \
struct user_struct *__u; \
struct cred *__cred; \
__cred = (struct cred *) current_cred(); \
__u = get_uid(__cred->user); \
__u; \
})
/**
* get_current_groups - Get the current task's supplementary group list
*
* Get the supplementary group list of the current task, pinning it so that it
* can't go away.
*/
#define get_current_groups() \
({ \
struct group_info *__groups; \
struct cred *__cred; \
__cred = (struct cred *) current_cred(); \
__groups = get_group_info(__cred->group_info); \
__groups; \
})
#define task_cred_xxx(task, xxx) \
({ \
__typeof__(((struct cred *)NULL)->xxx) ___val; \
rcu_read_lock(); \
___val = __task_cred((task))->xxx; \
rcu_read_unlock(); \
___val; \
})
#define task_uid(task) (task_cred_xxx((task), uid))
#define task_euid(task) (task_cred_xxx((task), euid))
#define current_cred_xxx(xxx) \
({ \
current->cred->xxx; \
})
#define current_uid() (current_cred_xxx(uid))
#define current_gid() (current_cred_xxx(gid))
#define current_euid() (current_cred_xxx(euid))
#define current_egid() (current_cred_xxx(egid))
#define current_suid() (current_cred_xxx(suid))
#define current_sgid() (current_cred_xxx(sgid))
#define current_fsuid() (current_cred_xxx(fsuid))
#define current_fsgid() (current_cred_xxx(fsgid))
#define current_cap() (current_cred_xxx(cap_effective))
#define current_user() (current_cred_xxx(user))
#define current_user_ns() (current_cred_xxx(user)->user_ns)
#define current_security() (current_cred_xxx(security))
#define current_uid_gid(_uid, _gid) \
do { \
*(_uid) = current->uid; \
*(_gid) = current->gid; \
const struct cred *__cred; \
__cred = current_cred(); \
*(_uid) = __cred->uid; \
*(_gid) = __cred->gid; \
} while(0)
#define current_euid_egid(_uid, _gid) \
#define current_euid_egid(_euid, _egid) \
do { \
*(_uid) = current->euid; \
*(_gid) = current->egid; \
const struct cred *__cred; \
__cred = current_cred(); \
*(_euid) = __cred->euid; \
*(_egid) = __cred->egid; \
} while(0)
#define current_fsuid_fsgid(_uid, _gid) \
#define current_fsuid_fsgid(_fsuid, _fsgid) \
do { \
*(_uid) = current->fsuid; \
*(_gid) = current->fsgid; \
const struct cred *__cred; \
__cred = current_cred(); \
*(_fsuid) = __cred->fsuid; \
*(_fsgid) = __cred->fsgid; \
} while(0)
#endif /* _LINUX_CRED_H */

View File

@@ -36,7 +36,8 @@
#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
#define CRYPTO_ALG_TYPE_DIGEST 0x00000008
#define CRYPTO_ALG_TYPE_HASH 0x00000009
#define CRYPTO_ALG_TYPE_HASH 0x00000008
#define CRYPTO_ALG_TYPE_SHASH 0x00000009
#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
@@ -220,6 +221,7 @@ struct ablkcipher_alg {
struct ahash_alg {
int (*init)(struct ahash_request *req);
int (*reinit)(struct ahash_request *req);
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*digest)(struct ahash_request *req);
@@ -480,6 +482,8 @@ struct crypto_tfm {
struct compress_tfm compress;
struct rng_tfm rng;
} crt_u;
void (*exit)(struct crypto_tfm *tfm);
struct crypto_alg *__crt_alg;
@@ -544,7 +548,9 @@ struct crypto_attr_u32 {
* Transform user interface.
*/
struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags);
struct crypto_tfm *crypto_alloc_tfm(const char *alg_name,
const struct crypto_type *frontend,
u32 type, u32 mask);
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_free_tfm(struct crypto_tfm *tfm);

include/linux/dcbnl.h (new file, 340 lines)
View File

@@ -0,0 +1,340 @@
/*
* Copyright (c) 2008, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
* Author: Lucy Liu <lucy.liu@intel.com>
*/
#ifndef __LINUX_DCBNL_H__
#define __LINUX_DCBNL_H__
#define DCB_PROTO_VERSION 1
struct dcbmsg {
unsigned char dcb_family;
__u8 cmd;
__u16 dcb_pad;
};
/**
* enum dcbnl_commands - supported DCB commands
*
* @DCB_CMD_UNDEFINED: unspecified command to catch errors
* @DCB_CMD_GSTATE: request the state of DCB in the device
* @DCB_CMD_SSTATE: set the state of DCB in the device
* @DCB_CMD_PGTX_GCFG: request the priority group configuration for Tx
* @DCB_CMD_PGTX_SCFG: set the priority group configuration for Tx
* @DCB_CMD_PGRX_GCFG: request the priority group configuration for Rx
* @DCB_CMD_PGRX_SCFG: set the priority group configuration for Rx
* @DCB_CMD_PFC_GCFG: request the priority flow control configuration
* @DCB_CMD_PFC_SCFG: set the priority flow control configuration
* @DCB_CMD_SET_ALL: apply all changes to the underlying device
* @DCB_CMD_GPERM_HWADDR: get the permanent MAC address of the underlying
* device. Only useful when using bonding.
* @DCB_CMD_GCAP: request the DCB capabilities of the device
* @DCB_CMD_GNUMTCS: get the number of traffic classes currently supported
* @DCB_CMD_SNUMTCS: set the number of traffic classes
* @DCB_CMD_GBCN: get backward congestion notification configuration
* @DCB_CMD_SBCN: set backward congestion notification configuration
*/
enum dcbnl_commands {
DCB_CMD_UNDEFINED,
DCB_CMD_GSTATE,
DCB_CMD_SSTATE,
DCB_CMD_PGTX_GCFG,
DCB_CMD_PGTX_SCFG,
DCB_CMD_PGRX_GCFG,
DCB_CMD_PGRX_SCFG,
DCB_CMD_PFC_GCFG,
DCB_CMD_PFC_SCFG,
DCB_CMD_SET_ALL,
DCB_CMD_GPERM_HWADDR,
DCB_CMD_GCAP,
DCB_CMD_GNUMTCS,
DCB_CMD_SNUMTCS,
DCB_CMD_PFC_GSTATE,
DCB_CMD_PFC_SSTATE,
DCB_CMD_BCN_GCFG,
DCB_CMD_BCN_SCFG,
__DCB_CMD_ENUM_MAX,
DCB_CMD_MAX = __DCB_CMD_ENUM_MAX - 1,
};
/**
* enum dcbnl_attrs - DCB top-level netlink attributes
*
* @DCB_ATTR_UNDEFINED: unspecified attribute to catch errors
* @DCB_ATTR_IFNAME: interface name of the underlying device (NLA_STRING)
* @DCB_ATTR_STATE: enable state of DCB in the device (NLA_U8)
* @DCB_ATTR_PFC_STATE: enable state of PFC in the device (NLA_U8)
* @DCB_ATTR_PFC_CFG: priority flow control configuration (NLA_NESTED)
* @DCB_ATTR_NUM_TC: number of traffic classes supported in the device (NLA_U8)
* @DCB_ATTR_PG_CFG: priority group configuration (NLA_NESTED)
* @DCB_ATTR_SET_ALL: bool to commit changes to hardware or not (NLA_U8)
* @DCB_ATTR_PERM_HWADDR: MAC address of the physical device (NLA_NESTED)
* @DCB_ATTR_CAP: DCB capabilities of the device (NLA_NESTED)
* @DCB_ATTR_NUMTCS: number of traffic classes supported (NLA_NESTED)
* @DCB_ATTR_BCN: backward congestion notification configuration (NLA_NESTED)
*/
enum dcbnl_attrs {
DCB_ATTR_UNDEFINED,
DCB_ATTR_IFNAME,
DCB_ATTR_STATE,
DCB_ATTR_PFC_STATE,
DCB_ATTR_PFC_CFG,
DCB_ATTR_NUM_TC,
DCB_ATTR_PG_CFG,
DCB_ATTR_SET_ALL,
DCB_ATTR_PERM_HWADDR,
DCB_ATTR_CAP,
DCB_ATTR_NUMTCS,
DCB_ATTR_BCN,
__DCB_ATTR_ENUM_MAX,
DCB_ATTR_MAX = __DCB_ATTR_ENUM_MAX - 1,
};
/**
* enum dcbnl_pfc_attrs - DCB Priority Flow Control user priority nested attrs
*
* @DCB_PFC_UP_ATTR_UNDEFINED: unspecified attribute to catch errors
* @DCB_PFC_UP_ATTR_0: Priority Flow Control value for User Priority 0 (NLA_U8)
* @DCB_PFC_UP_ATTR_1: Priority Flow Control value for User Priority 1 (NLA_U8)
* @DCB_PFC_UP_ATTR_2: Priority Flow Control value for User Priority 2 (NLA_U8)
* @DCB_PFC_UP_ATTR_3: Priority Flow Control value for User Priority 3 (NLA_U8)
* @DCB_PFC_UP_ATTR_4: Priority Flow Control value for User Priority 4 (NLA_U8)
* @DCB_PFC_UP_ATTR_5: Priority Flow Control value for User Priority 5 (NLA_U8)
* @DCB_PFC_UP_ATTR_6: Priority Flow Control value for User Priority 6 (NLA_U8)
* @DCB_PFC_UP_ATTR_7: Priority Flow Control value for User Priority 7 (NLA_U8)
* @DCB_PFC_UP_ATTR_MAX: highest attribute number currently defined
* @DCB_PFC_UP_ATTR_ALL: apply to all priority flow control attrs (NLA_FLAG)
*
*/
enum dcbnl_pfc_up_attrs {
DCB_PFC_UP_ATTR_UNDEFINED,
DCB_PFC_UP_ATTR_0,
DCB_PFC_UP_ATTR_1,
DCB_PFC_UP_ATTR_2,
DCB_PFC_UP_ATTR_3,
DCB_PFC_UP_ATTR_4,
DCB_PFC_UP_ATTR_5,
DCB_PFC_UP_ATTR_6,
DCB_PFC_UP_ATTR_7,
DCB_PFC_UP_ATTR_ALL,
__DCB_PFC_UP_ATTR_ENUM_MAX,
DCB_PFC_UP_ATTR_MAX = __DCB_PFC_UP_ATTR_ENUM_MAX - 1,
};
/**
* enum dcbnl_pg_attrs - DCB Priority Group attributes
*
* @DCB_PG_ATTR_UNDEFINED: unspecified attribute to catch errors
* @DCB_PG_ATTR_TC_0: Priority Group Traffic Class 0 configuration (NLA_NESTED)
* @DCB_PG_ATTR_TC_1: Priority Group Traffic Class 1 configuration (NLA_NESTED)
* @DCB_PG_ATTR_TC_2: Priority Group Traffic Class 2 configuration (NLA_NESTED)
* @DCB_PG_ATTR_TC_3: Priority Group Traffic Class 3 configuration (NLA_NESTED)
* @DCB_PG_ATTR_TC_4: Priority Group Traffic Class 4 configuration (NLA_NESTED)
* @DCB_PG_ATTR_TC_5: Priority Group Traffic Class 5 configuration (NLA_NESTED)
* @DCB_PG_ATTR_TC_6: Priority Group Traffic Class 6 configuration (NLA_NESTED)
* @DCB_PG_ATTR_TC_7: Priority Group Traffic Class 7 configuration (NLA_NESTED)
* @DCB_PG_ATTR_TC_MAX: highest attribute number currently defined
* @DCB_PG_ATTR_TC_ALL: apply to all traffic classes (NLA_NESTED)
* @DCB_PG_ATTR_BW_ID_0: Percent of link bandwidth for Priority Group 0 (NLA_U8)
* @DCB_PG_ATTR_BW_ID_1: Percent of link bandwidth for Priority Group 1 (NLA_U8)
* @DCB_PG_ATTR_BW_ID_2: Percent of link bandwidth for Priority Group 2 (NLA_U8)
* @DCB_PG_ATTR_BW_ID_3: Percent of link bandwidth for Priority Group 3 (NLA_U8)
* @DCB_PG_ATTR_BW_ID_4: Percent of link bandwidth for Priority Group 4 (NLA_U8)
* @DCB_PG_ATTR_BW_ID_5: Percent of link bandwidth for Priority Group 5 (NLA_U8)
* @DCB_PG_ATTR_BW_ID_6: Percent of link bandwidth for Priority Group 6 (NLA_U8)
* @DCB_PG_ATTR_BW_ID_7: Percent of link bandwidth for Priority Group 7 (NLA_U8)
* @DCB_PG_ATTR_BW_ID_MAX: highest attribute number currently defined
* @DCB_PG_ATTR_BW_ID_ALL: apply to all priority groups (NLA_FLAG)
*
*/
enum dcbnl_pg_attrs {
DCB_PG_ATTR_UNDEFINED,
DCB_PG_ATTR_TC_0,
DCB_PG_ATTR_TC_1,
DCB_PG_ATTR_TC_2,
DCB_PG_ATTR_TC_3,
DCB_PG_ATTR_TC_4,
DCB_PG_ATTR_TC_5,
DCB_PG_ATTR_TC_6,
DCB_PG_ATTR_TC_7,
DCB_PG_ATTR_TC_MAX,
DCB_PG_ATTR_TC_ALL,
DCB_PG_ATTR_BW_ID_0,
DCB_PG_ATTR_BW_ID_1,
DCB_PG_ATTR_BW_ID_2,
DCB_PG_ATTR_BW_ID_3,
DCB_PG_ATTR_BW_ID_4,
DCB_PG_ATTR_BW_ID_5,
DCB_PG_ATTR_BW_ID_6,
DCB_PG_ATTR_BW_ID_7,
DCB_PG_ATTR_BW_ID_MAX,
DCB_PG_ATTR_BW_ID_ALL,
__DCB_PG_ATTR_ENUM_MAX,
DCB_PG_ATTR_MAX = __DCB_PG_ATTR_ENUM_MAX - 1,
};
/**
* enum dcbnl_tc_attrs - DCB Traffic Class attributes
*
* @DCB_TC_ATTR_PARAM_UNDEFINED: unspecified attribute to catch errors
* @DCB_TC_ATTR_PARAM_PGID: (NLA_U8) Priority group the traffic class belongs to
* Valid values are: 0-7
* @DCB_TC_ATTR_PARAM_UP_MAPPING: (NLA_U8) Traffic class to user priority map
* Some devices may not support changing the
* user priority map of a TC.
* @DCB_TC_ATTR_PARAM_STRICT_PRIO: (NLA_U8) Strict priority setting
* 0 - none
* 1 - group strict
* 2 - link strict
* @DCB_TC_ATTR_PARAM_BW_PCT: optional - (NLA_U8) If supported by the device and
* not configured to use link strict priority,
* this is the percentage of bandwidth of the
* priority group this traffic class belongs to
* @DCB_TC_ATTR_PARAM_ALL: (NLA_FLAG) all traffic class parameters
*
*/
enum dcbnl_tc_attrs {
DCB_TC_ATTR_PARAM_UNDEFINED,
DCB_TC_ATTR_PARAM_PGID,
DCB_TC_ATTR_PARAM_UP_MAPPING,
DCB_TC_ATTR_PARAM_STRICT_PRIO,
DCB_TC_ATTR_PARAM_BW_PCT,
DCB_TC_ATTR_PARAM_ALL,
__DCB_TC_ATTR_PARAM_ENUM_MAX,
DCB_TC_ATTR_PARAM_MAX = __DCB_TC_ATTR_PARAM_ENUM_MAX - 1,
};
/**
* enum dcbnl_cap_attrs - DCB Capability attributes
*
* @DCB_CAP_ATTR_UNDEFINED: unspecified attribute to catch errors
* @DCB_CAP_ATTR_ALL: (NLA_FLAG) all capability parameters
* @DCB_CAP_ATTR_PG: (NLA_U8) device supports Priority Groups
* @DCB_CAP_ATTR_PFC: (NLA_U8) device supports Priority Flow Control
* @DCB_CAP_ATTR_UP2TC: (NLA_U8) device supports user priority to
* traffic class mapping
* @DCB_CAP_ATTR_PG_TCS: (NLA_U8) bitmap where each bit represents a
* number of traffic classes the device
* can be configured to use for Priority Groups
* @DCB_CAP_ATTR_PFC_TCS: (NLA_U8) bitmap where each bit represents a
* number of traffic classes the device can be
* configured to use for Priority Flow Control
* @DCB_CAP_ATTR_GSP: (NLA_U8) device supports group strict priority
* @DCB_CAP_ATTR_BCN: (NLA_U8) device supports Backwards Congestion
* Notification
*/
enum dcbnl_cap_attrs {
DCB_CAP_ATTR_UNDEFINED,
DCB_CAP_ATTR_ALL,
DCB_CAP_ATTR_PG,
DCB_CAP_ATTR_PFC,
DCB_CAP_ATTR_UP2TC,
DCB_CAP_ATTR_PG_TCS,
DCB_CAP_ATTR_PFC_TCS,
DCB_CAP_ATTR_GSP,
DCB_CAP_ATTR_BCN,
__DCB_CAP_ATTR_ENUM_MAX,
DCB_CAP_ATTR_MAX = __DCB_CAP_ATTR_ENUM_MAX - 1,
};
/**
* enum dcbnl_numtcs_attrs - number of traffic classes
*
* @DCB_NUMTCS_ATTR_UNDEFINED: unspecified attribute to catch errors
* @DCB_NUMTCS_ATTR_ALL: (NLA_FLAG) all traffic class attributes
* @DCB_NUMTCS_ATTR_PG: (NLA_U8) number of traffic classes used for
* priority groups
* @DCB_NUMTCS_ATTR_PFC: (NLA_U8) number of traffic classes which can
* support priority flow control
*/
enum dcbnl_numtcs_attrs {
DCB_NUMTCS_ATTR_UNDEFINED,
DCB_NUMTCS_ATTR_ALL,
DCB_NUMTCS_ATTR_PG,
DCB_NUMTCS_ATTR_PFC,
__DCB_NUMTCS_ATTR_ENUM_MAX,
DCB_NUMTCS_ATTR_MAX = __DCB_NUMTCS_ATTR_ENUM_MAX - 1,
};
enum dcbnl_bcn_attrs{
DCB_BCN_ATTR_UNDEFINED = 0,
DCB_BCN_ATTR_RP_0,
DCB_BCN_ATTR_RP_1,
DCB_BCN_ATTR_RP_2,
DCB_BCN_ATTR_RP_3,
DCB_BCN_ATTR_RP_4,
DCB_BCN_ATTR_RP_5,
DCB_BCN_ATTR_RP_6,
DCB_BCN_ATTR_RP_7,
DCB_BCN_ATTR_RP_ALL,
DCB_BCN_ATTR_BCNA_0,
DCB_BCN_ATTR_BCNA_1,
DCB_BCN_ATTR_ALPHA,
DCB_BCN_ATTR_BETA,
DCB_BCN_ATTR_GD,
DCB_BCN_ATTR_GI,
DCB_BCN_ATTR_TMAX,
DCB_BCN_ATTR_TD,
DCB_BCN_ATTR_RMIN,
DCB_BCN_ATTR_W,
DCB_BCN_ATTR_RD,
DCB_BCN_ATTR_RU,
DCB_BCN_ATTR_WRTT,
DCB_BCN_ATTR_RI,
DCB_BCN_ATTR_C,
DCB_BCN_ATTR_ALL,
__DCB_BCN_ATTR_ENUM_MAX,
DCB_BCN_ATTR_MAX = __DCB_BCN_ATTR_ENUM_MAX - 1,
};
/**
* enum dcb_general_attr_values - general DCB attribute values
*
* @DCB_ATTR_VALUE_UNDEFINED: value used to indicate an attribute is not supported
*
*/
enum dcb_general_attr_values {
DCB_ATTR_VALUE_UNDEFINED = 0xff
};
#endif /* __LINUX_DCBNL_H__ */

View File

@@ -168,6 +168,8 @@ enum {
DCCPO_MIN_CCID_SPECIFIC = 128,
DCCPO_MAX_CCID_SPECIFIC = 255,
};
/* maximum size of a single TLV-encoded DCCP option (sans type/len bytes) */
#define DCCP_SINGLE_OPT_MAXLEN 253
/* DCCP CCIDS */
enum {
@@ -176,29 +178,23 @@ enum {
};
/* DCCP features (RFC 4340 section 6.4) */
enum {
enum dccp_feature_numbers {
DCCPF_RESERVED = 0,
DCCPF_CCID = 1,
DCCPF_SHORT_SEQNOS = 2, /* XXX: not yet implemented */
DCCPF_SHORT_SEQNOS = 2,
DCCPF_SEQUENCE_WINDOW = 3,
DCCPF_ECN_INCAPABLE = 4, /* XXX: not yet implemented */
DCCPF_ECN_INCAPABLE = 4,
DCCPF_ACK_RATIO = 5,
DCCPF_SEND_ACK_VECTOR = 6,
DCCPF_SEND_NDP_COUNT = 7,
DCCPF_MIN_CSUM_COVER = 8,
DCCPF_DATA_CHECKSUM = 9, /* XXX: not yet implemented */
DCCPF_DATA_CHECKSUM = 9,
/* 10-127 reserved */
DCCPF_MIN_CCID_SPECIFIC = 128,
DCCPF_SEND_LEV_RATE = 192, /* RFC 4342, sec. 8.4 */
DCCPF_MAX_CCID_SPECIFIC = 255,
};
/* this structure is argument to DCCP_SOCKOPT_CHANGE_X */
struct dccp_so_feat {
__u8 dccpsf_feat;
__u8 __user *dccpsf_val;
__u8 dccpsf_len;
};
/* DCCP socket options */
#define DCCP_SOCKOPT_PACKET_SIZE 1 /* XXX deprecated, without effect */
#define DCCP_SOCKOPT_SERVICE 2
@@ -208,6 +204,10 @@ struct dccp_so_feat {
#define DCCP_SOCKOPT_SERVER_TIMEWAIT 6
#define DCCP_SOCKOPT_SEND_CSCOV 10
#define DCCP_SOCKOPT_RECV_CSCOV 11
#define DCCP_SOCKOPT_AVAILABLE_CCIDS 12
#define DCCP_SOCKOPT_CCID 13
#define DCCP_SOCKOPT_TX_CCID 14
#define DCCP_SOCKOPT_RX_CCID 15
#define DCCP_SOCKOPT_CCID_RX_INFO 128
#define DCCP_SOCKOPT_CCID_TX_INFO 192
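The new socket options above let an application select the congestion control module. A hedged userspace sketch choosing CCID-2 for both half-connections (SOL_DCCP and the one-byte value format are assumptions based on the generic DCCP sockopt interface, not shown in this hunk):

#include <stdint.h>
#include <sys/socket.h>
#include <linux/dccp.h>

#ifndef SOL_DCCP
#define SOL_DCCP 269			/* value from <linux/socket.h> */
#endif

/* Hypothetical helper: request CCID 2 for both the TX and RX half-connections. */
static int example_set_ccid(int fd)
{
	uint8_t ccid = 2;		/* DCCPC_CCID2 */

	return setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CCID, &ccid, sizeof(ccid));
}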
@@ -360,7 +360,6 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
#define DCCPF_INITIAL_SEQUENCE_WINDOW 100
#define DCCPF_INITIAL_ACK_RATIO 2
#define DCCPF_INITIAL_CCID DCCPC_CCID2
#define DCCPF_INITIAL_SEND_ACK_VECTOR 1
/* FIXME: for now we default to 1 but it should really be 0 */
#define DCCPF_INITIAL_SEND_NDP_COUNT 1
@@ -370,20 +369,11 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
* Will be used to pass the state from dccp_request_sock to dccp_sock.
*
* @dccpms_sequence_window - Sequence Window Feature (section 7.5.2)
* @dccpms_ccid - Congestion Control Id (CCID) (section 10)
* @dccpms_send_ack_vector - Send Ack Vector Feature (section 11.5)
* @dccpms_send_ndp_count - Send NDP Count Feature (7.7.2)
* @dccpms_ack_ratio - Ack Ratio Feature (section 11.3)
* @dccpms_pending - List of features being negotiated
* @dccpms_conf -
*/
struct dccp_minisock {
__u64 dccpms_sequence_window;
__u8 dccpms_rx_ccid;
__u8 dccpms_tx_ccid;
__u8 dccpms_send_ack_vector;
__u8 dccpms_send_ndp_count;
__u8 dccpms_ack_ratio;
struct list_head dccpms_pending;
struct list_head dccpms_conf;
};
@@ -411,6 +401,7 @@ extern void dccp_minisock_init(struct dccp_minisock *dmsk);
* @dreq_iss: initial sequence number sent on the Response (RFC 4340, 7.1)
* @dreq_isr: initial sequence number received on the Request
* @dreq_service: service code present on the Request (there is just one)
* @dreq_featneg: feature negotiation options for this connection
* The following two fields are analogous to the ones in dccp_sock:
* @dreq_timestamp_echo: last received timestamp to echo (13.1)
* @dreq_timestamp_time: the time of receiving the last @dreq_timestamp_echo
@@ -420,6 +411,7 @@ struct dccp_request_sock {
__u64 dreq_iss;
__u64 dreq_isr;
__be32 dreq_service;
struct list_head dreq_featneg;
__u32 dreq_timestamp_echo;
__u32 dreq_timestamp_time;
};
@@ -493,10 +485,12 @@ struct dccp_ackvec;
* @dccps_r_ack_ratio - feature-remote Ack Ratio
* @dccps_pcslen - sender partial checksum coverage (via sockopt)
* @dccps_pcrlen - receiver partial checksum coverage (via sockopt)
* @dccps_send_ndp_count - local Send NDP Count feature (7.7.2)
* @dccps_ndp_count - number of Non Data Packets since last data packet
* @dccps_mss_cache - current value of MSS (path MTU minus header sizes)
* @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4)
* @dccps_minisock - associated minisock (accessed via dccp_msk)
* @dccps_featneg - tracks feature-negotiation state (mostly during handshake)
* @dccps_hc_rx_ackvec - rx half connection ack vector
* @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection)
* @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection)
@@ -529,11 +523,13 @@ struct dccp_sock {
__u32 dccps_timestamp_time;
__u16 dccps_l_ack_ratio;
__u16 dccps_r_ack_ratio;
__u16 dccps_pcslen;
__u16 dccps_pcrlen;
__u8 dccps_pcslen:4;
__u8 dccps_pcrlen:4;
__u8 dccps_send_ndp_count:1;
__u64 dccps_ndp_count:48;
unsigned long dccps_rate_last;
struct dccp_minisock dccps_minisock;
struct list_head dccps_featneg;
struct dccp_ackvec *dccps_hc_rx_ackvec;
struct ccid *dccps_hc_rx_ccid;
struct ccid *dccps_hc_tx_ccid;

View File

@@ -44,6 +44,7 @@ extern const struct dmi_device * dmi_find_device(int type, const char *name,
extern void dmi_scan_machine(void);
extern int dmi_get_year(int field);
extern int dmi_name_in_vendors(const char *str);
extern int dmi_name_in_serial(const char *str);
extern int dmi_available;
extern int dmi_walk(void (*decode)(const struct dmi_header *));
@@ -56,6 +57,7 @@ static inline const struct dmi_device * dmi_find_device(int type, const char *na
static inline void dmi_scan_machine(void) { return; }
static inline int dmi_get_year(int year) { return 0; }
static inline int dmi_name_in_vendors(const char *s) { return 0; }
static inline int dmi_name_in_serial(const char *s) { return 0; }
#define dmi_available 0
static inline int dmi_walk(void (*decode)(const struct dmi_header *))
{ return -1; }

View File

@@ -27,6 +27,7 @@
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <asm/unaligned.h>
#ifdef __KERNEL__
extern __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
@@ -41,6 +42,10 @@ extern int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh);
extern void eth_header_cache_update(struct hh_cache *hh,
const struct net_device *dev,
const unsigned char *haddr);
extern int eth_mac_addr(struct net_device *dev, void *p);
extern int eth_change_mtu(struct net_device *dev, int new_mtu);
extern int eth_validate_addr(struct net_device *dev);
extern struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count);
@@ -136,6 +141,47 @@ static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2)
BUILD_BUG_ON(ETH_ALEN != 6);
return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
}
static inline unsigned long zap_last_2bytes(unsigned long value)
{
#ifdef __BIG_ENDIAN
return value >> 16;
#else
return value << 16;
#endif
}
/**
* compare_ether_addr_64bits - Compare two Ethernet addresses
* @addr1: Pointer to an array of 8 bytes
* @addr2: Pointer to another array of 8 bytes
*
* Compare two Ethernet addresses; returns 0 if equal.
* Same result as "memcmp(addr1, addr2, ETH_ALEN)" but without conditional
* branches, and possibly long-word memory accesses on CPUs allowing cheap
* unaligned memory reads.
* arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 }
*
* Please note that alignment of addr1 & addr2 is only guaranteed to be 16 bits.
*/
static inline unsigned compare_ether_addr_64bits(const u8 addr1[6+2],
const u8 addr2[6+2])
{
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
unsigned long fold = ((*(unsigned long *)addr1) ^
(*(unsigned long *)addr2));
if (sizeof(fold) == 8)
return zap_last_2bytes(fold) != 0;
fold |= zap_last_2bytes((*(unsigned long *)(addr1 + 4)) ^
(*(unsigned long *)(addr2 + 4)));
return fold != 0;
#else
return compare_ether_addr(addr1, addr2);
#endif
}
#endif /* __KERNEL__ */
#endif /* _LINUX_ETHERDEVICE_H */
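As the kernel-doc notes, both arguments must be 8-byte buffers (6 address bytes plus 2 bytes of padding) with at least 16-bit alignment. A hedged sketch of a caller honouring that contract (names illustrative):

#include <linux/etherdevice.h>

/* Hypothetical example: addresses kept in 8-byte, 16-bit aligned buffers. */
static int example_same_station(const u8 addr1[ETH_ALEN + 2],
				const u8 addr2[ETH_ALEN + 2])
{
	return compare_ether_addr_64bits(addr1, addr2) == 0;	/* 0 means equal */
}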

View File

@@ -467,6 +467,8 @@ struct ethtool_ops {
#define ETHTOOL_GRXFH 0x00000029 /* Get RX flow hash configuration */
#define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */
#define ETHTOOL_GGRO 0x0000002b /* Get GRO enable (ethtool_value) */
#define ETHTOOL_SGRO 0x0000002c /* Set GRO enable (ethtool_value) */
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET

View File

@@ -888,7 +888,7 @@ struct fb_info {
#define fb_writeq sbus_writeq
#define fb_memset sbus_memset_io
#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || (defined(__sh__) && !defined(__SH5__)) || defined(__powerpc__) || defined(__avr32__)
#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__)
#define fb_readb __raw_readb
#define fb_readw __raw_readw

View File

@@ -27,6 +27,7 @@
#ifdef __KERNEL__
extern __be16 fddi_type_trans(struct sk_buff *skb,
struct net_device *dev);
extern int fddi_change_mtu(struct net_device *dev, int new_mtu);
extern struct net_device *alloc_fddidev(int sizeof_priv);
#endif

View File

@@ -122,7 +122,8 @@ struct sock_fprog /* Required for SO_ATTACH_FILTER. */
#define SKF_AD_PKTTYPE 4
#define SKF_AD_IFINDEX 8
#define SKF_AD_NLATTR 12
#define SKF_AD_MAX 16
#define SKF_AD_NLATTR_NEST 16
#define SKF_AD_MAX 20
#define SKF_NET_OFF (-0x100000)
#define SKF_LL_OFF (-0x200000)

View File

@@ -316,6 +316,7 @@ struct poll_table_struct;
struct kstatfs;
struct vm_area_struct;
struct vfsmount;
struct cred;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
@@ -827,7 +828,7 @@ struct file {
fmode_t f_mode;
loff_t f_pos;
struct fown_struct f_owner;
unsigned int f_uid, f_gid;
const struct cred *f_cred;
struct file_ra_state f_ra;
u64 f_version;
@@ -1194,7 +1195,7 @@ enum {
#define has_fs_excl() atomic_read(&current->fs_excl)
#define is_owner_or_cap(inode) \
((current->fsuid == (inode)->i_uid) || capable(CAP_FOWNER))
((current_fsuid() == (inode)->i_uid) || capable(CAP_FOWNER))
/* not quite ready to be deprecated, but... */
extern void lock_super(struct super_block *);
@@ -1674,7 +1675,8 @@ extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
extern long do_sys_open(int dfd, const char __user *filename, int flags,
int mode);
extern struct file *filp_open(const char *, int, int);
extern struct file * dentry_open(struct dentry *, struct vfsmount *, int);
extern struct file * dentry_open(struct dentry *, struct vfsmount *, int,
const struct cred *);
extern int filp_close(struct file *, fl_owner_t id);
extern char * getname(const char __user *);
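dentry_open() now takes the credentials to attach to the new struct file. A hedged sketch of an updated in-kernel call site (the helper name is illustrative; the caller supplies dentry/vfsmount references, which dentry_open() consumes as before):

#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/cred.h>

/* Hypothetical helper: open a dentry/vfsmount pair read-only with the caller's creds. */
static struct file *example_open_readonly(struct dentry *dentry, struct vfsmount *mnt)
{
	return dentry_open(dentry, mnt, O_RDONLY, current_cred());
}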

View File

@@ -47,12 +47,7 @@
struct gianfar_platform_data {
/* device specific information */
u32 device_flags;
/* board specific information */
u32 board_flags;
int mdio_bus; /* Bus controlled by us */
char bus_id[MII_BUS_ID_SIZE]; /* Bus PHY is on */
u32 phy_id;
u8 mac_addr[6];
char bus_id[BUS_ID_SIZE];
phy_interface_t interface;
};
@@ -61,17 +56,6 @@ struct gianfar_mdio_data {
int irq[32];
};
/* Flags related to gianfar device features */
#define FSL_GIANFAR_DEV_HAS_GIGABIT 0x00000001
#define FSL_GIANFAR_DEV_HAS_COALESCE 0x00000002
#define FSL_GIANFAR_DEV_HAS_RMON 0x00000004
#define FSL_GIANFAR_DEV_HAS_MULTI_INTR 0x00000008
#define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010
#define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020
#define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040
#define FSL_GIANFAR_DEV_HAS_PADDING 0x00000080
#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
/* Flags in gianfar_platform_data */
#define FSL_GIANFAR_BRD_HAS_PHY_INTR 0x00000001 /* set or use a timer */
#define FSL_GIANFAR_BRD_IS_REDUCED 0x00000002 /* Set if RGMII, RMII */

View File

@@ -8,6 +8,8 @@
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#ifdef CONFIG_FUNCTION_TRACER
@@ -24,6 +26,45 @@ struct ftrace_ops {
struct ftrace_ops *next;
};
extern int function_trace_stop;
/*
* Type of the current tracing.
*/
enum ftrace_tracing_type_t {
FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
FTRACE_TYPE_RETURN, /* Hook the return of the function */
};
/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;
/**
* ftrace_stop - stop function tracer.
*
* A quick way to stop the function tracer. Note this is an on/off switch;
* it is not something that is recursive like preempt_disable.
* This does not disable the calling of mcount, it only stops the
* calling of functions from mcount.
*/
static inline void ftrace_stop(void)
{
function_trace_stop = 1;
}
/**
* ftrace_start - start the function tracer.
*
* This function is the inverse of ftrace_stop. This does not enable
* the function tracing if the function tracer is disabled. This only
* sets the function tracer flag to continue calling the functions
* from mcount.
*/
static inline void ftrace_start(void)
{
function_trace_stop = 0;
}
/*
* The ftrace_ops must be a static and should also
* be read_mostly. These functions do modify read_mostly variables
@@ -42,9 +83,21 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1);
# define unregister_ftrace_function(ops) do { } while (0)
# define clear_ftrace_function(ops) do { } while (0)
static inline void ftrace_kill(void) { }
static inline void ftrace_stop(void) { }
static inline void ftrace_start(void) { }
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_STACK_TRACER
extern int stack_tracer_enabled;
int
stack_trace_sysctl(struct ctl_table *table, int write,
struct file *file, void __user *buffer, size_t *lenp,
loff_t *ppos);
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */
#include <asm/ftrace.h>
enum {
FTRACE_FL_FREE = (1 << 0),
@@ -60,6 +113,7 @@ struct dyn_ftrace {
struct list_head list;
unsigned long ip; /* address of mcount call-site */
unsigned long flags;
struct dyn_arch_ftrace arch;
};
int ftrace_force_update(void);
@@ -67,19 +121,25 @@ void ftrace_set_filter(unsigned char *buf, int len, int reset);
/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern unsigned char *ftrace_nop_replace(void);
extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr);
extern int ftrace_dyn_arch_init(void *data);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_call(void);
extern void mcount_call(void);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif
/**
* ftrace_modify_code - modify code segment
* @ip: the address of the code segment
* @old_code: the contents of what is expected to be there
* @new_code: the code to patch in
* ftrace_make_nop - convert code into a nop
* @mod: module structure if called by module load initialization
* @rec: the mcount call site record
* @addr: the address that the call site should be calling
*
* This is a very sensitive operation and great care needs
* to be taken by the arch. The operation should carefully
@@ -87,6 +147,8 @@ extern void mcount_call(void);
* what we expect it to be, and then on success of the compare,
* it should write to the location.
*
* The code segment at @rec->ip should be a caller to @addr
*
* Return must be:
* 0 on success
* -EFAULT on error reading the location
@@ -94,8 +156,34 @@ extern void mcount_call(void);
* -EPERM on error writing to the location
* Any other value will be considered a failure.
*/
extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
unsigned char *new_code);
extern int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr);
/**
* ftrace_make_call - convert a nop call site into a call to addr
* @rec: the mcount call site record
* @addr: the address that the call site should call
*
* This is a very sensitive operation and great care needs
* to be taken by the arch. The operation should carefully
* read the location, check to see if what is read is indeed
* what we expect it to be, and then on success of the compare,
* it should write to the location.
*
* The code segment at @rec->ip should be a nop
*
* Return must be:
* 0 on success
* -EFAULT on error reading the location
* -EINVAL on a failed compare of the contents
* -EPERM on error writing to the location
* Any other value will be considered a failure.
*/
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);
extern int skip_trace(unsigned long ip);
@@ -103,7 +191,6 @@ extern void ftrace_release(void *start, unsigned long size);
extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else
# define skip_trace(ip) ({ 0; })
# define ftrace_force_update() ({ 0; })
@@ -182,6 +269,12 @@ static inline void __ftrace_enabled_restore(int enabled)
#endif
#ifdef CONFIG_TRACING
extern int ftrace_dump_on_oops;
extern void tracing_start(void);
extern void tracing_stop(void);
extern void ftrace_off_permanent(void);
extern void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
@@ -210,8 +303,11 @@ extern void ftrace_dump(void);
static inline void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
static inline int
ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0)));
ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void ftrace_off_permanent(void) { }
static inline int
ftrace_printk(const char *fmt, ...)
{
@@ -222,33 +318,178 @@ static inline void ftrace_dump(void) { }
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
extern void ftrace_init_module(unsigned long *start, unsigned long *end);
extern void ftrace_init_module(struct module *mod,
unsigned long *start, unsigned long *end);
#else
static inline void ftrace_init(void) { }
static inline void
ftrace_init_module(unsigned long *start, unsigned long *end) { }
ftrace_init_module(struct module *mod,
unsigned long *start, unsigned long *end) { }
#endif
struct boot_trace {
pid_t caller;
char func[KSYM_SYMBOL_LEN];
int result;
unsigned long long duration; /* usecs */
ktime_t calltime;
ktime_t rettime;
enum {
POWER_NONE = 0,
POWER_CSTATE = 1,
POWER_PSTATE = 2,
};
#ifdef CONFIG_BOOT_TRACER
extern void trace_boot(struct boot_trace *it, initcall_t fn);
extern void start_boot_trace(void);
extern void stop_boot_trace(void);
struct power_trace {
#ifdef CONFIG_POWER_TRACER
ktime_t stamp;
ktime_t end;
int type;
int state;
#endif
};
#ifdef CONFIG_POWER_TRACER
extern void trace_power_start(struct power_trace *it, unsigned int type,
unsigned int state);
extern void trace_power_mark(struct power_trace *it, unsigned int type,
unsigned int state);
extern void trace_power_end(struct power_trace *it);
#else
static inline void trace_boot(struct boot_trace *it, initcall_t fn) { }
static inline void start_boot_trace(void) { }
static inline void stop_boot_trace(void) { }
static inline void trace_power_start(struct power_trace *it, unsigned int type,
unsigned int state) { }
static inline void trace_power_mark(struct power_trace *it, unsigned int type,
unsigned int state) { }
static inline void trace_power_end(struct power_trace *it) { }
#endif
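A hedged usage sketch for the power-trace hooks above: an idle routine could bracket a C-state entry as shown below. The function name and the way the target C-state is obtained are illustrative only.
#include <linux/ftrace.h>

static void example_enter_idle(unsigned int target_cstate)
{
        struct power_trace it;

        trace_power_start(&it, POWER_CSTATE, target_cstate);
        /* architecture/driver specific idle entry would happen here */
        trace_power_end(&it);
}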
/*
* Structure that defines an entry function trace.
*/
struct ftrace_graph_ent {
unsigned long func; /* Current function */
int depth;
};
/*
* Structure that defines a return function trace.
*/
struct ftrace_graph_ret {
unsigned long func; /* Current function */
unsigned long long calltime;
unsigned long long rettime;
/* Number of functions that overran the depth limit for current task */
unsigned long overrun;
int depth;
};
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* Sometimes we don't want to trace a function with the function
* graph tracer but we still want it to be traced by the usual function
* tracer if the function graph tracer is not configured.
*/
#define __notrace_funcgraph notrace
/*
* We want to know which function is an entrypoint of a hardirq.
* That will help us to mark it in the output.
*/
#define __irq_entry __attribute__((__section__(".irqentry.text")))
/* Limits of hardirq entrypoints */
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
/* Type of the callback handlers for tracing function graph*/
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
trace_func_graph_ent_t entryfunc);
extern void ftrace_graph_stop(void);
/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;
extern void unregister_ftrace_graph(void);
extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
static inline int task_curr_ret_stack(struct task_struct *t)
{
return t->curr_ret_stack;
}
static inline void pause_graph_tracing(void)
{
atomic_inc(&current->tracing_graph_pause);
}
static inline void unpause_graph_tracing(void)
{
atomic_dec(&current->tracing_graph_pause);
}
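A hedged sketch of hooking the entry/return callbacks declared above; the handlers are deliberately trivial and are not taken from any in-tree tracer.
#include <linux/ftrace.h>
#include <linux/init.h>

static int example_graph_entry(struct ftrace_graph_ent *trace)
{
        /* returning 0 tells the core not to record this call's return */
        return trace->depth < FTRACE_RETFUNC_DEPTH;
}

static void example_graph_return(struct ftrace_graph_ret *trace)
{
        unsigned long long duration = trace->rettime - trace->calltime;

        (void)duration;         /* a real tracer would record this */
}

static int __init example_graph_init(void)
{
        /* note the argument order: return handler first, entry handler second */
        return register_ftrace_graph(example_graph_return, example_graph_entry);
}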
#else
#define __notrace_funcgraph
#define __irq_entry
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline int task_curr_ret_stack(struct task_struct *tsk)
{
return -1;
}
static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif
#ifdef CONFIG_TRACING
#include <linux/sched.h>
/* flags for current->trace */
enum {
TSK_TRACE_FL_TRACE_BIT = 0,
TSK_TRACE_FL_GRAPH_BIT = 1,
};
enum {
TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
};
static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}
static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}
static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
return tsk->trace & TSK_TRACE_FL_TRACE;
}
static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}
static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}
static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
return tsk->trace & TSK_TRACE_FL_GRAPH;
}
#endif /* CONFIG_TRACING */
#endif /* _LINUX_FTRACE_H */

View File

@@ -0,0 +1,13 @@
#ifndef _LINUX_FTRACE_IRQ_H
#define _LINUX_FTRACE_IRQ_H
#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
extern void ftrace_nmi_enter(void);
extern void ftrace_nmi_exit(void);
#else
static inline void ftrace_nmi_enter(void) { }
static inline void ftrace_nmi_exit(void) { }
#endif
#endif /* _LINUX_FTRACE_IRQ_H */

View File

@@ -4,6 +4,7 @@
#include <linux/preempt.h>
#include <linux/smp_lock.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <asm/hardirq.h>
#include <asm/system.h>
@@ -161,7 +162,17 @@ extern void irq_enter(void);
*/
extern void irq_exit(void);
#define nmi_enter() do { lockdep_off(); __irq_enter(); } while (0)
#define nmi_exit() do { __irq_exit(); lockdep_on(); } while (0)
#define nmi_enter() \
do { \
ftrace_nmi_enter(); \
lockdep_off(); \
__irq_enter(); \
} while (0)
#define nmi_exit() \
do { \
__irq_exit(); \
lockdep_on(); \
ftrace_nmi_exit(); \
} while (0)
#endif /* LINUX_HARDIRQ_H */

View File

@@ -43,7 +43,7 @@ struct hdlc_proto {
};
/* Pointed to by dev->priv */
/* Pointed to by netdev_priv(dev) */
typedef struct hdlc_device {
/* used by HDLC layer to take control over HDLC device from hw driver*/
int (*attach)(struct net_device *dev,
@@ -80,7 +80,7 @@ struct net_device *alloc_hdlcdev(void *priv);
static inline struct hdlc_device* dev_to_hdlc(struct net_device *dev)
{
return dev->priv;
return netdev_priv(dev);
}
static __inline__ void debug_frame(const struct sk_buff *skb)

View File

@@ -32,7 +32,9 @@ struct hippi_cb {
};
extern __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
extern int hippi_change_mtu(struct net_device *dev, int new_mtu);
extern int hippi_mac_addr(struct net_device *dev, void *p);
extern int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
extern struct net_device *alloc_hippi_dev(int sizeof_priv);
#endif

View File

@@ -12,8 +12,8 @@
* published by the Free Software Foundation.
*/
#ifndef IEEE80211_H
#define IEEE80211_H
#ifndef LINUX_IEEE80211_H
#define LINUX_IEEE80211_H
#include <linux/types.h>
#include <asm/byteorder.h>
@@ -97,7 +97,10 @@
#define IEEE80211_MAX_FRAME_LEN 2352
#define IEEE80211_MAX_SSID_LEN 32
#define IEEE80211_MAX_MESH_ID_LEN 32
#define IEEE80211_MESH_CONFIG_LEN 19
#define IEEE80211_QOS_CTL_LEN 2
#define IEEE80211_QOS_CTL_TID_MASK 0x000F
#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007
@@ -666,6 +669,13 @@ struct ieee80211_cts {
u8 ra[6];
} __attribute__ ((packed));
struct ieee80211_pspoll {
__le16 frame_control;
__le16 aid;
u8 bssid[6];
u8 ta[6];
} __attribute__ ((packed));
/**
* struct ieee80211_bar - HT Block Ack Request
*
@@ -685,28 +695,88 @@ struct ieee80211_bar {
#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000
#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004
#define IEEE80211_HT_MCS_MASK_LEN 10
/**
* struct ieee80211_mcs_info - MCS information
* @rx_mask: RX mask
* @rx_highest: highest supported RX rate
* @tx_params: TX parameters
*/
struct ieee80211_mcs_info {
u8 rx_mask[IEEE80211_HT_MCS_MASK_LEN];
__le16 rx_highest;
u8 tx_params;
u8 reserved[3];
} __attribute__((packed));
/* 802.11n HT capability MSC set */
#define IEEE80211_HT_MCS_RX_HIGHEST_MASK 0x3ff
#define IEEE80211_HT_MCS_TX_DEFINED 0x01
#define IEEE80211_HT_MCS_TX_RX_DIFF 0x02
/* value 0 == 1 stream etc */
#define IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK 0x0C
#define IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT 2
#define IEEE80211_HT_MCS_TX_MAX_STREAMS 4
#define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION 0x10
/*
* 802.11n D5.0 20.3.5 / 20.6 says:
* - indices 0 to 7 and 32 are single spatial stream
* - 8 to 31 are multiple spatial streams using equal modulation
* [8..15 for two streams, 16..23 for three and 24..31 for four]
* - remainder are multiple spatial streams using unequal modulation
*/
#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START 33
#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE \
(IEEE80211_HT_MCS_UNEQUAL_MODULATION_START / 8)
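As a hedged illustration of how the rx_mask bitmap is laid out (assuming one bit per MCS index, least significant bit first within each byte), a helper along these lines -- not part of this header -- could test whether a given index is advertised:
static inline int example_mcs_rx_supported(const struct ieee80211_mcs_info *mcs,
                                           unsigned int idx)
{
        if (idx >= IEEE80211_HT_MCS_MASK_LEN * 8)
                return 0;
        return !!(mcs->rx_mask[idx / 8] & (1 << (idx % 8)));
}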
/**
* struct ieee80211_ht_cap - HT capabilities
*
* This structure refers to "HT capabilities element" as
* described in 802.11n draft section 7.3.2.52
* This structure is the "HT capabilities element" as
* described in 802.11n D5.0 7.3.2.57
*/
struct ieee80211_ht_cap {
__le16 cap_info;
u8 ampdu_params_info;
u8 supp_mcs_set[16];
/* 16 bytes MCS information */
struct ieee80211_mcs_info mcs;
__le16 extended_ht_cap_info;
__le32 tx_BF_cap_info;
u8 antenna_selection_info;
} __attribute__ ((packed));
/* 802.11n HT capabilities masks (for cap_info) */
#define IEEE80211_HT_CAP_LDPC_CODING 0x0001
#define IEEE80211_HT_CAP_SUP_WIDTH_20_40 0x0002
#define IEEE80211_HT_CAP_SM_PS 0x000C
#define IEEE80211_HT_CAP_GRN_FLD 0x0010
#define IEEE80211_HT_CAP_SGI_20 0x0020
#define IEEE80211_HT_CAP_SGI_40 0x0040
#define IEEE80211_HT_CAP_TX_STBC 0x0080
#define IEEE80211_HT_CAP_RX_STBC 0x0300
#define IEEE80211_HT_CAP_DELAY_BA 0x0400
#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
#define IEEE80211_HT_CAP_PSMP_SUPPORT 0x2000
#define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000
#define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000
/* 802.11n HT capability AMPDU settings (for ampdu_params_info) */
#define IEEE80211_HT_AMPDU_PARM_FACTOR 0x03
#define IEEE80211_HT_AMPDU_PARM_DENSITY 0x1C
/**
* struct ieee80211_ht_cap - HT additional information
* struct ieee80211_ht_info - HT information
*
* This structure refers to "HT information element" as
* described in 802.11n draft section 7.3.2.53
* This structure is the "HT information element" as
* described in 802.11n D5.0 7.3.2.58
*/
struct ieee80211_ht_addt_info {
struct ieee80211_ht_info {
u8 control_chan;
u8 ht_param;
__le16 operation_mode;
@@ -714,36 +784,33 @@ struct ieee80211_ht_addt_info {
u8 basic_set[16];
} __attribute__ ((packed));
/* 802.11n HT capabilities masks */
#define IEEE80211_HT_CAP_SUP_WIDTH 0x0002
#define IEEE80211_HT_CAP_SM_PS 0x000C
#define IEEE80211_HT_CAP_GRN_FLD 0x0010
#define IEEE80211_HT_CAP_SGI_20 0x0020
#define IEEE80211_HT_CAP_SGI_40 0x0040
#define IEEE80211_HT_CAP_DELAY_BA 0x0400
#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
/* 802.11n HT capability AMPDU settings */
#define IEEE80211_HT_CAP_AMPDU_FACTOR 0x03
#define IEEE80211_HT_CAP_AMPDU_DENSITY 0x1C
/* 802.11n HT capability MSC set */
#define IEEE80211_SUPP_MCS_SET_UEQM 4
#define IEEE80211_HT_CAP_MAX_STREAMS 4
#define IEEE80211_SUPP_MCS_SET_LEN 10
/* maximum streams the spec allows */
#define IEEE80211_HT_CAP_MCS_TX_DEFINED 0x01
#define IEEE80211_HT_CAP_MCS_TX_RX_DIFF 0x02
#define IEEE80211_HT_CAP_MCS_TX_STREAMS 0x0C
#define IEEE80211_HT_CAP_MCS_TX_UEQM 0x10
/* 802.11n HT IE masks */
#define IEEE80211_HT_IE_CHA_SEC_OFFSET 0x03
#define IEEE80211_HT_IE_CHA_SEC_NONE 0x00
#define IEEE80211_HT_IE_CHA_SEC_ABOVE 0x01
#define IEEE80211_HT_IE_CHA_SEC_BELOW 0x03
#define IEEE80211_HT_IE_CHA_WIDTH 0x04
#define IEEE80211_HT_IE_HT_PROTECTION 0x0003
#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004
#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010
/* for ht_param */
#define IEEE80211_HT_PARAM_CHA_SEC_OFFSET 0x03
#define IEEE80211_HT_PARAM_CHA_SEC_NONE 0x00
#define IEEE80211_HT_PARAM_CHA_SEC_ABOVE 0x01
#define IEEE80211_HT_PARAM_CHA_SEC_BELOW 0x03
#define IEEE80211_HT_PARAM_CHAN_WIDTH_ANY 0x04
#define IEEE80211_HT_PARAM_RIFS_MODE 0x08
#define IEEE80211_HT_PARAM_SPSMP_SUPPORT 0x10
#define IEEE80211_HT_PARAM_SERV_INTERVAL_GRAN 0xE0
/* for operation_mode */
#define IEEE80211_HT_OP_MODE_PROTECTION 0x0003
#define IEEE80211_HT_OP_MODE_PROTECTION_NONE 0
#define IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER 1
#define IEEE80211_HT_OP_MODE_PROTECTION_20MHZ 2
#define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED 3
#define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT 0x0004
#define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT 0x0010
/* for stbc_param */
#define IEEE80211_HT_STBC_PARAM_DUAL_BEACON 0x0040
#define IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT 0x0080
#define IEEE80211_HT_STBC_PARAM_STBC_BEACON 0x0100
#define IEEE80211_HT_STBC_PARAM_LSIG_TXOP_FULLPROT 0x0200
#define IEEE80211_HT_STBC_PARAM_PCO_ACTIVE 0x0400
#define IEEE80211_HT_STBC_PARAM_PCO_PHASE 0x0800
/* block-ack parameters */
#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
@@ -769,7 +836,6 @@ struct ieee80211_ht_addt_info {
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
#define WLAN_AUTH_SHARED_KEY 1
#define WLAN_AUTH_FAST_BSS_TRANSITION 2
#define WLAN_AUTH_LEAP 128
#define WLAN_AUTH_CHALLENGE_LEN 128
@@ -949,7 +1015,7 @@ enum ieee80211_eid {
WLAN_EID_EXT_SUPP_RATES = 50,
/* 802.11n */
WLAN_EID_HT_CAPABILITY = 45,
WLAN_EID_HT_EXTRA_INFO = 61,
WLAN_EID_HT_INFORMATION = 61,
/* 802.11i */
WLAN_EID_RSN = 48,
WLAN_EID_WPA = 221,
@@ -976,6 +1042,68 @@ enum ieee80211_spectrum_mgmt_actioncode {
WLAN_ACTION_SPCT_CHL_SWITCH = 4,
};
/*
* IEEE 802.11-2007 7.3.2.9 Country information element
*
* Minimum length is 8 octets, i.e. len must be evenly
* divisible by 2
*/
/* Although the spec says 8 I'm seeing 6 in practice */
#define IEEE80211_COUNTRY_IE_MIN_LEN 6
/*
* For regulatory extension stuff see IEEE 802.11-2007
* Annex I (page 1141) and Annex J (page 1147). Also
* review 7.3.2.9.
*
* When dot11RegulatoryClassesRequired is true and the
* first_channel/reg_extension_id is >= 201 then the IE
* is composed of the 'ext' struct represented below:
*
* - Regulatory extension ID - when generating IE this just needs
* to be monotonically increasing for each triplet passed in
* the IE
* - Regulatory class - index into set of rules
* - Coverage class - index into air propagation time (Table 7-27),
* in microseconds, you can compute the air propagation time from
* the index by multiplying by 3, so index 10 yields a propagation
* of 30 us. Valid values are 0-31, values 32-255 are not defined
* yet. A value of 0 indicates air propagation of <= 1 us.
*
* See also Table I.2 for Emission limit sets and table
* I.3 for Behavior limit sets. Table J.1 indicates how to map
* a reg_class to an emission limit set and behavior limit set.
*/
#define IEEE80211_COUNTRY_EXTENSION_ID 201
/*
* Channels numbers in the IE must be monotonically increasing
* if dot11RegulatoryClassesRequired is not true.
*
* If dot11RegulatoryClassesRequired is true consecutive
* subband triplets following a regulatory triplet shall
* have monotonically increasing first_channel number fields.
*
* Channel numbers shall not overlap.
*
* Note that max_power is signed.
*/
struct ieee80211_country_ie_triplet {
union {
struct {
u8 first_channel;
u8 num_channels;
s8 max_power;
} __attribute__ ((packed)) chans;
struct {
u8 reg_extension_id;
u8 reg_class;
u8 coverage_class;
} __attribute__ ((packed)) ext;
};
} __attribute__ ((packed));
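A hedged sketch of walking the triplets that follow the 3-octet country string, using the >= 201 extension-ID rule described above; buffer handling is simplified and the function is illustrative, not in-tree code.
static void example_parse_country_triplets(const u8 *pos, size_t len)
{
        while (len >= sizeof(struct ieee80211_country_ie_triplet)) {
                const struct ieee80211_country_ie_triplet *trip =
                        (const struct ieee80211_country_ie_triplet *)pos;

                if (trip->ext.reg_extension_id >= IEEE80211_COUNTRY_EXTENSION_ID) {
                        /* regulatory extension triplet:
                         * trip->ext.reg_class, trip->ext.coverage_class */
                } else {
                        /* subband triplet: trip->chans.first_channel,
                         * trip->chans.num_channels, trip->chans.max_power */
                }
                pos += sizeof(*trip);
                len -= sizeof(*trip);
        }
}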
/* BACK action code */
enum ieee80211_back_actioncode {
WLAN_ACTION_ADDBA_REQ = 0,
@@ -1057,4 +1185,4 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr)
return hdr->addr1;
}
#endif /* IEEE80211_H */
#endif /* LINUX_IEEE80211_H */

View File

@@ -65,6 +65,7 @@
#define IFF_BONDING 0x20 /* bonding master or slave */
#define IFF_SLAVE_NEEDARP 0x40 /* need ARPs for validation */
#define IFF_ISATAP 0x80 /* ISATAP interface (RFC4214) */
#define IFF_MASTER_ARPMON 0x100 /* bonding master, ARP mon in use */
#define IF_GET_IFACE 0x0001 /* for querying only */
#define IF_GET_PROTO 0x0002

View File

@@ -87,6 +87,9 @@
#define ARPHRD_IEEE80211_PRISM 802 /* IEEE 802.11 + Prism2 header */
#define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */
#define ARPHRD_PHONET 820 /* PhoNet media type */
#define ARPHRD_PHONET_PIPE 821 /* PhoNet pipe header */
#define ARPHRD_VOID 0xFFFF /* Void type, nothing is known */
#define ARPHRD_NONE 0xFFFE /* zero header length */

View File

@@ -80,6 +80,10 @@ struct in_addr {
/* BSD compatibility */
#define IP_RECVRETOPTS IP_RETOPTS
/* TProxy original addresses */
#define IP_ORIGDSTADDR 20
#define IP_RECVORIGDSTADDR IP_ORIGDSTADDR
/* IP_MTU_DISCOVER values */
#define IP_PMTUDISC_DONT 0 /* Never send DF frames */
#define IP_PMTUDISC_WANT 1 /* Use per route hints */

View File

@@ -57,7 +57,6 @@ extern struct nsproxy init_nsproxy;
.mnt_ns = NULL, \
INIT_NET_NS(net_ns) \
INIT_IPC_NS(ipc_ns) \
.user_ns = &init_user_ns, \
}
#define INIT_SIGHAND(sighand) { \
@@ -113,6 +112,8 @@ extern struct group_info init_groups;
# define CAP_INIT_BSET CAP_INIT_EFF_SET
#endif
extern struct cred init_cred;
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -147,13 +148,10 @@ extern struct group_info init_groups;
.children = LIST_HEAD_INIT(tsk.children), \
.sibling = LIST_HEAD_INIT(tsk.sibling), \
.group_leader = &tsk, \
.group_info = &init_groups, \
.cap_effective = CAP_INIT_EFF_SET, \
.cap_inheritable = CAP_INIT_INH_SET, \
.cap_permitted = CAP_FULL_SET, \
.cap_bset = CAP_INIT_BSET, \
.securebits = SECUREBITS_DEFAULT, \
.user = INIT_USER, \
.real_cred = &init_cred, \
.cred = &init_cred, \
.cred_exec_mutex = \
__MUTEX_INITIALIZER(tsk.cred_exec_mutex), \
.comm = "swapper", \
.thread = INIT_THREAD, \
.fs = &init_fs, \

View File

@@ -659,6 +659,8 @@ struct input_absinfo {
#define SW_RADIO SW_RFKILL_ALL /* deprecated */
#define SW_MICROPHONE_INSERT 0x04 /* set = inserted */
#define SW_DOCK 0x05 /* set = plugged into dock */
#define SW_LINEOUT_INSERT 0x06 /* set = inserted */
#define SW_JACK_PHYSICAL_INSERT 0x07 /* set = mechanical switch set */
#define SW_MAX 0x0f
#define SW_CNT (SW_MAX+1)

View File

@@ -278,6 +278,7 @@ struct ipv6_pinfo {
struct in6_addr saddr;
struct in6_addr rcv_saddr;
struct in6_addr daddr;
struct in6_pktinfo sticky_pktinfo;
struct in6_addr *daddr_cache;
#ifdef CONFIG_IPV6_SUBTREES
struct in6_addr *saddr_cache;

View File

@@ -361,18 +361,6 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
((unsigned char *)&addr)[3]
#define NIPQUAD_FMT "%u.%u.%u.%u"
#define NIP6(addr) \
ntohs((addr).s6_addr16[0]), \
ntohs((addr).s6_addr16[1]), \
ntohs((addr).s6_addr16[2]), \
ntohs((addr).s6_addr16[3]), \
ntohs((addr).s6_addr16[4]), \
ntohs((addr).s6_addr16[5]), \
ntohs((addr).s6_addr16[6]), \
ntohs((addr).s6_addr16[7])
#define NIP6_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
#define NIP6_SEQFMT "%04x%04x%04x%04x%04x%04x%04x%04x"
#if defined(__LITTLE_ENDIAN)
#define HIPQUAD(addr) \
((unsigned char *)&addr)[3], \

View File

@@ -100,6 +100,10 @@ struct kimage {
#define KEXEC_TYPE_DEFAULT 0
#define KEXEC_TYPE_CRASH 1
unsigned int preserve_context : 1;
#ifdef ARCH_HAS_KIMAGE_ARCH
struct kimage_arch arch;
#endif
};

View File

@@ -1,66 +0,0 @@
/* key-ui.h: key userspace interface stuff
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_KEY_UI_H
#define _LINUX_KEY_UI_H
#include <linux/key.h>
/* the key tree */
extern struct rb_root key_serial_tree;
extern spinlock_t key_serial_lock;
/* required permissions */
#define KEY_VIEW 0x01 /* require permission to view attributes */
#define KEY_READ 0x02 /* require permission to read content */
#define KEY_WRITE 0x04 /* require permission to update / modify */
#define KEY_SEARCH 0x08 /* require permission to search (keyring) or find (key) */
#define KEY_LINK 0x10 /* require permission to link */
#define KEY_SETATTR 0x20 /* require permission to change attributes */
#define KEY_ALL 0x3f /* all the above permissions */
/*
* the keyring payload contains a list of the keys to which the keyring is
* subscribed
*/
struct keyring_list {
struct rcu_head rcu; /* RCU deletion hook */
unsigned short maxkeys; /* max keys this list can hold */
unsigned short nkeys; /* number of keys currently held */
unsigned short delkey; /* key to be unlinked by RCU */
struct key *keys[0];
};
/*
* check to see whether permission is granted to use a key in the desired way
*/
extern int key_task_permission(const key_ref_t key_ref,
struct task_struct *context,
key_perm_t perm);
static inline int key_permission(const key_ref_t key_ref, key_perm_t perm)
{
return key_task_permission(key_ref, current, perm);
}
extern key_ref_t lookup_user_key(struct task_struct *context,
key_serial_t id, int create, int partial,
key_perm_t perm);
extern long join_session_keyring(const char *name);
extern struct key_type *key_type_lookup(const char *type);
extern void key_type_put(struct key_type *ktype);
#define key_negative_timeout 60 /* default timeout on a negative key's existence */
#endif /* _LINUX_KEY_UI_H */

View File

@@ -73,6 +73,7 @@ struct key;
struct seq_file;
struct user_struct;
struct signal_struct;
struct cred;
struct key_type;
struct key_owner;
@@ -181,7 +182,7 @@ struct key {
extern struct key *key_alloc(struct key_type *type,
const char *desc,
uid_t uid, gid_t gid,
struct task_struct *ctx,
const struct cred *cred,
key_perm_t perm,
unsigned long flags);
@@ -249,7 +250,7 @@ extern int key_unlink(struct key *keyring,
struct key *key);
extern struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
struct task_struct *ctx,
const struct cred *cred,
unsigned long flags,
struct key *dest);
@@ -276,24 +277,11 @@ extern ctl_table key_sysctls[];
/*
* the userspace interface
*/
extern void switch_uid_keyring(struct user_struct *new_user);
extern int copy_keys(unsigned long clone_flags, struct task_struct *tsk);
extern int copy_thread_group_keys(struct task_struct *tsk);
extern void exit_keys(struct task_struct *tsk);
extern void exit_thread_group_keys(struct signal_struct *tg);
extern int suid_keys(struct task_struct *tsk);
extern int exec_keys(struct task_struct *tsk);
extern int install_thread_keyring_to_cred(struct cred *cred);
extern void key_fsuid_changed(struct task_struct *tsk);
extern void key_fsgid_changed(struct task_struct *tsk);
extern void key_init(void);
#define __install_session_keyring(tsk, keyring) \
({ \
struct key *old_session = tsk->signal->session_keyring; \
tsk->signal->session_keyring = keyring; \
old_session; \
})
#else /* CONFIG_KEYS */
#define key_validate(k) 0
@@ -302,17 +290,9 @@ extern void key_init(void);
#define key_revoke(k) do { } while(0)
#define key_put(k) do { } while(0)
#define key_ref_put(k) do { } while(0)
#define make_key_ref(k, p) ({ NULL; })
#define key_ref_to_ptr(k) ({ NULL; })
#define make_key_ref(k, p) NULL
#define key_ref_to_ptr(k) NULL
#define is_key_possessed(k) 0
#define switch_uid_keyring(u) do { } while(0)
#define __install_session_keyring(t, k) ({ NULL; })
#define copy_keys(f,t) 0
#define copy_thread_group_keys(t) 0
#define exit_keys(t) do { } while(0)
#define exit_thread_group_keys(tg) do { } while(0)
#define suid_keys(t) do { } while(0)
#define exec_keys(t) do { } while(0)
#define key_fsuid_changed(t) do { } while(0)
#define key_fsgid_changed(t) do { } while(0)
#define key_init() do { } while(0)

View File

@@ -1,6 +1,6 @@
/* keyctl.h: keyctl command IDs
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Copyright (C) 2004, 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -20,6 +20,7 @@
#define KEY_SPEC_USER_SESSION_KEYRING -5 /* - key ID for UID-session keyring */
#define KEY_SPEC_GROUP_KEYRING -6 /* - key ID for GID-specific keyring */
#define KEY_SPEC_REQKEY_AUTH_KEY -7 /* - key ID for assumed request_key auth key */
#define KEY_SPEC_REQUESTOR_KEYRING -8 /* - key ID for request_key() dest keyring */
/* request-key default keyrings */
#define KEY_REQKEY_DEFL_NO_CHANGE -1
@@ -30,6 +31,7 @@
#define KEY_REQKEY_DEFL_USER_KEYRING 4
#define KEY_REQKEY_DEFL_USER_SESSION_KEYRING 5
#define KEY_REQKEY_DEFL_GROUP_KEYRING 6
#define KEY_REQKEY_DEFL_REQUESTOR_KEYRING 7
/* keyctl commands */
#define KEYCTL_GET_KEYRING_ID 0 /* ask for a keyring's ID */

View File

@@ -64,14 +64,6 @@
name:
#endif
#define KPROBE_ENTRY(name) \
.pushsection .kprobes.text, "ax"; \
ENTRY(name)
#define KPROBE_END(name) \
END(name); \
.popsection
#ifndef END
#define END(name) \
.size name, .-name

View File

@@ -0,0 +1,94 @@
#ifndef _LINUX_LIST_NULLS_H
#define _LINUX_LIST_NULLS_H
/*
* Special version of lists, where end of list is not a NULL pointer,
* but a 'nulls' marker, which can have many different values.
* (up to 2^31 different values guaranteed on all platforms)
*
* In the standard hlist, termination of a list is the NULL pointer.
* In this special 'nulls' variant, we use the fact that objects stored in
* a list are aligned on a word (4 or 8 bytes alignment).
* We therefore use the least significant bit of 'ptr' :
* Set to 1 : This is a 'nulls' end-of-list marker (ptr >> 1)
* Set to 0 : This is a pointer to some object (ptr)
*/
struct hlist_nulls_head {
struct hlist_nulls_node *first;
};
struct hlist_nulls_node {
struct hlist_nulls_node *next, **pprev;
};
#define INIT_HLIST_NULLS_HEAD(ptr, nulls) \
((ptr)->first = (struct hlist_nulls_node *) (1UL | (((long)nulls) << 1)))
#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
/**
* is_a_nulls - Test if a ptr is a nulls marker
* @ptr: ptr to be tested
*
*/
static inline int is_a_nulls(const struct hlist_nulls_node *ptr)
{
return ((unsigned long)ptr & 1);
}
/**
* get_nulls_value - Get the 'nulls' value of the end of chain
* @ptr: end of chain
*
* Should be called only if is_a_nulls(ptr);
*/
static inline unsigned long get_nulls_value(const struct hlist_nulls_node *ptr)
{
return ((unsigned long)ptr) >> 1;
}
static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h)
{
return !h->pprev;
}
static inline int hlist_nulls_empty(const struct hlist_nulls_head *h)
{
return is_a_nulls(h->first);
}
static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
{
struct hlist_nulls_node *next = n->next;
struct hlist_nulls_node **pprev = n->pprev;
*pprev = next;
if (!is_a_nulls(next))
next->pprev = pprev;
}
/**
* hlist_nulls_for_each_entry - iterate over list of given type
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_nulls_node to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*
*/
#define hlist_nulls_for_each_entry(tpos, pos, head, member) \
for (pos = (head)->first; \
(!is_a_nulls(pos)) && \
({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
/**
* hlist_nulls_for_each_entry_from - iterate over a hlist continuing from current point
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_nulls_node to use as a loop cursor.
* @member: the name of the hlist_node within the struct.
*
*/
#define hlist_nulls_for_each_entry_from(tpos, pos, member) \
for (; (!is_a_nulls(pos)) && \
({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
#endif
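A hedged usage sketch, assuming a hypothetical hashed object type; it shows the idiom the 'nulls' value exists for: after a lockless lookup, the chain's end value can be compared with the expected bucket to detect that an object was moved while the walk was in progress.
struct example_item {                           /* hypothetical object */
        int                     key;
        struct hlist_nulls_node node;
};

/* each head would have been set up with INIT_HLIST_NULLS_HEAD(&table[i], i) */
static struct example_item *example_lookup(struct hlist_nulls_head *table,
                                           unsigned int nbuckets, int key)
{
        unsigned int bucket = (unsigned int)key % nbuckets;
        struct hlist_nulls_node *pos;
        struct example_item *item;

        hlist_nulls_for_each_entry(item, pos, &table[bucket], node)
                if (item->key == key)
                        return item;

        /* an RCU caller would also verify get_nulls_value(pos) == bucket
         * here and retry the lookup otherwise */
        return NULL;
}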

View File

@@ -12,6 +12,7 @@
* See the file COPYING for more details.
*/
#include <stdarg.h>
#include <linux/types.h>
struct module;
@@ -48,10 +49,28 @@ struct marker {
void (*call)(const struct marker *mdata, void *call_private, ...);
struct marker_probe_closure single;
struct marker_probe_closure *multi;
const char *tp_name; /* Optional tracepoint name */
void *tp_cb; /* Optional tracepoint callback */
} __attribute__((aligned(8)));
#ifdef CONFIG_MARKERS
#define _DEFINE_MARKER(name, tp_name_str, tp_cb, format) \
static const char __mstrtab_##name[] \
__attribute__((section("__markers_strings"))) \
= #name "\0" format; \
static struct marker __mark_##name \
__attribute__((section("__markers"), aligned(8))) = \
{ __mstrtab_##name, &__mstrtab_##name[sizeof(#name)], \
0, 0, marker_probe_cb, { __mark_empty_function, NULL},\
NULL, tp_name_str, tp_cb }
#define DEFINE_MARKER(name, format) \
_DEFINE_MARKER(name, NULL, NULL, format)
#define DEFINE_MARKER_TP(name, tp_name, tp_cb, format) \
_DEFINE_MARKER(name, #tp_name, tp_cb, format)
/*
* Note : the empty asm volatile with read constraint is used here instead of a
* "used" attribute to fix a gcc 4.1.x bug.
@@ -65,14 +84,7 @@ struct marker {
*/
#define __trace_mark(generic, name, call_private, format, args...) \
do { \
static const char __mstrtab_##name[] \
__attribute__((section("__markers_strings"))) \
= #name "\0" format; \
static struct marker __mark_##name \
__attribute__((section("__markers"), aligned(8))) = \
{ __mstrtab_##name, &__mstrtab_##name[sizeof(#name)], \
0, 0, marker_probe_cb, \
{ __mark_empty_function, NULL}, NULL }; \
DEFINE_MARKER(name, format); \
__mark_check_format(format, ## args); \
if (unlikely(__mark_##name.state)) { \
(*__mark_##name.call) \
@@ -80,14 +92,39 @@ struct marker {
} \
} while (0)
#define __trace_mark_tp(name, call_private, tp_name, tp_cb, format, args...) \
do { \
void __check_tp_type(void) \
{ \
register_trace_##tp_name(tp_cb); \
} \
DEFINE_MARKER_TP(name, tp_name, tp_cb, format); \
__mark_check_format(format, ## args); \
(*__mark_##name.call)(&__mark_##name, call_private, \
## args); \
} while (0)
extern void marker_update_probe_range(struct marker *begin,
struct marker *end);
#define GET_MARKER(name) (__mark_##name)
#else /* !CONFIG_MARKERS */
#define DEFINE_MARKER(name, tp_name, tp_cb, format)
#define __trace_mark(generic, name, call_private, format, args...) \
__mark_check_format(format, ## args)
#define __trace_mark_tp(name, call_private, tp_name, tp_cb, format, args...) \
do { \
void __check_tp_type(void) \
{ \
register_trace_##tp_name(tp_cb); \
} \
__mark_check_format(format, ## args); \
} while (0)
static inline void marker_update_probe_range(struct marker *begin,
struct marker *end)
{ }
#define GET_MARKER(name)
#endif /* CONFIG_MARKERS */
/**
@@ -116,6 +153,20 @@ static inline void marker_update_probe_range(struct marker *begin,
#define _trace_mark(name, format, args...) \
__trace_mark(1, name, NULL, format, ## args)
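A hedged usage sketch of the plain marker form (the trace_mark() wrapper defined alongside _trace_mark() in this header); the marker name and event fields are made up.
#include <linux/marker.h>

static void example_submit(int cpu, unsigned long nr)
{
        trace_mark(example_subsys_submit, "cpu %d nr %lu", cpu, nr);
}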
/**
* trace_mark_tp - Marker in a tracepoint callback
* @name: marker name, not quoted.
* @tp_name: tracepoint name, not quoted.
* @tp_cb: tracepoint callback. Should have an associated global symbol so it
* is not optimized away by the compiler (should not be static).
* @format: format string
* @args...: variable argument list
*
* Places a marker in a tracepoint callback.
*/
#define trace_mark_tp(name, tp_name, tp_cb, format, args...) \
__trace_mark_tp(name, NULL, tp_name, tp_cb, format, ## args)
/**
* MARK_NOARGS - Format string for a marker with no argument.
*/
@@ -136,8 +187,6 @@ extern marker_probe_func __mark_empty_function;
extern void marker_probe_cb(const struct marker *mdata,
void *call_private, ...);
extern void marker_probe_cb_noarg(const struct marker *mdata,
void *call_private, ...);
/*
* Connect a probe to a marker.
@@ -162,8 +211,10 @@ extern void *marker_get_private_data(const char *name, marker_probe_func *probe,
/*
* marker_synchronize_unregister must be called between the last marker probe
* unregistration and the end of module exit to make sure there is no caller
* executing a probe when it is freed.
* unregistration and the first one of
* - the end of module exit function
* - the free of any resource used by the probes
* to ensure the code and data are valid for any possibly running probes.
*/
#define marker_synchronize_unregister() synchronize_sched()

include/linux/mdio-gpio.h Normal file
View File

@@ -0,0 +1,25 @@
/*
* MDIO-GPIO bus platform data structures
*
* Copyright (C) 2008, Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#ifndef __LINUX_MDIO_GPIO_H
#define __LINUX_MDIO_GPIO_H
#include <linux/mdio-bitbang.h>
struct mdio_gpio_platform_data {
/* GPIO numbers for bus pins */
unsigned int mdc;
unsigned int mdio;
unsigned int phy_mask;
int irqs[PHY_MAX_ADDR];
};
#endif /* __LINUX_MDIO_GPIO_H */
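A hedged board-file sketch of supplying this platform data; the GPIO numbers are invented, and the "mdio-gpio" device name is an assumption about the driver that consumes it. Board code would then hand example_mdio_bus to platform_device_register().
#include <linux/platform_device.h>
#include <linux/mdio-gpio.h>

static struct mdio_gpio_platform_data example_mdio_pdata = {
        .mdc      = 10,                 /* invented GPIO numbers */
        .mdio     = 11,
        .phy_mask = 0,                  /* probe all PHY addresses */
};

static struct platform_device example_mdio_bus = {
        .name = "mdio-gpio",            /* assumed driver name */
        .id   = 0,
        .dev  = {
                .platform_data = &example_mdio_pdata,
        },
};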

View File

@@ -1,7 +1,7 @@
/*
* audio.h -- Audio Driver for Wolfson WM8350 PMIC
*
* Copyright 2007 Wolfson Microelectronics PLC
* Copyright 2007, 2008 Wolfson Microelectronics PLC
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -70,9 +70,9 @@
#define WM8350_CODEC_ISEL_0_5 3 /* x0.5 */
#define WM8350_VMID_OFF 0
#define WM8350_VMID_500K 1
#define WM8350_VMID_100K 2
#define WM8350_VMID_10K 3
#define WM8350_VMID_300K 1
#define WM8350_VMID_50K 2
#define WM8350_VMID_5K 3
/*
* R40 (0x28) - Clock Control 1
@@ -591,8 +591,38 @@
#define WM8350_IRQ_CODEC_MICSCD 41
#define WM8350_IRQ_CODEC_MICD 42
/*
* WM8350 Platform data.
*
* This must be initialised per platform for best audio performance.
* Please see WM8350 datasheet for information.
*/
struct wm8350_audio_platform_data {
int vmid_discharge_msecs; /* VMID --> OFF discharge time */
int drain_msecs; /* OFF drain time */
int cap_discharge_msecs; /* Cap ON (from OFF) discharge time */
int vmid_charge_msecs; /* vmid power up time */
u32 vmid_s_curve:2; /* vmid enable s curve speed */
u32 dis_out4:2; /* out4 discharge speed */
u32 dis_out3:2; /* out3 discharge speed */
u32 dis_out2:2; /* out2 discharge speed */
u32 dis_out1:2; /* out1 discharge speed */
u32 vroi_out4:1; /* out4 tie off */
u32 vroi_out3:1; /* out3 tie off */
u32 vroi_out2:1; /* out2 tie off */
u32 vroi_out1:1; /* out1 tie off */
u32 vroi_enable:1; /* enable tie off */
u32 codec_current_on:2; /* current level ON */
u32 codec_current_standby:2; /* current level STANDBY */
u32 codec_current_charge:2; /* codec current @ vmid charge */
};
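A hedged per-board example of the platform data described above; every timing and current value here is invented and would have to come from the platform's own audio characterisation, not from this header.
static struct wm8350_audio_platform_data example_wm8350_audio = {
        .vmid_discharge_msecs   = 1000,
        .drain_msecs            = 30,
        .cap_discharge_msecs    = 700,
        .vmid_charge_msecs      = 700,
        .vmid_s_curve           = 0x1,  /* 2-bit fields; encodings per datasheet */
        .dis_out1               = 0x2,
        .codec_current_on       = 0x2,
        .codec_current_standby  = 0x1,
        .codec_current_charge   = 0x3,
};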
struct snd_soc_codec;
struct wm8350_codec {
struct platform_device *pdev;
struct snd_soc_codec *codec;
struct wm8350_audio_platform_data *platform_data;
};
#endif

View File

@@ -135,6 +135,10 @@
#define LPA_1000FULL 0x0800 /* Link partner 1000BASE-T full duplex */
#define LPA_1000HALF 0x0400 /* Link partner 1000BASE-T half duplex */
/* Flow control flags */
#define FLOW_CTRL_TX 0x01
#define FLOW_CTRL_RX 0x02
/* This structure is used in all SIOCxMIIxxx ioctl calls */
struct mii_ioctl_data {
__u16 phy_id;
@@ -235,5 +239,34 @@ static inline unsigned int mii_duplex (unsigned int duplex_lock,
return 0;
}
/**
* mii_resolve_flowctrl_fdx - resolve full-duplex flow control
* @lcladv: value of MII ADVERTISE register
* @rmtadv: value of MII LPA register
*
* Resolve full duplex flow control as per IEEE 802.3-2005 table 28B-3
*/
static inline u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv)
{
u8 cap = 0;
if (lcladv & ADVERTISE_PAUSE_CAP) {
if (lcladv & ADVERTISE_PAUSE_ASYM) {
if (rmtadv & LPA_PAUSE_CAP)
cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
else if (rmtadv & LPA_PAUSE_ASYM)
cap = FLOW_CTRL_RX;
} else {
if (rmtadv & LPA_PAUSE_CAP)
cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
}
} else if (lcladv & ADVERTISE_PAUSE_ASYM) {
if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
cap = FLOW_CTRL_TX;
}
return cap;
}
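A hedged sketch of how a driver using the generic MII layer might feed the helper above after autonegotiation; struct mii_if_info and its mdio_read hook come from this header, while the surrounding function is illustrative.
static void example_update_pause(struct mii_if_info *mii,
                                 int *tx_pause, int *rx_pause)
{
        u16 lcladv = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
        u16 rmtadv = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
        u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);

        *tx_pause = !!(cap & FLOW_CTRL_TX);
        *rx_pause = !!(cap & FLOW_CTRL_RX);
}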
#endif /* __KERNEL__ */
#endif /* __LINUX_MII_H__ */

View File

@@ -206,6 +206,7 @@ struct mlx4_caps {
int reserved_cqs;
int num_eqs;
int reserved_eqs;
int num_comp_vectors;
int num_mpts;
int num_mtt_segs;
int fmr_reserved_mtts;
@@ -328,6 +329,7 @@ struct mlx4_cq {
int arm_sn;
int cqn;
unsigned vector;
atomic_t refcount;
struct completion free;
@@ -437,7 +439,7 @@ void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
int collapsed);
unsigned vector, int collapsed);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);

View File

@@ -145,6 +145,23 @@ extern pgprot_t protection_map[16];
#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */
/*
* This interface is used by x86 PAT code to identify a pfn mapping that is
* linear over entire vma. This is to optimize PAT code that deals with
* marking the physical region with a particular prot. This is not for generic
* mm use. Note also that this check will not work if the pfn mapping is
* linear for a vma starting at physical address 0, in which case PAT code
* falls back to the slow path of reserving the physical range page by page.
*/
static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
{
return ((vma->vm_flags & VM_PFNMAP) && vma->vm_pgoff);
}
static inline int is_pfn_mapping(struct vm_area_struct *vma)
{
return (vma->vm_flags & VM_PFNMAP);
}
/*
* vm_fault is filled by the pagefault handler and passed to the vma's
@@ -781,6 +798,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
@@ -1286,5 +1305,7 @@ int vmemmap_populate_basepages(struct page *start_page,
int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
void vmemmap_populate_print_last(void);
extern void *alloc_locked_buffer(size_t size);
extern void free_locked_buffer(void *buffer, size_t size);
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */

View File

@@ -117,6 +117,7 @@ struct sioc_mif_req6
#include <linux/pim.h>
#include <linux/skbuff.h> /* for struct sk_buff_head */
#include <net/net_namespace.h>
#ifdef CONFIG_IPV6_MROUTE
static inline int ip6_mroute_opt(int opt)
@@ -187,6 +188,9 @@ struct mif_device
struct mfc6_cache
{
struct mfc6_cache *next; /* Next entry on cache line */
#ifdef CONFIG_NET_NS
struct net *mfc6_net;
#endif
struct in6_addr mf6c_mcastgrp; /* Group the entry belongs to */
struct in6_addr mf6c_origin; /* Source of packet */
mifi_t mf6c_parent; /* Source interface */
@@ -209,6 +213,18 @@ struct mfc6_cache
} mfc_un;
};
static inline
struct net *mfc6_net(const struct mfc6_cache *mfc)
{
return read_pnet(&mfc->mfc6_net);
}
static inline
void mfc6_net_set(struct mfc6_cache *mfc, struct net *net)
{
write_pnet(&mfc->mfc6_net, hold_net(net));
}
#define MFC_STATIC 1
#define MFC_NOTIFY 2
@@ -229,13 +245,17 @@ struct mfc6_cache
#ifdef __KERNEL__
struct rtmsg;
extern int ip6mr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait);
extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
struct rtmsg *rtm, int nowait);
#ifdef CONFIG_IPV6_MROUTE
extern struct sock *mroute6_socket;
static inline struct sock *mroute6_socket(struct net *net)
{
return net->ipv6.mroute6_sk;
}
extern int ip6mr_sk_done(struct sock *sk);
#else
#define mroute6_socket NULL
static inline struct sock *mroute6_socket(struct net *net) { return NULL; }
static inline int ip6mr_sk_done(struct sock *sk) { return 0; }
#endif
#endif

View File

@@ -43,6 +43,9 @@
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
struct vlan_group;
struct ethtool_ops;
@@ -311,14 +314,16 @@ struct napi_struct {
spinlock_t poll_lock;
int poll_owner;
struct net_device *dev;
struct list_head dev_list;
#endif
struct list_head dev_list;
struct sk_buff *gro_list;
};
enum
{
NAPI_STATE_SCHED, /* Poll is scheduled */
NAPI_STATE_DISABLE, /* Disable pending */
NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
};
extern void __napi_schedule(struct napi_struct *n);
@@ -372,22 +377,8 @@ static inline int napi_reschedule(struct napi_struct *napi)
*
* Mark NAPI processing as complete.
*/
static inline void __napi_complete(struct napi_struct *n)
{
BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
list_del(&n->poll_list);
smp_mb__before_clear_bit();
clear_bit(NAPI_STATE_SCHED, &n->state);
}
static inline void napi_complete(struct napi_struct *n)
{
unsigned long flags;
local_irq_save(flags);
__napi_complete(n);
local_irq_restore(flags);
}
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);
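For context, a hedged outline of the poll routine these helpers pair with; the receive loop is a placeholder.
static int example_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;

        /* ...pull up to 'budget' packets from the device, bumping work_done... */

        if (work_done < budget) {
                napi_complete(napi);
                /* a real driver re-enables its rx interrupt here */
        }
        return work_done;
}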
/**
* napi_disable - prevent NAPI from scheduling
@@ -451,6 +442,147 @@ struct netdev_queue {
struct Qdisc *qdisc_sleeping;
} ____cacheline_aligned_in_smp;
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
* optional and can be filled with a null pointer.
*
* int (*ndo_init)(struct net_device *dev);
* This function is called once when the network device is registered.
* The network device can use this for any late stage initialization
* or semantic validation. It can fail with an error code which will
* be propagated back to register_netdev.
*
* void (*ndo_uninit)(struct net_device *dev);
* This function is called when the device is unregistered or when registration
* fails. It is not called if init fails.
*
* int (*ndo_open)(struct net_device *dev);
* This function is called when the network device transitions to the up
* state.
*
* int (*ndo_stop)(struct net_device *dev);
* This function is called when the network device transitions to the down
* state.
*
* int (*ndo_start_xmit)(struct sk_buff *skb, struct net_device *dev);
* Called when a packet needs to be transmitted.
* Must return NETDEV_TX_OK, NETDEV_TX_BUSY, or NETDEV_TX_LOCKED.
* Required; can not be NULL.
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
* Called to decide which queue to use when the device supports multiple
* transmit queues.
*
* void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
* This function is called to allow the device receiver to make
* changes to its configuration when multicast or promiscuous mode is enabled.
*
* void (*ndo_set_rx_mode)(struct net_device *dev);
* This function is called when the device changes its address list filtering.
*
* void (*ndo_set_multicast_list)(struct net_device *dev);
* This function is called when the multicast address list changes.
*
* int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
* This function is called when the Media Access Control address
* needs to be changed. If this interface is not defined, the
* MAC address can not be changed.
*
* int (*ndo_validate_addr)(struct net_device *dev);
* Test if Media Access Control address is valid for the device.
*
* int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
* Called when a user requests an ioctl which can't be handled by
* the generic interface code. If not defined, ioctls return
* a not-supported error code.
*
* int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
* Used to set a network device's bus interface parameters. This interface
* is retained for legacy reasons; new devices should use the bus
* interface (PCI) for low level management.
*
* int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
* Called when a user wants to change the Maximum Transfer Unit
* of a device. If not defined, any request to change the MTU
* will return an error.
*
* void (*ndo_tx_timeout)(struct net_device *dev);
* Callback used when the transmitter has not made any progress
* for dev->watchdog ticks.
*
* struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
* Called when a user wants to get the network device usage
* statistics. If not defined, the counters in dev->stats will
* be used.
*
* void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
* If the device supports VLAN receive acceleration
* (i.e. dev->features & NETIF_F_HW_VLAN_RX), then this function is called
* when the vlan groups for the device change. Note: grp is NULL
* if no vlan groups are being used.
*
* void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
* If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
* this function is called when a VLAN id is registered.
*
* void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
* If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
* this function is called when a VLAN id is unregistered.
*
* void (*ndo_poll_controller)(struct net_device *dev);
*/
#define HAVE_NET_DEVICE_OPS
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
void (*ndo_uninit)(struct net_device *dev);
int (*ndo_open)(struct net_device *dev);
int (*ndo_stop)(struct net_device *dev);
int (*ndo_start_xmit) (struct sk_buff *skb,
struct net_device *dev);
u16 (*ndo_select_queue)(struct net_device *dev,
struct sk_buff *skb);
#define HAVE_CHANGE_RX_FLAGS
void (*ndo_change_rx_flags)(struct net_device *dev,
int flags);
#define HAVE_SET_RX_MODE
void (*ndo_set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
void (*ndo_set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
int (*ndo_set_mac_address)(struct net_device *dev,
void *addr);
#define HAVE_VALIDATE_ADDR
int (*ndo_validate_addr)(struct net_device *dev);
#define HAVE_PRIVATE_IOCTL
int (*ndo_do_ioctl)(struct net_device *dev,
struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
int (*ndo_set_config)(struct net_device *dev,
struct ifmap *map);
#define HAVE_CHANGE_MTU
int (*ndo_change_mtu)(struct net_device *dev,
int new_mtu);
int (*ndo_neigh_setup)(struct net_device *dev,
struct neigh_parms *);
#define HAVE_TX_TIMEOUT
void (*ndo_tx_timeout) (struct net_device *dev);
struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
void (*ndo_vlan_rx_register)(struct net_device *dev,
struct vlan_group *grp);
void (*ndo_vlan_rx_add_vid)(struct net_device *dev,
unsigned short vid);
void (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
#define HAVE_NETDEV_POLL
void (*ndo_poll_controller)(struct net_device *dev);
#endif
};
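A hedged sketch of a driver instantiating the ops table above with the required transmit hook plus open/stop; the handler bodies are stubs and all names are invented. A driver's setup routine would then point dev->netdev_ops at this table.
static int example_open(struct net_device *dev)
{
        netif_start_queue(dev);
        return 0;
}

static int example_stop(struct net_device *dev)
{
        netif_stop_queue(dev);
        return 0;
}

static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* a real driver would hand the skb to hardware here */
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static const struct net_device_ops example_netdev_ops = {
        .ndo_open       = example_open,
        .ndo_stop       = example_stop,
        .ndo_start_xmit = example_start_xmit,
};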
/*
* The DEVICE structure.
* Actually, this whole structure is a big mistake. It mixes I/O
@@ -495,14 +627,7 @@ struct net_device
unsigned long state;
struct list_head dev_list;
#ifdef CONFIG_NETPOLL
struct list_head napi_list;
#endif
/* The device initialization function. Called only once. */
int (*init)(struct net_device *dev);
/* ------- Fields preinitialized in Space.c finish here ------- */
/* Net device features */
unsigned long features;
@@ -521,6 +646,7 @@ struct net_device
#define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */
/* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */
#define NETIF_F_GRO 16384 /* Generic receive offload */
#define NETIF_F_LRO 32768 /* large receive offload */
/* Segmentation offload features */
@@ -546,15 +672,13 @@ struct net_device
* for all in netdev_increment_features.
*/
#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
NETIF_F_SG | NETIF_F_HIGHDMA | \
NETIF_F_SG | NETIF_F_HIGHDMA | \
NETIF_F_FRAGLIST)
/* Interface index. Unique device identifier */
int ifindex;
int iflink;
struct net_device_stats* (*get_stats)(struct net_device *dev);
struct net_device_stats stats;
#ifdef CONFIG_WIRELESS_EXT
@@ -564,18 +688,13 @@ struct net_device
/* Instance data managed by the core of Wireless Extensions. */
struct iw_public_data * wireless_data;
#endif
/* Management operations */
const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
/* Hardware header description */
const struct header_ops *header_ops;
/*
* This marks the end of the "visible" part of the structure. All
* fields hereafter are internal to the system, and may change at
* will (read: may be cleaned up at will).
*/
unsigned int flags; /* interface flags (a la BSD) */
unsigned short gflags;
unsigned short priv_flags; /* Like 'flags' but invisible to userspace. */
@@ -634,7 +753,7 @@ struct net_device
unsigned long last_rx; /* Time of last Rx */
/* Interface address info used in eth_type_trans() */
unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast
because most packets are unicast) */
because most packets are unicast) */
unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
@@ -653,18 +772,12 @@ struct net_device
/*
* One part is mostly used on xmit path (device)
*/
void *priv; /* pointer to private data */
int (*hard_start_xmit) (struct sk_buff *skb,
struct net_device *dev);
/* These may be needed for future network-power-down code. */
unsigned long trans_start; /* Time (in jiffies) of last Tx */
int watchdog_timeo; /* used by dev_watchdog() */
struct timer_list watchdog_timer;
/*
* refcnt is a very hot point, so align it on SMP
*/
/* Number of references to this device */
atomic_t refcnt ____cacheline_aligned_in_smp;
@@ -683,56 +796,12 @@ struct net_device
NETREG_RELEASED, /* called free_netdev */
} reg_state;
/* Called after device is detached from network. */
void (*uninit)(struct net_device *dev);
/* Called after last user reference disappears. */
void (*destructor)(struct net_device *dev);
/* Called from unregister, can be used to call free_netdev */
void (*destructor)(struct net_device *dev);
/* Pointers to interface service routines. */
int (*open)(struct net_device *dev);
int (*stop)(struct net_device *dev);
#define HAVE_NETDEV_POLL
#define HAVE_CHANGE_RX_FLAGS
void (*change_rx_flags)(struct net_device *dev,
int flags);
#define HAVE_SET_RX_MODE
void (*set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
void (*set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
int (*set_mac_address)(struct net_device *dev,
void *addr);
#define HAVE_VALIDATE_ADDR
int (*validate_addr)(struct net_device *dev);
#define HAVE_PRIVATE_IOCTL
int (*do_ioctl)(struct net_device *dev,
struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
int (*set_config)(struct net_device *dev,
struct ifmap *map);
#define HAVE_CHANGE_MTU
int (*change_mtu)(struct net_device *dev, int new_mtu);
#define HAVE_TX_TIMEOUT
void (*tx_timeout) (struct net_device *dev);
void (*vlan_rx_register)(struct net_device *dev,
struct vlan_group *grp);
void (*vlan_rx_add_vid)(struct net_device *dev,
unsigned short vid);
void (*vlan_rx_kill_vid)(struct net_device *dev,
unsigned short vid);
int (*neigh_setup)(struct net_device *dev, struct neigh_parms *);
#ifdef CONFIG_NETPOLL
struct netpoll_info *npinfo;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
void (*poll_controller)(struct net_device *dev);
#endif
u16 (*select_queue)(struct net_device *dev,
struct sk_buff *skb);
#ifdef CONFIG_NET_NS
/* Network namespace this network device is inside */
@@ -763,6 +832,49 @@ struct net_device
/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE 65536
unsigned int gso_max_size;
#ifdef CONFIG_DCB
/* Data Center Bridging netlink ops */
struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
#ifdef CONFIG_COMPAT_NET_DEV_OPS
struct {
int (*init)(struct net_device *dev);
void (*uninit)(struct net_device *dev);
int (*open)(struct net_device *dev);
int (*stop)(struct net_device *dev);
int (*hard_start_xmit) (struct sk_buff *skb,
struct net_device *dev);
u16 (*select_queue)(struct net_device *dev,
struct sk_buff *skb);
void (*change_rx_flags)(struct net_device *dev,
int flags);
void (*set_rx_mode)(struct net_device *dev);
void (*set_multicast_list)(struct net_device *dev);
int (*set_mac_address)(struct net_device *dev,
void *addr);
int (*validate_addr)(struct net_device *dev);
int (*do_ioctl)(struct net_device *dev,
struct ifreq *ifr, int cmd);
int (*set_config)(struct net_device *dev,
struct ifmap *map);
int (*change_mtu)(struct net_device *dev, int new_mtu);
int (*neigh_setup)(struct net_device *dev,
struct neigh_parms *);
void (*tx_timeout) (struct net_device *dev);
struct net_device_stats* (*get_stats)(struct net_device *dev);
void (*vlan_rx_register)(struct net_device *dev,
struct vlan_group *grp);
void (*vlan_rx_add_vid)(struct net_device *dev,
unsigned short vid);
void (*vlan_rx_kill_vid)(struct net_device *dev,
unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
void (*poll_controller)(struct net_device *dev);
#endif
};
#endif
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -858,22 +970,8 @@ static inline void *netdev_priv(const struct net_device *dev)
* netif_napi_add() must be used to initialize a napi context prior to calling
* *any* of the other napi related functions.
*/
static inline void netif_napi_add(struct net_device *dev,
struct napi_struct *napi,
int (*poll)(struct napi_struct *, int),
int weight)
{
INIT_LIST_HEAD(&napi->poll_list);
napi->poll = poll;
napi->weight = weight;
#ifdef CONFIG_NETPOLL
napi->dev = dev;
list_add(&napi->dev_list, &dev->napi_list);
spin_lock_init(&napi->poll_lock);
napi->poll_owner = -1;
#endif
set_bit(NAPI_STATE_SCHED, &napi->state);
}
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight);
/**
* netif_napi_del - remove a napi context
@@ -881,12 +979,20 @@ static inline void netif_napi_add(struct net_device *dev,
*
* netif_napi_del() removes a napi context from the network device napi list
*/
static inline void netif_napi_del(struct napi_struct *napi)
{
#ifdef CONFIG_NETPOLL
list_del(&napi->dev_list);
#endif
}
void netif_napi_del(struct napi_struct *napi);
struct napi_gro_cb {
/* This is non-zero if the packet may be of the same flow. */
int same_flow;
/* This is non-zero if the packet cannot be merged with the new skb. */
int flush;
/* Number of segments aggregated. */
int count;
};
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
struct packet_type {
__be16 type; /* This is really htons(ether_type). */
@@ -898,6 +1004,9 @@ struct packet_type {
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
int features);
int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb);
void *af_packet_priv;
struct list_head list;
};
@@ -1251,6 +1360,9 @@ extern int netif_rx(struct sk_buff *skb);
extern int netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int netif_receive_skb(struct sk_buff *skb);
extern void napi_gro_flush(struct napi_struct *napi);
extern int napi_gro_receive(struct napi_struct *napi,
struct sk_buff *skb);
extern void netif_nit_deliver(struct sk_buff *skb);
extern int dev_valid_name(const char *name);
extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
@@ -1443,8 +1555,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
}
/* Test if receive needs to be scheduled but only if up */
static inline int netif_rx_schedule_prep(struct net_device *dev,
struct napi_struct *napi)
static inline int netif_rx_schedule_prep(struct napi_struct *napi)
{
return napi_schedule_prep(napi);
}
@@ -1452,27 +1563,24 @@ static inline int netif_rx_schedule_prep(struct net_device *dev,
/* Add interface to tail of rx poll list. This assumes that _prep has
* already been called and returned 1.
*/
static inline void __netif_rx_schedule(struct net_device *dev,
struct napi_struct *napi)
static inline void __netif_rx_schedule(struct napi_struct *napi)
{
__napi_schedule(napi);
}
/* Try to reschedule poll. Called by irq handler. */
static inline void netif_rx_schedule(struct net_device *dev,
struct napi_struct *napi)
static inline void netif_rx_schedule(struct napi_struct *napi)
{
if (netif_rx_schedule_prep(dev, napi))
__netif_rx_schedule(dev, napi);
if (netif_rx_schedule_prep(napi))
__netif_rx_schedule(napi);
}
/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
static inline int netif_rx_reschedule(struct net_device *dev,
struct napi_struct *napi)
static inline int netif_rx_reschedule(struct napi_struct *napi)
{
if (napi_schedule_prep(napi)) {
__netif_rx_schedule(dev, napi);
__netif_rx_schedule(napi);
return 1;
}
return 0;
@@ -1481,8 +1589,7 @@ static inline int netif_rx_reschedule(struct net_device *dev,
/* same as netif_rx_complete, except that local_irq_save(flags)
* has already been issued
*/
static inline void __netif_rx_complete(struct net_device *dev,
struct napi_struct *napi)
static inline void __netif_rx_complete(struct napi_struct *napi)
{
__napi_complete(napi);
}
@@ -1492,14 +1599,9 @@ static inline void __netif_rx_complete(struct net_device *dev,
* it completes the work. The device cannot be out of poll list at this
* moment, it is BUG().
*/
static inline void netif_rx_complete(struct net_device *dev,
struct napi_struct *napi)
static inline void netif_rx_complete(struct napi_struct *napi)
{
unsigned long flags;
local_irq_save(flags);
__netif_rx_complete(dev, napi);
local_irq_restore(flags);
napi_complete(napi);
}
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
@@ -1676,6 +1778,8 @@ extern void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void dev_load(struct net *net, const char *name);
extern void dev_mcast_init(void);
extern const struct net_device_stats *dev_get_stats(struct net_device *dev);
extern int netdev_max_backlog;
extern int weight_p;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
@@ -1724,6 +1828,8 @@ static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
return skb_is_gso(skb) &&
(!skb_gso_ok(skb, dev->features) ||
(skb_shinfo(skb)->frag_list &&
!(dev->features & NETIF_F_FRAGLIST)) ||
unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
@@ -1742,26 +1848,31 @@ static inline int skb_bond_should_drop(struct sk_buff *skb)
struct net_device *dev = skb->dev;
struct net_device *master = dev->master;
if (master &&
(dev->priv_flags & IFF_SLAVE_INACTIVE)) {
if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
skb->protocol == __constant_htons(ETH_P_ARP))
return 0;
if (master) {
if (master->priv_flags & IFF_MASTER_ARPMON)
dev->last_rx = jiffies;
if (master->priv_flags & IFF_MASTER_ALB) {
if (skb->pkt_type != PACKET_BROADCAST &&
skb->pkt_type != PACKET_MULTICAST)
if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
skb->protocol == __constant_htons(ETH_P_ARP))
return 0;
}
if (master->priv_flags & IFF_MASTER_8023AD &&
skb->protocol == __constant_htons(ETH_P_SLOW))
return 0;
return 1;
if (master->priv_flags & IFF_MASTER_ALB) {
if (skb->pkt_type != PACKET_BROADCAST &&
skb->pkt_type != PACKET_MULTICAST)
return 0;
}
if (master->priv_flags & IFF_MASTER_8023AD &&
skb->protocol == __constant_htons(ETH_P_SLOW))
return 0;
return 1;
}
}
return 0;
}
extern struct pernet_operations __net_initdata loopback_net_ops;
#endif /* __KERNEL__ */
#endif /* _LINUX_DEV_H */
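A minimal sketch of how a driver would use the napi_struct-only scheduling calls and the new GRO receive path above. The adapter structure, my_intr(), my_poll() and my_next_rx_skb() are hypothetical; only netif_rx_schedule_prep(), __netif_rx_schedule(), netif_rx_complete() and napi_gro_receive() come from this header.

#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct my_adapter {
	struct net_device *netdev;
	struct napi_struct napi;
};

/* hypothetical helper: pull the next completed RX frame off the ring */
static struct sk_buff *my_next_rx_skb(struct my_adapter *adapter);

static irqreturn_t my_intr(int irq, void *data)
{
	struct my_adapter *adapter = data;

	/* the net_device argument is gone; schedule on the napi_struct alone */
	if (netif_rx_schedule_prep(&adapter->napi)) {
		/* ... mask further RX interrupts here ... */
		__netif_rx_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_adapter *adapter = container_of(napi, struct my_adapter, napi);
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget && (skb = my_next_rx_skb(adapter)) != NULL) {
		/* feed packets to GRO instead of calling netif_receive_skb() */
		napi_gro_receive(napi, skb);
		work_done++;
	}

	if (work_done < budget) {
		/* netif_rx_complete() is now just a wrapper around napi_complete() */
		netif_rx_complete(napi);
		/* ... unmask RX interrupts here ... */
	}
	return work_done;
}

The napi context itself would still be registered from probe with netif_napi_add(netdev, &adapter->napi, my_poll, 64) and torn down with the now out-of-line netif_napi_del().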

View File

@@ -141,6 +141,7 @@ enum ctattr_protonat {
#define CTA_PROTONAT_MAX (__CTA_PROTONAT_MAX - 1)
enum ctattr_natseq {
CTA_NAT_SEQ_UNSPEC,
CTA_NAT_SEQ_CORRECTION_POS,
CTA_NAT_SEQ_OFFSET_BEFORE,
CTA_NAT_SEQ_OFFSET_AFTER,

View File

@@ -300,7 +300,8 @@ struct ebt_table
#define EBT_ALIGN(s) (((s) + (__alignof__(struct ebt_replace)-1)) & \
~(__alignof__(struct ebt_replace)-1))
extern int ebt_register_table(struct ebt_table *table);
extern struct ebt_table *ebt_register_table(struct net *net,
struct ebt_table *table);
extern void ebt_unregister_table(struct ebt_table *table);
extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,

View File

@@ -1,6 +1,8 @@
#ifndef _IPT_POLICY_H
#define _IPT_POLICY_H
#include <linux/netfilter/xt_policy.h>
#define IPT_POLICY_MAX_ELEM XT_POLICY_MAX_ELEM
/* ipt_policy_flags */

View File

@@ -1,6 +1,8 @@
#ifndef _IP6T_POLICY_H
#define _IP6T_POLICY_H
#include <linux/netfilter/xt_policy.h>
#define IP6T_POLICY_MAX_ELEM XT_POLICY_MAX_ELEM
/* ip6t_policy_flags */

View File

@@ -242,7 +242,8 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
nlh->nlmsg_flags = flags;
nlh->nlmsg_pid = pid;
nlh->nlmsg_seq = seq;
memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
return nlh;
}

View File

@@ -94,11 +94,6 @@ static inline void netpoll_poll_unlock(void *have)
rcu_read_unlock();
}
static inline void netpoll_netdev_init(struct net_device *dev)
{
INIT_LIST_HEAD(&dev->napi_list);
}
#else
static inline int netpoll_rx(struct sk_buff *skb)
{

View File

@@ -3,7 +3,26 @@
/*
* 802.11 netlink interface public header
*
* Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2006, 2007, 2008 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2008 Michael Wu <flamingice@sourmilk.net>
* Copyright 2008 Luis Carlos Cobo <luisca@cozybit.com>
* Copyright 2008 Michael Buesch <mb@bu3sch.de>
* Copyright 2008 Luis R. Rodriguez <lrodriguez@atheros.com>
* Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
* Copyright 2008 Colin McCabe <colin@cozybit.com>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/**
@@ -25,8 +44,10 @@
*
* @NL80211_CMD_GET_WIPHY: request information about a wiphy or dump request
* to get a list of all present wiphys.
* @NL80211_CMD_SET_WIPHY: set wiphy name, needs %NL80211_ATTR_WIPHY and
* %NL80211_ATTR_WIPHY_NAME.
* @NL80211_CMD_SET_WIPHY: set wiphy parameters, needs %NL80211_ATTR_WIPHY or
* %NL80211_ATTR_IFINDEX; can be used to set %NL80211_ATTR_WIPHY_NAME,
* %NL80211_ATTR_WIPHY_TXQ_PARAMS, %NL80211_ATTR_WIPHY_FREQ, and/or
* %NL80211_ATTR_WIPHY_SEC_CHAN_OFFSET.
* @NL80211_CMD_NEW_WIPHY: Newly created wiphy, response to get request
* or rename notification. Has attributes %NL80211_ATTR_WIPHY and
* %NL80211_ATTR_WIPHY_NAME.
@@ -106,6 +127,12 @@
* to the specified ISO/IEC 3166-1 alpha2 country code. The core will
* store this as a valid request and then query userspace for it.
*
* @NL80211_CMD_GET_MESH_PARAMS: Get mesh networking properties for the
* interface identified by %NL80211_ATTR_IFINDEX
*
* @NL80211_CMD_SET_MESH_PARAMS: Set mesh networking properties for the
* interface identified by %NL80211_ATTR_IFINDEX
*
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -148,6 +175,9 @@ enum nl80211_commands {
NL80211_CMD_SET_REG,
NL80211_CMD_REQ_SET_REG,
NL80211_CMD_GET_MESH_PARAMS,
NL80211_CMD_SET_MESH_PARAMS,
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -169,6 +199,15 @@ enum nl80211_commands {
* @NL80211_ATTR_WIPHY: index of wiphy to operate on, cf.
* /sys/class/ieee80211/<phyname>/index
* @NL80211_ATTR_WIPHY_NAME: wiphy name (used for renaming)
* @NL80211_ATTR_WIPHY_TXQ_PARAMS: a nested array of TX queue parameters
* @NL80211_ATTR_WIPHY_FREQ: frequency of the selected channel in MHz
* @NL80211_ATTR_WIPHY_CHANNEL_TYPE: included with NL80211_ATTR_WIPHY_FREQ
* if HT20 or HT40 are allowed (i.e., 802.11n disabled if not included):
* NL80211_CHAN_NO_HT = HT not allowed (i.e., same as not including
* this attribute)
* NL80211_CHAN_HT20 = HT20 only
* NL80211_CHAN_HT40MINUS = secondary channel is below the primary channel
* NL80211_CHAN_HT40PLUS = secondary channel is above the primary channel
*
* @NL80211_ATTR_IFINDEX: network interface index of the device to operate on
* @NL80211_ATTR_IFNAME: network interface name
@@ -234,6 +273,9 @@ enum nl80211_commands {
* (u8, 0 or 1)
* @NL80211_ATTR_BSS_SHORT_SLOT_TIME: whether short slot time enabled
* (u8, 0 or 1)
* @NL80211_ATTR_BSS_BASIC_RATES: basic rates, array of basic
* rates in the format defined by IEEE 802.11 7.3.2.2 but without the length
* restriction (at most %NL80211_MAX_SUPP_RATES).
*
* @NL80211_ATTR_HT_CAPABILITY: HT Capability information element (from
* association request when used with NL80211_CMD_NEW_STATION)
@@ -296,6 +338,14 @@ enum nl80211_attrs {
NL80211_ATTR_REG_ALPHA2,
NL80211_ATTR_REG_RULES,
NL80211_ATTR_MESH_PARAMS,
NL80211_ATTR_BSS_BASIC_RATES,
NL80211_ATTR_WIPHY_TXQ_PARAMS,
NL80211_ATTR_WIPHY_FREQ,
NL80211_ATTR_WIPHY_CHANNEL_TYPE,
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -307,6 +357,10 @@ enum nl80211_attrs {
* here
*/
#define NL80211_ATTR_HT_CAPABILITY NL80211_ATTR_HT_CAPABILITY
#define NL80211_ATTR_BSS_BASIC_RATES NL80211_ATTR_BSS_BASIC_RATES
#define NL80211_ATTR_WIPHY_TXQ_PARAMS NL80211_ATTR_WIPHY_TXQ_PARAMS
#define NL80211_ATTR_WIPHY_FREQ NL80211_ATTR_WIPHY_FREQ
#define NL80211_ATTR_WIPHY_SEC_CHAN_OFFSET NL80211_ATTR_WIPHY_SEC_CHAN_OFFSET
#define NL80211_MAX_SUPP_RATES 32
#define NL80211_MAX_SUPP_REG_RULES 32
@@ -370,6 +424,32 @@ enum nl80211_sta_flags {
NL80211_STA_FLAG_MAX = __NL80211_STA_FLAG_AFTER_LAST - 1
};
/**
* enum nl80211_rate_info - bitrate information
*
* These attribute types are used with %NL80211_STA_INFO_TXRATE
* when getting information about the bitrate of a station.
*
* @__NL80211_RATE_INFO_INVALID: attribute number 0 is reserved
* @NL80211_RATE_INFO_BITRATE: total bitrate (u16, 100kbit/s)
* @NL80211_RATE_INFO_MCS: mcs index for 802.11n (u8)
* @NL80211_RATE_INFO_40_MHZ_WIDTH: 40 MHz dual-channel bitrate
* @NL80211_RATE_INFO_SHORT_GI: 400ns guard interval
* @NL80211_RATE_INFO_MAX: highest rate_info number currently defined
* @__NL80211_RATE_INFO_AFTER_LAST: internal use
*/
enum nl80211_rate_info {
__NL80211_RATE_INFO_INVALID,
NL80211_RATE_INFO_BITRATE,
NL80211_RATE_INFO_MCS,
NL80211_RATE_INFO_40_MHZ_WIDTH,
NL80211_RATE_INFO_SHORT_GI,
/* keep last */
__NL80211_RATE_INFO_AFTER_LAST,
NL80211_RATE_INFO_MAX = __NL80211_RATE_INFO_AFTER_LAST - 1
};
/**
* enum nl80211_sta_info - station information
*
@@ -382,6 +462,9 @@ enum nl80211_sta_flags {
* @NL80211_STA_INFO_TX_BYTES: total transmitted bytes (u32, to this station)
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
* @NL80211_STA_INFO_SIGNAL: signal strength of last received PPDU (u8, dBm)
* @NL80211_STA_INFO_TX_BITRATE: current unicast tx rate, nested attribute
* containing as much info as possible, see &enum nl80211_sta_info_txrate.
*/
enum nl80211_sta_info {
__NL80211_STA_INFO_INVALID,
@@ -391,6 +474,8 @@ enum nl80211_sta_info {
NL80211_STA_INFO_LLID,
NL80211_STA_INFO_PLID,
NL80211_STA_INFO_PLINK_STATE,
NL80211_STA_INFO_SIGNAL,
NL80211_STA_INFO_TX_BITRATE,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
@@ -452,17 +537,29 @@ enum nl80211_mpath_info {
* an array of nested frequency attributes
* @NL80211_BAND_ATTR_RATES: supported bitrates in this band,
* an array of nested bitrate attributes
* @NL80211_BAND_ATTR_HT_MCS_SET: 16-byte attribute containing the MCS set as
* defined in 802.11n
* @NL80211_BAND_ATTR_HT_CAPA: HT capabilities, as in the HT information IE
* @NL80211_BAND_ATTR_HT_AMPDU_FACTOR: A-MPDU factor, as in 11n
* @NL80211_BAND_ATTR_HT_AMPDU_DENSITY: A-MPDU density, as in 11n
*/
enum nl80211_band_attr {
__NL80211_BAND_ATTR_INVALID,
NL80211_BAND_ATTR_FREQS,
NL80211_BAND_ATTR_RATES,
NL80211_BAND_ATTR_HT_MCS_SET,
NL80211_BAND_ATTR_HT_CAPA,
NL80211_BAND_ATTR_HT_AMPDU_FACTOR,
NL80211_BAND_ATTR_HT_AMPDU_DENSITY,
/* keep last */
__NL80211_BAND_ATTR_AFTER_LAST,
NL80211_BAND_ATTR_MAX = __NL80211_BAND_ATTR_AFTER_LAST - 1
};
#define NL80211_BAND_ATTR_HT_CAPA NL80211_BAND_ATTR_HT_CAPA
/**
* enum nl80211_frequency_attr - frequency attributes
* @NL80211_FREQUENCY_ATTR_FREQ: Frequency in MHz
@@ -474,6 +571,8 @@ enum nl80211_band_attr {
* on this channel in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_RADAR: Radar detection is mandatory
* on this channel in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_MAX_TX_POWER: Maximum transmission power in mBm
* (100 * dBm).
*/
enum nl80211_frequency_attr {
__NL80211_FREQUENCY_ATTR_INVALID,
@@ -482,12 +581,15 @@ enum nl80211_frequency_attr {
NL80211_FREQUENCY_ATTR_PASSIVE_SCAN,
NL80211_FREQUENCY_ATTR_NO_IBSS,
NL80211_FREQUENCY_ATTR_RADAR,
NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
/* keep last */
__NL80211_FREQUENCY_ATTR_AFTER_LAST,
NL80211_FREQUENCY_ATTR_MAX = __NL80211_FREQUENCY_ATTR_AFTER_LAST - 1
};
#define NL80211_FREQUENCY_ATTR_MAX_TX_POWER NL80211_FREQUENCY_ATTR_MAX_TX_POWER
/**
* enum nl80211_bitrate_attr - bitrate attributes
* @NL80211_BITRATE_ATTR_RATE: Bitrate in units of 100 kbps
@@ -594,4 +696,119 @@ enum nl80211_mntr_flags {
NL80211_MNTR_FLAG_MAX = __NL80211_MNTR_FLAG_AFTER_LAST - 1
};
/**
* enum nl80211_meshconf_params - mesh configuration parameters
*
* Mesh configuration parameters
*
* @__NL80211_MESHCONF_INVALID: internal use
*
* @NL80211_MESHCONF_RETRY_TIMEOUT: specifies the initial retry timeout in
* millisecond units, used by the Peer Link Open message
*
* @NL80211_MESHCONF_CONFIRM_TIMEOUT: specifies the initial confirm timeout, in
* millisecond units, used by the peer link management to close a peer link
*
* @NL80211_MESHCONF_HOLDING_TIMEOUT: specifies the holding timeout, in
* millisecond units
*
* @NL80211_MESHCONF_MAX_PEER_LINKS: maximum number of peer links allowed
* on this mesh interface
*
* @NL80211_MESHCONF_MAX_RETRIES: specifies the maximum number of peer link
* open retries that can be sent to establish a new peer link instance in a
* mesh
*
* @NL80211_MESHCONF_TTL: specifies the value of TTL field set at a source mesh
* point.
*
* @NL80211_MESHCONF_AUTO_OPEN_PLINKS: whether we should automatically
* open peer links when we detect compatible mesh peers.
*
* @NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES: the number of action frames
* containing a PREQ that an MP can send to a particular destination (path
* target)
*
* @NL80211_MESHCONF_PATH_REFRESH_TIME: how frequently to refresh mesh paths
* (in milliseconds)
*
* @NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT: minimum length of time to wait
* until giving up on a path discovery (in milliseconds)
*
* @NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT: The time (in TUs) for which mesh
* points receiving a PREQ shall consider the forwarding information from the
* root to be valid. (TU = time unit)
*
* @NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL: The minimum interval of time (in
* TUs) during which an MP can send only one action frame containing a PREQ
* reference element
*
* @NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME: The interval of time (in TUs)
* that it takes for an HWMP information element to propagate across the mesh
*
* @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute
*
* @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
*/
enum nl80211_meshconf_params {
__NL80211_MESHCONF_INVALID,
NL80211_MESHCONF_RETRY_TIMEOUT,
NL80211_MESHCONF_CONFIRM_TIMEOUT,
NL80211_MESHCONF_HOLDING_TIMEOUT,
NL80211_MESHCONF_MAX_PEER_LINKS,
NL80211_MESHCONF_MAX_RETRIES,
NL80211_MESHCONF_TTL,
NL80211_MESHCONF_AUTO_OPEN_PLINKS,
NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
NL80211_MESHCONF_PATH_REFRESH_TIME,
NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
/* keep last */
__NL80211_MESHCONF_ATTR_AFTER_LAST,
NL80211_MESHCONF_ATTR_MAX = __NL80211_MESHCONF_ATTR_AFTER_LAST - 1
};
/**
* enum nl80211_txq_attr - TX queue parameter attributes
* @__NL80211_TXQ_ATTR_INVALID: Attribute number 0 is reserved
* @NL80211_TXQ_ATTR_QUEUE: TX queue identifier (NL80211_TXQ_Q_*)
* @NL80211_TXQ_ATTR_TXOP: Maximum burst time in units of 32 usecs, 0 meaning
* disabled
* @NL80211_TXQ_ATTR_CWMIN: Minimum contention window [a value of the form
* 2^n-1 in the range 1..32767]
* @NL80211_TXQ_ATTR_CWMAX: Maximum contention window [a value of the form
* 2^n-1 in the range 1..32767]
* @NL80211_TXQ_ATTR_AIFS: Arbitration interframe space [0..255]
* @__NL80211_TXQ_ATTR_AFTER_LAST: Internal
* @NL80211_TXQ_ATTR_MAX: Maximum TXQ attribute number
*/
enum nl80211_txq_attr {
__NL80211_TXQ_ATTR_INVALID,
NL80211_TXQ_ATTR_QUEUE,
NL80211_TXQ_ATTR_TXOP,
NL80211_TXQ_ATTR_CWMIN,
NL80211_TXQ_ATTR_CWMAX,
NL80211_TXQ_ATTR_AIFS,
/* keep last */
__NL80211_TXQ_ATTR_AFTER_LAST,
NL80211_TXQ_ATTR_MAX = __NL80211_TXQ_ATTR_AFTER_LAST - 1
};
enum nl80211_txq_q {
NL80211_TXQ_Q_VO,
NL80211_TXQ_Q_VI,
NL80211_TXQ_Q_BE,
NL80211_TXQ_Q_BK
};
enum nl80211_channel_type {
NL80211_CHAN_NO_HT,
NL80211_CHAN_HT20,
NL80211_CHAN_HT40MINUS,
NL80211_CHAN_HT40PLUS
};
#endif /* __LINUX_NL80211_H */

View File

@@ -27,7 +27,6 @@ struct nsproxy {
struct ipc_namespace *ipc_ns;
struct mnt_namespace *mnt_ns;
struct pid_namespace *pid_ns;
struct user_namespace *user_ns;
struct net *net_ns;
};
extern struct nsproxy init_nsproxy;

View File

@@ -57,6 +57,12 @@ extern struct device_node *of_get_next_child(const struct device_node *node,
for (child = of_get_next_child(parent, NULL); child != NULL; \
child = of_get_next_child(parent, child))
extern struct device_node *of_find_node_with_property(
struct device_node *from, const char *prop_name);
#define for_each_node_with_property(dn, prop_name) \
for (dn = of_find_node_with_property(NULL, prop_name); dn; \
dn = of_find_node_with_property(dn, prop_name))
extern struct property *of_find_property(const struct device_node *np,
const char *name,
int *lenp);
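A short sketch of the new property-based iterator above; the property name is made up for illustration, only of_find_node_with_property() and for_each_node_with_property() come from this header.

#include <linux/of.h>

static void my_list_console_nodes(void)
{
	struct device_node *dn;

	/* visits every node carrying the (hypothetical) property; if the
	 * loop is exited early, the caller must of_node_put(dn) */
	for_each_node_with_property(dn, "linux,my-console")
		printk(KERN_INFO "candidate console: %s\n", dn->full_name);
}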

View File

@@ -14,9 +14,22 @@
#ifndef __LINUX_OF_GPIO_H
#define __LINUX_OF_GPIO_H
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gpio.h>
struct device_node;
/*
* These are Linux-specific flags. By default the controllers' and Linux's
* mappings match, but GPIO controllers are free to translate their own flags
* to Linux-specific ones in their .xlate callback; a 1:1 mapping is recommended.
*/
enum of_gpio_flags {
OF_GPIO_ACTIVE_LOW = 0x1,
};
#ifdef CONFIG_OF_GPIO
/*
@@ -26,7 +39,7 @@ struct of_gpio_chip {
struct gpio_chip gc;
int gpio_cells;
int (*xlate)(struct of_gpio_chip *of_gc, struct device_node *np,
const void *gpio_spec);
const void *gpio_spec, enum of_gpio_flags *flags);
};
static inline struct of_gpio_chip *to_of_gpio_chip(struct gpio_chip *gc)
@@ -50,20 +63,43 @@ static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc)
return container_of(of_gc, struct of_mm_gpio_chip, of_gc);
}
extern int of_get_gpio(struct device_node *np, int index);
extern int of_get_gpio_flags(struct device_node *np, int index,
enum of_gpio_flags *flags);
extern unsigned int of_gpio_count(struct device_node *np);
extern int of_mm_gpiochip_add(struct device_node *np,
struct of_mm_gpio_chip *mm_gc);
extern int of_gpio_simple_xlate(struct of_gpio_chip *of_gc,
struct device_node *np,
const void *gpio_spec);
const void *gpio_spec,
enum of_gpio_flags *flags);
#else
/* Drivers may not strictly depend on the GPIO support, so let them link. */
static inline int of_get_gpio(struct device_node *np, int index)
static inline int of_get_gpio_flags(struct device_node *np, int index,
enum of_gpio_flags *flags)
{
return -ENOSYS;
}
static inline unsigned int of_gpio_count(struct device_node *np)
{
return 0;
}
#endif /* CONFIG_OF_GPIO */
/**
* of_get_gpio - Get a GPIO number to use with GPIO API
* @np: device node to get GPIO from
* @index: index of the GPIO
*
* Returns a GPIO number to use with the Linux generic GPIO API, or one of the
* errno values on error.
*/
static inline int of_get_gpio(struct device_node *np, int index)
{
return of_get_gpio_flags(np, index, NULL);
}
#endif /* __LINUX_OF_GPIO_H */
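A hedged sketch of how a driver might use the new of_get_gpio_flags() and OF_GPIO_ACTIVE_LOW added above. The function, the "my-reset" label and the assumption that np comes from the driver's probe path are all illustrative; only of_get_gpio_flags() and the flag value come from this header.

#include <linux/of_gpio.h>
#include <linux/gpio.h>

static int my_setup_reset_gpio(struct device_node *np)
{
	enum of_gpio_flags flags;
	int gpio, active_low, err;

	gpio = of_get_gpio_flags(np, 0, &flags);	/* first GPIO of the node */
	if (gpio < 0)
		return gpio;				/* -ENOSYS, -EINVAL, ... */

	active_low = !!(flags & OF_GPIO_ACTIVE_LOW);

	err = gpio_request(gpio, "my-reset");
	if (err)
		return err;

	/* drive the line to its inactive level */
	return gpio_direction_output(gpio, active_low ? 1 : 0);
}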

View File

@@ -134,6 +134,11 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
};
enum pci_irq_reroute_variant {
INTEL_IRQ_REROUTE_VARIANT = 1,
MAX_IRQ_REROUTE_VARIANTS = 3
};
typedef unsigned short __bitwise pci_bus_flags_t;
enum pci_bus_flags {
PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
@@ -218,6 +223,7 @@ struct pci_dev {
unsigned int no_msi:1; /* device may not use msi */
unsigned int block_ucfg_access:1; /* userspace config space access is blocked */
unsigned int broken_parity_status:1; /* Device generates false positive parity */
unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
unsigned int msi_enabled:1;
unsigned int msix_enabled:1;
unsigned int ari_enabled:1; /* ARI forwarding */

View File

@@ -2304,6 +2304,10 @@
#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
#define PCI_DEVICE_ID_INTEL_PXH_1 0x032A
#define PCI_DEVICE_ID_INTEL_PXHV 0x032C
#define PCI_DEVICE_ID_INTEL_80332_0 0x0330
#define PCI_DEVICE_ID_INTEL_80332_1 0x0332
#define PCI_DEVICE_ID_INTEL_80333_0 0x0370
#define PCI_DEVICE_ID_INTEL_80333_1 0x0372
#define PCI_DEVICE_ID_INTEL_82375 0x0482
#define PCI_DEVICE_ID_INTEL_82424 0x0483
#define PCI_DEVICE_ID_INTEL_82378 0x0484
@@ -2376,6 +2380,7 @@
#define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4
#define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6
#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab
#define PCI_DEVICE_ID_INTEL_ESB_10 0x25ac
#define PCI_DEVICE_ID_INTEL_82820_HB 0x2500
#define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501
#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530

View File

@@ -467,6 +467,8 @@ int genphy_restart_aneg(struct phy_device *phydev);
int genphy_config_aneg(struct phy_device *phydev);
int genphy_update_link(struct phy_device *phydev);
int genphy_read_status(struct phy_device *phydev);
int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
void phy_driver_unregister(struct phy_driver *drv);
int phy_driver_register(struct phy_driver *new_driver);
void phy_prepare_link(struct phy_device *phydev,

View File

@@ -147,9 +147,9 @@ pid_t pid_vnr(struct pid *pid);
#define do_each_pid_task(pid, type, task) \
do { \
struct hlist_node *pos___; \
if (pid != NULL) \
if ((pid) != NULL) \
hlist_for_each_entry_rcu((task), pos___, \
&pid->tasks[type], pids[type].node) {
&(pid)->tasks[type], pids[type].node) {
/*
* Both old and new leaders may be attached to

View File

@@ -394,6 +394,20 @@ enum
#define TCA_BASIC_MAX (__TCA_BASIC_MAX - 1)
/* Cgroup classifier */
enum
{
TCA_CGROUP_UNSPEC,
TCA_CGROUP_ACT,
TCA_CGROUP_POLICE,
TCA_CGROUP_EMATCHES,
__TCA_CGROUP_MAX,
};
#define TCA_CGROUP_MAX (__TCA_CGROUP_MAX - 1)
/* Extended Matches */
struct tcf_ematch_tree_hdr

View File

@@ -500,4 +500,20 @@ struct tc_netem_corrupt
#define NETEM_DIST_SCALE 8192
/* DRR */
enum
{
TCA_DRR_UNSPEC,
TCA_DRR_QUANTUM,
__TCA_DRR_MAX
};
#define TCA_DRR_MAX (__TCA_DRR_MAX - 1)
struct tc_drr_stats
{
u32 deficit;
};
#endif

View File

@@ -94,6 +94,7 @@ extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags);
#define PTRACE_MODE_READ 1
#define PTRACE_MODE_ATTACH 2
/* Returns 0 on success, -errno on denial. */
@@ -313,6 +314,27 @@ static inline void user_enable_block_step(struct task_struct *task)
#define arch_ptrace_stop(code, info) do { } while (0)
#endif
#ifndef arch_ptrace_untrace
/*
* Do machine-specific work before untracing child.
*
* This is called for a normal detach as well as from ptrace_exit()
* when the tracing task dies.
*
* Called with write_lock(&tasklist_lock) held.
*/
#define arch_ptrace_untrace(task) do { } while (0)
#endif
#ifndef arch_ptrace_fork
/*
* Do machine-specific work to initialize a new task.
*
* This is called from copy_process().
*/
#define arch_ptrace_fork(child, clone_flags) do { } while (0)
#endif
extern int task_current_syscall(struct task_struct *target, long *callno,
unsigned long args[6], unsigned int maxargs,
unsigned long *sp, unsigned long *pc);

View File

@@ -0,0 +1,110 @@
#ifndef _LINUX_RCULIST_NULLS_H
#define _LINUX_RCULIST_NULLS_H
#ifdef __KERNEL__
/*
* RCU-protected list version
*/
#include <linux/list_nulls.h>
#include <linux/rcupdate.h>
/**
* hlist_nulls_del_init_rcu - deletes entry from hash list with re-initialization
* @n: the element to delete from the hash list.
*
* Note: hlist_nulls_unhashed() on the node returns true after this. It is
* useful for RCU-based lockfree read traversal if the writer side
* must know if the list entry is still hashed or already unhashed.
*
* In particular, it means that we can not poison the forward pointers
* that may still be used for walking the hash list and we can only
* zero the pprev pointer so list_unhashed() will return true after
* this.
*
* The caller must take whatever precautions are necessary (such as
* holding appropriate locks) to avoid racing with another
* list-mutation primitive, such as hlist_nulls_add_head_rcu() or
* hlist_nulls_del_rcu(), running on this same list. However, it is
* perfectly legal to run concurrently with the _rcu list-traversal
* primitives, such as hlist_nulls_for_each_entry_rcu().
*/
static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
{
if (!hlist_nulls_unhashed(n)) {
__hlist_nulls_del(n);
n->pprev = NULL;
}
}
/**
* hlist_nulls_del_rcu - deletes entry from hash list without re-initialization
* @n: the element to delete from the hash list.
*
* Note: hlist_nulls_unhashed() on entry does not return true after this;
* the entry is in an undefined state. It is useful for RCU-based
* lockfree traversal.
*
* In particular, it means that we can not poison the forward
* pointers that may still be used for walking the hash list.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
* or hlist_nulls_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_nulls_for_each_entry().
*/
static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n)
{
__hlist_nulls_del(n);
n->pprev = LIST_POISON2;
}
/**
* hlist_nulls_add_head_rcu
* @n: the element to add to the hash list.
* @h: the list to add to.
*
* Description:
* Adds the specified element to the specified hlist_nulls,
* while permitting racing traversals.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
* or hlist_nulls_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs. Regardless of the type of CPU, the
* list-traversal primitive must be guarded by rcu_read_lock().
*/
static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
struct hlist_nulls_head *h)
{
struct hlist_nulls_node *first = h->first;
n->next = first;
n->pprev = &h->first;
rcu_assign_pointer(h->first, n);
if (!is_a_nulls(first))
first->pprev = &n->next;
}
/**
* hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_nulls_node to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_nulls_node within the struct.
*
*/
#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
for (pos = rcu_dereference((head)->first); \
(!is_a_nulls(pos)) && \
({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
pos = rcu_dereference(pos->next))
#endif
#endif
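A hypothetical usage sketch for the nulls-terminated RCU list above: a small hash table whose chains end in a nulls value equal to the bucket index, so a lockless reader can detect that the object it was following has been moved to another chain and restart. All names below are made up; only the hlist_nulls_* helpers, get_nulls_value() and INIT_HLIST_NULLS_HEAD() come from this header and <linux/list_nulls.h>.

#include <linux/rculist_nulls.h>
#include <linux/spinlock.h>

struct my_obj {
	int			key;
	struct hlist_nulls_node	node;
};

#define MY_HASH_SIZE 16
static struct hlist_nulls_head my_hash[MY_HASH_SIZE];
static spinlock_t my_lock;

static void my_hash_init(void)
{
	int i;

	spin_lock_init(&my_lock);
	for (i = 0; i < MY_HASH_SIZE; i++)
		INIT_HLIST_NULLS_HEAD(&my_hash[i], i);	/* nulls value = bucket index */
}

static void my_insert(struct my_obj *obj)
{
	unsigned int slot = obj->key % MY_HASH_SIZE;

	spin_lock(&my_lock);
	hlist_nulls_add_head_rcu(&obj->node, &my_hash[slot]);
	spin_unlock(&my_lock);
}

static struct my_obj *my_lookup(int key)
{
	unsigned int slot = key % MY_HASH_SIZE;
	struct hlist_nulls_node *pos;
	struct my_obj *obj;

	rcu_read_lock();
begin:
	hlist_nulls_for_each_entry_rcu(obj, pos, &my_hash[slot], node)
		if (obj->key == key)
			goto out;	/* caller must take a reference before rcu_read_unlock() */
	/* chain ended in a foreign nulls value: the object we were following
	 * was moved to another bucket, so restart the walk */
	if (get_nulls_value(pos) != slot)
		goto begin;
	obj = NULL;
out:
	rcu_read_unlock();
	return obj;
}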

View File

@@ -142,6 +142,7 @@ struct rcu_head {
* on the write-side to insure proper synchronization.
*/
#define rcu_read_lock_sched() preempt_disable()
#define rcu_read_lock_sched_notrace() preempt_disable_notrace()
/*
* rcu_read_unlock_sched - marks the end of a RCU-classic critical section
@@ -149,6 +150,7 @@ struct rcu_head {
* See rcu_read_lock_sched for more information.
*/
#define rcu_read_unlock_sched() preempt_enable()
#define rcu_read_unlock_sched_notrace() preempt_enable_notrace()

View File

@@ -108,6 +108,7 @@ struct rfkill {
struct device dev;
struct list_head node;
enum rfkill_state state_for_resume;
};
#define to_rfkill(d) container_of(d, struct rfkill, dev)
@@ -148,11 +149,4 @@ static inline char *rfkill_get_led_name(struct rfkill *rfkill)
#endif
}
/* rfkill notification chain */
#define RFKILL_STATE_CHANGED 0x0001 /* state of a normal rfkill
switch has changed */
int register_rfkill_notifier(struct notifier_block *nb);
int unregister_rfkill_notifier(struct notifier_block *nb);
#endif /* RFKILL_H */

View File

@@ -28,17 +28,19 @@ struct ring_buffer_event {
* size = 8 bytes
*
* @RINGBUF_TYPE_TIME_STAMP: Sync time stamp with external clock
* array[0] = tv_nsec
* array[1] = tv_sec
* array[0] = tv_nsec
* array[1..2] = tv_sec
* size = 16 bytes
*
* @RINGBUF_TYPE_DATA: Data record
* If len is zero:
* array[0] holds the actual length
* array[1..(length+3)/4-1] holds data
* array[1..(length+3)/4] holds data
* size = 4 + 4 + length (bytes)
* else
* length = len << 2
* array[0..(length+3)/4] holds data
* array[0..(length+3)/4-1] holds data
* size = 4 + length (bytes)
*/
enum ring_buffer_type {
RINGBUF_TYPE_PADDING,
@@ -122,6 +124,12 @@ void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
void tracing_on(void);
void tracing_off(void);
void tracing_off_permanent(void);
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer);
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
int ring_buffer_read_page(struct ring_buffer *buffer,
void **data_page, int cpu, int full);
enum ring_buffer_flags {
RB_FL_OVERWRITE = 1 << 0,

View File

@@ -427,9 +427,9 @@ void rio_dev_put(struct rio_dev *);
* Get the unique RIO device identifier. Returns the device
* identifier string.
*/
static inline char *rio_name(struct rio_dev *rdev)
static inline const char *rio_name(struct rio_dev *rdev)
{
return rdev->dev.bus_id;
return dev_name(&rdev->dev);
}
/**

View File

@@ -107,6 +107,11 @@ enum {
RTM_GETADDRLABEL,
#define RTM_GETADDRLABEL RTM_GETADDRLABEL
RTM_GETDCB = 78,
#define RTM_GETDCB RTM_GETDCB
RTM_SETDCB,
#define RTM_SETDCB RTM_SETDCB
__RTM_MAX,
#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
};

View File

@@ -96,6 +96,7 @@ struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio;
struct bts_tracer;
/*
* List of flags we want to share for kernel threads,
@@ -259,8 +260,6 @@ static inline int select_nohz_load_balancer(int cpu)
}
#endif
extern unsigned long rt_needs_cpu(int cpu);
/*
* Only dump TASK_* tasks. (0 for all tasks)
*/
@@ -572,12 +571,6 @@ struct signal_struct {
*/
struct rlimit rlim[RLIM_NLIMITS];
/* keep the process-shared keyrings here so that they do the right
* thing in threads created with CLONE_THREAD */
#ifdef CONFIG_KEYS
struct key *session_keyring; /* keyring inherited over fork */
struct key *process_keyring; /* keyring private to this process */
#endif
#ifdef CONFIG_BSD_PROCESS_ACCT
struct pacct_struct pacct; /* per-process accounting information */
#endif
@@ -648,6 +641,7 @@ struct user_struct {
/* Hash table maintenance information */
struct hlist_node uidhash_node;
uid_t uid;
struct user_namespace *user_ns;
#ifdef CONFIG_USER_SCHED
struct task_group *tg;
@@ -665,6 +659,7 @@ extern struct user_struct *find_user(uid_t);
extern struct user_struct root_user;
#define INIT_USER (&root_user)
struct backing_dev_info;
struct reclaim_state;
@@ -672,8 +667,7 @@ struct reclaim_state;
struct sched_info {
/* cumulative counters */
unsigned long pcount; /* # of times run on this cpu */
unsigned long long cpu_time, /* time spent on the cpu */
run_delay; /* time spent waiting on a runqueue */
unsigned long long run_delay; /* time spent waiting on a runqueue */
/* timestamps */
unsigned long long last_arrival,/* when we last ran on a cpu */
@@ -888,38 +882,7 @@ partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
#endif /* !CONFIG_SMP */
struct io_context; /* See blkdev.h */
#define NGROUPS_SMALL 32
#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
struct group_info {
int ngroups;
atomic_t usage;
gid_t small_block[NGROUPS_SMALL];
int nblocks;
gid_t *blocks[0];
};
/*
* get_group_info() must be called with the owning task locked (via task_lock())
* when task != current. The reason being that the vast majority of callers are
* looking at current->group_info, which can not be changed except by the
* current task. Changing current->group_info requires the task lock, too.
*/
#define get_group_info(group_info) do { \
atomic_inc(&(group_info)->usage); \
} while (0)
#define put_group_info(group_info) do { \
if (atomic_dec_and_test(&(group_info)->usage)) \
groups_free(group_info); \
} while (0)
extern struct group_info *groups_alloc(int gidsetsize);
extern void groups_free(struct group_info *group_info);
extern int set_current_groups(struct group_info *group_info);
extern int groups_search(struct group_info *group_info, gid_t grp);
/* access the groups "array" with this macro */
#define GROUP_AT(gi, i) \
((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
@@ -1165,6 +1128,19 @@ struct task_struct {
struct list_head ptraced;
struct list_head ptrace_entry;
#ifdef CONFIG_X86_PTRACE_BTS
/*
* This is the tracer handle for the ptrace BTS extension.
* This field actually belongs to the ptracer task.
*/
struct bts_tracer *bts;
/*
* The buffer to hold the BTS data.
*/
void *bts_buffer;
size_t bts_size;
#endif /* CONFIG_X86_PTRACE_BTS */
/* PID/PID hash table linkage. */
struct pid_link pids[PIDTYPE_MAX];
struct list_head thread_group;
@@ -1186,17 +1162,12 @@ struct task_struct {
struct list_head cpu_timers[3];
/* process credentials */
uid_t uid,euid,suid,fsuid;
gid_t gid,egid,sgid,fsgid;
struct group_info *group_info;
kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset;
struct user_struct *user;
unsigned securebits;
#ifdef CONFIG_KEYS
unsigned char jit_keyring; /* default keyring to attach requested keys to */
struct key *request_key_auth; /* assumed request_key authority */
struct key *thread_keyring; /* keyring private to this thread */
#endif
const struct cred *real_cred; /* objective and real subjective task
* credentials (COW) */
const struct cred *cred; /* effective (overridable) subjective task
* credentials (COW) */
struct mutex cred_exec_mutex; /* execve vs ptrace cred calculation mutex */
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
it with task_lock())
@@ -1233,9 +1204,6 @@ struct task_struct {
int (*notifier)(void *priv);
void *notifier_data;
sigset_t *notifier_mask;
#ifdef CONFIG_SECURITY
void *security;
#endif
struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
uid_t loginuid;
@@ -1356,6 +1324,23 @@ struct task_struct {
unsigned long default_timer_slack_ns;
struct list_head *scm_work_list;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack */
int curr_ret_stack;
/* Stack of return addresses for return function tracing */
struct ftrace_ret_stack *ret_stack;
/*
* Number of functions that haven't been traced
* because of depth overrun.
*/
atomic_t trace_overrun;
/* Pause for the tracing */
atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
/* state flags for use by tracers */
unsigned long trace;
#endif
};
/*
@@ -1775,7 +1760,6 @@ static inline struct user_struct *get_uid(struct user_struct *u)
return u;
}
extern void free_uid(struct user_struct *);
extern void switch_uid(struct user_struct *);
extern void release_uids(struct user_namespace *ns);
#include <asm/current.h>
@@ -1794,9 +1778,6 @@ extern void wake_up_new_task(struct task_struct *tsk,
extern void sched_fork(struct task_struct *p, int clone_flags);
extern void sched_dead(struct task_struct *p);
extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);
extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
@@ -1928,6 +1909,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
#define for_each_process(p) \
for (p = &init_task ; (p = next_task(p)) != &init_task ; )
extern bool is_single_threaded(struct task_struct *);
/*
* Careful: do_each_thread/while_each_thread is a double loop so
* 'break' will not work as expected - use goto instead.
@@ -2224,6 +2207,7 @@ extern void normalize_rt_tasks(void);
extern struct task_group init_task_group;
#ifdef CONFIG_USER_SCHED
extern struct task_group root_task_group;
extern void set_tg_uid(struct user_struct *user);
#endif
extern struct task_group *sched_create_group(struct task_group *parent);

View File

@@ -32,7 +32,7 @@
setting is locked or not. A setting which is locked cannot be
changed from user-level. */
#define issecure_mask(X) (1 << (X))
#define issecure(X) (issecure_mask(X) & current->securebits)
#define issecure(X) (issecure_mask(X) & current_cred_xxx(securebits))
#define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \
issecure_mask(SECURE_NO_SETUID_FIXUP) | \

View File

@@ -37,6 +37,10 @@
/* Maximum number of letters for an LSM name string */
#define SECURITY_NAME_MAX 10
/* If capable should audit the security request */
#define SECURITY_CAP_NOAUDIT 0
#define SECURITY_CAP_AUDIT 1
struct ctl_table;
struct audit_krule;
@@ -44,25 +48,25 @@ struct audit_krule;
* These functions are in security/capability.c and are used
* as the default capabilities functions
*/
extern int cap_capable(struct task_struct *tsk, int cap);
extern int cap_capable(struct task_struct *tsk, int cap, int audit);
extern int cap_settime(struct timespec *ts, struct timezone *tz);
extern int cap_ptrace_may_access(struct task_struct *child, unsigned int mode);
extern int cap_ptrace_traceme(struct task_struct *parent);
extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
extern int cap_capset_check(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
extern void cap_capset_set(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
extern int cap_bprm_set_security(struct linux_binprm *bprm);
extern void cap_bprm_apply_creds(struct linux_binprm *bprm, int unsafe);
extern int cap_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted);
extern int cap_bprm_set_creds(struct linux_binprm *bprm);
extern int cap_bprm_secureexec(struct linux_binprm *bprm);
extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
extern int cap_inode_need_killpriv(struct dentry *dentry);
extern int cap_inode_killpriv(struct dentry *dentry);
extern int cap_task_post_setuid(uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags);
extern void cap_task_reparent_to_init(struct task_struct *p);
extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags);
extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5, long *rc_p);
unsigned long arg4, unsigned long arg5);
extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
extern int cap_task_setioprio(struct task_struct *p, int ioprio);
extern int cap_task_setnice(struct task_struct *p, int nice);
@@ -105,7 +109,7 @@ extern unsigned long mmap_min_addr;
struct sched_param;
struct request_sock;
/* bprm_apply_creds unsafe reasons */
/* bprm->unsafe reasons */
#define LSM_UNSAFE_SHARE 1
#define LSM_UNSAFE_PTRACE 2
#define LSM_UNSAFE_PTRACE_CAP 4
@@ -149,36 +153,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
*
* Security hooks for program execution operations.
*
* @bprm_alloc_security:
* Allocate and attach a security structure to the @bprm->security field.
* The security field is initialized to NULL when the bprm structure is
* allocated.
* @bprm contains the linux_binprm structure to be modified.
* Return 0 if operation was successful.
* @bprm_free_security:
* @bprm contains the linux_binprm structure to be modified.
* Deallocate and clear the @bprm->security field.
* @bprm_apply_creds:
* Compute and set the security attributes of a process being transformed
* by an execve operation based on the old attributes (current->security)
* and the information saved in @bprm->security by the set_security hook.
* Since this hook function (and its caller) are void, this hook can not
* return an error. However, it can leave the security attributes of the
* process unchanged if an access failure occurs at this point.
* bprm_apply_creds is called under task_lock. @unsafe indicates various
* reasons why it may be unsafe to change security state.
* @bprm contains the linux_binprm structure.
* @bprm_post_apply_creds:
* Runs after bprm_apply_creds with the task_lock dropped, so that
* functions which cannot be called safely under the task_lock can
* be used. This hook is a good place to perform state changes on
* the process such as closing open file descriptors to which access
* is no longer granted if the attributes were changed.
* Note that a security module might need to save state between
* bprm_apply_creds and bprm_post_apply_creds to store the decision
* on whether the process may proceed.
* @bprm contains the linux_binprm structure.
* @bprm_set_security:
* @bprm_set_creds:
* Save security information in the bprm->security field, typically based
* on information about the bprm->file, for later use by the apply_creds
* hook. This hook may also optionally check permissions (e.g. for
@@ -191,15 +166,30 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @bprm contains the linux_binprm structure.
* Return 0 if the hook is successful and permission is granted.
* @bprm_check_security:
* This hook mediates the point when a search for a binary handler will
* begin. It allows a check the @bprm->security value which is set in
* the preceding set_security call. The primary difference from
* set_security is that the argv list and envp list are reliably
* available in @bprm. This hook may be called multiple times
* during a single execve; and in each pass set_security is called
* first.
* This hook mediates the point when a search for a binary handler will
* begin. It allows a check of the @bprm->security value which is set in the
* preceding set_creds call. The primary difference from set_creds is
* that the argv list and envp list are reliably available in @bprm. This
* hook may be called multiple times during a single execve; and in each
* pass set_creds is called first.
* @bprm contains the linux_binprm structure.
* Return 0 if the hook is successful and permission is granted.
* @bprm_committing_creds:
* Prepare to install the new security attributes of a process being
* transformed by an execve operation, based on the old credentials
* pointed to by @current->cred and the information set in @bprm->cred by
* the bprm_set_creds hook. @bprm points to the linux_binprm structure.
* This hook is a good place to perform state changes on the process such
* as closing open file descriptors to which access will no longer be
* granted when the attributes are changed. This is called immediately
* before commit_creds().
* @bprm_committed_creds:
* Tidy up after the installation of the new security attributes of a
* process being transformed by an execve operation. The new credentials
* have, by this point, been set to @current->cred. @bprm points to the
* linux_binprm structure. This hook is a good place to perform state
* changes on the process such as clearing out non-inheritable signal
* state. This is called immediately after commit_creds().
* @bprm_secureexec:
* Return a boolean value (0 or 1) indicating whether a "secure exec"
* is required. The flag is passed in the auxiliary table
@@ -585,15 +575,31 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* manual page for definitions of the @clone_flags.
* @clone_flags contains the flags indicating what should be shared.
* Return 0 if permission is granted.
* @task_alloc_security:
* @p contains the task_struct for child process.
* Allocate and attach a security structure to the p->security field. The
* security field is initialized to NULL when the task structure is
* allocated.
* Return 0 if operation was successful.
* @task_free_security:
* @p contains the task_struct for process.
* Deallocate and clear the p->security field.
* @cred_free:
* @cred points to the credentials.
* Deallocate and clear the cred->security field in a set of credentials.
* @cred_prepare:
* @new points to the new credentials.
* @old points to the original credentials.
* @gfp indicates the atomicity of any memory allocations.
* Prepare a new set of credentials by copying the data from the old set.
* @cred_commit:
* @new points to the new credentials.
* @old points to the original credentials.
* Install a new set of credentials.
* @kernel_act_as:
* Set the credentials for a kernel service to act as (subjective context).
* @new points to the credentials to be modified.
* @secid specifies the security ID to be set
* The current task must be the one that nominated @secid.
* Return 0 if successful.
* @kernel_create_files_as:
* Set the file creation context in a set of credentials to be the same as
* the objective context of the specified inode.
* @new points to the credentials to be modified.
* @inode points to the inode to use as a reference.
* The current task must be the one that nominated @inode.
* Return 0 if successful.
* @task_setuid:
* Check permission before setting one or more of the user identity
* attributes of the current process. The @flags parameter indicates
@@ -606,15 +612,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @id2 contains a uid.
* @flags contains one of the LSM_SETID_* values.
* Return 0 if permission is granted.
* @task_post_setuid:
* @task_fix_setuid:
* Update the module's state after setting one or more of the user
* identity attributes of the current process. The @flags parameter
* indicates which of the set*uid system calls invoked this hook. If
* @flags is LSM_SETID_FS, then @old_ruid is the old fs uid and the other
* parameters are not used.
* @old_ruid contains the old real uid (or fs uid if LSM_SETID_FS).
* @old_euid contains the old effective uid (or -1 if LSM_SETID_FS).
* @old_suid contains the old saved uid (or -1 if LSM_SETID_FS).
* @new is the set of credentials that will be installed. Modifications
* should be made to this rather than to @current->cred.
* @old is the set of credentials that are being replaced
* @flags contains one of the LSM_SETID_* values.
* Return 0 on success.
* @task_setgid:
@@ -717,13 +721,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @arg3 contains a argument.
* @arg4 contains a argument.
* @arg5 contains a argument.
* @rc_p contains a pointer to communicate back the forced return code
* Return 0 if permission is granted, and non-zero if the security module
* has taken responsibility (setting *rc_p) for the prctl call.
* @task_reparent_to_init:
* Set the security attributes in @p->security for a kernel thread that
* is being reparented to the init task.
* @p contains the task_struct for the kernel thread.
* Return -ENOSYS if no-one wanted to handle this op, any other value to
* cause prctl() to return immediately with that value.
* @task_to_inode:
* Set the security attributes for an inode based on an associated task's
* security attributes, e.g. for /proc/pid inodes.
@@ -1000,7 +999,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* See whether a specific operational right is granted to a process on a
* key.
* @key_ref refers to the key (key pointer + possession attribute bit).
* @context points to the process to provide the context against which to
* @cred points to the credentials to provide the context against which to
* evaluate the security data on the key.
* @perm describes the combination of permissions required of this key.
* Return 1 if permission granted, 0 if permission denied and -ve if the
@@ -1162,6 +1161,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @child process.
* Security modules may also want to perform a process tracing check
* during an execve in the set_security or apply_creds hooks of
* tracing check during an execve in the bprm_set_creds hook of
* binprm_security_ops if the process is being traced and its security
* attributes would be changed by the execve.
* @child contains the task_struct structure for the target process.
@@ -1185,29 +1185,15 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @inheritable contains the inheritable capability set.
* @permitted contains the permitted capability set.
* Return 0 if the capability sets were successfully obtained.
* @capset_check:
* Check permission before setting the @effective, @inheritable, and
* @permitted capability sets for the @target process.
* Caveat: @target is also set to current if a set of processes is
* specified (i.e. all processes other than current and init or a
* particular process group). Hence, the capset_set hook may need to
* revalidate permission to the actual target process.
* @target contains the task_struct structure for target process.
* @effective contains the effective capability set.
* @inheritable contains the inheritable capability set.
* @permitted contains the permitted capability set.
* Return 0 if permission is granted.
* @capset_set:
* @capset:
* Set the @effective, @inheritable, and @permitted capability sets for
* the @target process. Since capset_check cannot always check permission
* to the real @target process, this hook may also perform permission
* checking to determine if the current process is allowed to set the
* capability sets of the @target process. However, this hook has no way
* of returning an error due to the structure of the sys_capset code.
* @target contains the task_struct structure for target process.
* the current process.
* @new contains the new credentials structure for target process.
* @old contains the current credentials structure for target process.
* @effective contains the effective capability set.
* @inheritable contains the inheritable capability set.
* @permitted contains the permitted capability set.
* Return 0 and update @new if permission is granted.
* @capable:
* Check whether the @tsk process has the @cap capability.
* @tsk contains the task_struct for the process.
@@ -1299,15 +1285,12 @@ struct security_operations {
int (*capget) (struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable, kernel_cap_t *permitted);
int (*capset_check) (struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted);
void (*capset_set) (struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted);
int (*capable) (struct task_struct *tsk, int cap);
int (*capset) (struct cred *new,
const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted);
int (*capable) (struct task_struct *tsk, int cap, int audit);
int (*acct) (struct file *file);
int (*sysctl) (struct ctl_table *table, int op);
int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
@@ -1316,18 +1299,16 @@ struct security_operations {
int (*settime) (struct timespec *ts, struct timezone *tz);
int (*vm_enough_memory) (struct mm_struct *mm, long pages);
int (*bprm_alloc_security) (struct linux_binprm *bprm);
void (*bprm_free_security) (struct linux_binprm *bprm);
void (*bprm_apply_creds) (struct linux_binprm *bprm, int unsafe);
void (*bprm_post_apply_creds) (struct linux_binprm *bprm);
int (*bprm_set_security) (struct linux_binprm *bprm);
int (*bprm_set_creds) (struct linux_binprm *bprm);
int (*bprm_check_security) (struct linux_binprm *bprm);
int (*bprm_secureexec) (struct linux_binprm *bprm);
void (*bprm_committing_creds) (struct linux_binprm *bprm);
void (*bprm_committed_creds) (struct linux_binprm *bprm);
int (*sb_alloc_security) (struct super_block *sb);
void (*sb_free_security) (struct super_block *sb);
int (*sb_copy_data) (char *orig, char *copy);
int (*sb_kern_mount) (struct super_block *sb, void *data);
int (*sb_kern_mount) (struct super_block *sb, int flags, void *data);
int (*sb_show_options) (struct seq_file *m, struct super_block *sb);
int (*sb_statfs) (struct dentry *dentry);
int (*sb_mount) (char *dev_name, struct path *path,
@@ -1406,14 +1387,18 @@ struct security_operations {
int (*file_send_sigiotask) (struct task_struct *tsk,
struct fown_struct *fown, int sig);
int (*file_receive) (struct file *file);
int (*dentry_open) (struct file *file);
int (*dentry_open) (struct file *file, const struct cred *cred);
int (*task_create) (unsigned long clone_flags);
int (*task_alloc_security) (struct task_struct *p);
void (*task_free_security) (struct task_struct *p);
void (*cred_free) (struct cred *cred);
int (*cred_prepare)(struct cred *new, const struct cred *old,
gfp_t gfp);
void (*cred_commit)(struct cred *new, const struct cred *old);
int (*kernel_act_as)(struct cred *new, u32 secid);
int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags);
int (*task_post_setuid) (uid_t old_ruid /* or fsuid */ ,
uid_t old_euid, uid_t old_suid, int flags);
int (*task_fix_setuid) (struct cred *new, const struct cred *old,
int flags);
int (*task_setgid) (gid_t id0, gid_t id1, gid_t id2, int flags);
int (*task_setpgid) (struct task_struct *p, pid_t pgid);
int (*task_getpgid) (struct task_struct *p);
@@ -1433,8 +1418,7 @@ struct security_operations {
int (*task_wait) (struct task_struct *p);
int (*task_prctl) (int option, unsigned long arg2,
unsigned long arg3, unsigned long arg4,
unsigned long arg5, long *rc_p);
void (*task_reparent_to_init) (struct task_struct *p);
unsigned long arg5);
void (*task_to_inode) (struct task_struct *p, struct inode *inode);
int (*ipc_permission) (struct kern_ipc_perm *ipcp, short flag);
@@ -1539,10 +1523,10 @@ struct security_operations {
/* key management security hooks */
#ifdef CONFIG_KEYS
int (*key_alloc) (struct key *key, struct task_struct *tsk, unsigned long flags);
int (*key_alloc) (struct key *key, const struct cred *cred, unsigned long flags);
void (*key_free) (struct key *key);
int (*key_permission) (key_ref_t key_ref,
struct task_struct *context,
const struct cred *cred,
key_perm_t perm);
int (*key_getsecurity)(struct key *key, char **_buffer);
#endif /* CONFIG_KEYS */
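A minimal, hypothetical sketch of an LSM filling in the new cred-based hooks declared above. Most hooks, error paths and blob management are omitted; "mylsm" and struct my_blob are made up, while the hook signatures, the cred->security field, register_security() and security_initcall() are the existing kernel interfaces.

#include <linux/security.h>
#include <linux/cred.h>
#include <linux/slab.h>

struct my_blob {
	u32 label;
};

static int my_cred_prepare(struct cred *new, const struct cred *old, gfp_t gfp)
{
	/* copy the security blob from the old credentials to the new set */
	struct my_blob *blob = kzalloc(sizeof(*blob), gfp);

	if (!blob)
		return -ENOMEM;
	if (old->security)
		blob->label = ((struct my_blob *)old->security)->label;
	new->security = blob;
	return 0;
}

static void my_cred_free(struct cred *cred)
{
	kfree(cred->security);
	cred->security = NULL;
}

static struct security_operations my_ops = {
	.name		= "mylsm",
	.cred_prepare	= my_cred_prepare,
	.cred_free	= my_cred_free,
};

static int __init my_lsm_init(void)
{
	/* registration as usual for a built-in LSM */
	return register_security(&my_ops);
}
security_initcall(my_lsm_init);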
@@ -1568,15 +1552,12 @@ int security_capget(struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted);
int security_capset_check(struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted);
void security_capset_set(struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted);
int security_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted);
int security_capable(struct task_struct *tsk, int cap);
int security_capable_noaudit(struct task_struct *tsk, int cap);
int security_acct(struct file *file);
int security_sysctl(struct ctl_table *table, int op);
int security_quotactl(int cmds, int type, int id, struct super_block *sb);
@@ -1586,17 +1567,15 @@ int security_settime(struct timespec *ts, struct timezone *tz);
int security_vm_enough_memory(long pages);
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
int security_vm_enough_memory_kern(long pages);
int security_bprm_alloc(struct linux_binprm *bprm);
void security_bprm_free(struct linux_binprm *bprm);
void security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe);
void security_bprm_post_apply_creds(struct linux_binprm *bprm);
int security_bprm_set(struct linux_binprm *bprm);
int security_bprm_set_creds(struct linux_binprm *bprm);
int security_bprm_check(struct linux_binprm *bprm);
void security_bprm_committing_creds(struct linux_binprm *bprm);
void security_bprm_committed_creds(struct linux_binprm *bprm);
int security_bprm_secureexec(struct linux_binprm *bprm);
int security_sb_alloc(struct super_block *sb);
void security_sb_free(struct super_block *sb);
int security_sb_copy_data(char *orig, char *copy);
int security_sb_kern_mount(struct super_block *sb, void *data);
int security_sb_kern_mount(struct super_block *sb, int flags, void *data);
int security_sb_show_options(struct seq_file *m, struct super_block *sb);
int security_sb_statfs(struct dentry *dentry);
int security_sb_mount(char *dev_name, struct path *path,
@@ -1663,13 +1642,16 @@ int security_file_set_fowner(struct file *file);
int security_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int sig);
int security_file_receive(struct file *file);
int security_dentry_open(struct file *file);
int security_dentry_open(struct file *file, const struct cred *cred);
int security_task_create(unsigned long clone_flags);
int security_task_alloc(struct task_struct *p);
void security_task_free(struct task_struct *p);
void security_cred_free(struct cred *cred);
int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp);
void security_commit_creds(struct cred *new, const struct cred *old);
int security_kernel_act_as(struct cred *new, u32 secid);
int security_kernel_create_files_as(struct cred *new, struct inode *inode);
int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags);
int security_task_post_setuid(uid_t old_ruid, uid_t old_euid,
uid_t old_suid, int flags);
int security_task_fix_setuid(struct cred *new, const struct cred *old,
int flags);
int security_task_setgid(gid_t id0, gid_t id1, gid_t id2, int flags);
int security_task_setpgid(struct task_struct *p, pid_t pgid);
int security_task_getpgid(struct task_struct *p);
@@ -1688,8 +1670,7 @@ int security_task_kill(struct task_struct *p, struct siginfo *info,
int sig, u32 secid);
int security_task_wait(struct task_struct *p);
int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5, long *rc_p);
void security_task_reparent_to_init(struct task_struct *p);
unsigned long arg4, unsigned long arg5);
void security_task_to_inode(struct task_struct *p, struct inode *inode);
int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag);
void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid);
@@ -1764,25 +1745,23 @@ static inline int security_capget(struct task_struct *target,
return cap_capget(target, effective, inheritable, permitted);
}
static inline int security_capset_check(struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted)
static inline int security_capset(struct cred *new,
const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted)
{
return cap_capset_check(target, effective, inheritable, permitted);
}
static inline void security_capset_set(struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted)
{
cap_capset_set(target, effective, inheritable, permitted);
return cap_capset(new, old, effective, inheritable, permitted);
}
static inline int security_capable(struct task_struct *tsk, int cap)
{
return cap_capable(tsk, cap);
return cap_capable(tsk, cap, SECURITY_CAP_AUDIT);
}
static inline int security_capable_noaudit(struct task_struct *tsk, int cap)
{
return cap_capable(tsk, cap, SECURITY_CAP_NOAUDIT);
}
static inline int security_acct(struct file *file)
@@ -1835,27 +1814,9 @@ static inline int security_vm_enough_memory_kern(long pages)
return cap_vm_enough_memory(current->mm, pages);
}
static inline int security_bprm_alloc(struct linux_binprm *bprm)
static inline int security_bprm_set_creds(struct linux_binprm *bprm)
{
return 0;
}
static inline void security_bprm_free(struct linux_binprm *bprm)
{ }
static inline void security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe)
{
cap_bprm_apply_creds(bprm, unsafe);
}
static inline void security_bprm_post_apply_creds(struct linux_binprm *bprm)
{
return;
}
static inline int security_bprm_set(struct linux_binprm *bprm)
{
return cap_bprm_set_security(bprm);
return cap_bprm_set_creds(bprm);
}
static inline int security_bprm_check(struct linux_binprm *bprm)
@@ -1863,6 +1824,14 @@ static inline int security_bprm_check(struct linux_binprm *bprm)
return 0;
}
static inline void security_bprm_committing_creds(struct linux_binprm *bprm)
{
}
static inline void security_bprm_committed_creds(struct linux_binprm *bprm)
{
}
static inline int security_bprm_secureexec(struct linux_binprm *bprm)
{
return cap_bprm_secureexec(bprm);
@@ -1881,7 +1850,7 @@ static inline int security_sb_copy_data(char *orig, char *copy)
return 0;
}
static inline int security_sb_kern_mount(struct super_block *sb, void *data)
static inline int security_sb_kern_mount(struct super_block *sb, int flags, void *data)
{
return 0;
}
@@ -2177,7 +2146,8 @@ static inline int security_file_receive(struct file *file)
return 0;
}
static inline int security_dentry_open(struct file *file)
static inline int security_dentry_open(struct file *file,
const struct cred *cred)
{
return 0;
}
@@ -2187,13 +2157,31 @@ static inline int security_task_create(unsigned long clone_flags)
return 0;
}
static inline int security_task_alloc(struct task_struct *p)
static inline void security_cred_free(struct cred *cred)
{ }
static inline int security_prepare_creds(struct cred *new,
const struct cred *old,
gfp_t gfp)
{
return 0;
}
static inline void security_task_free(struct task_struct *p)
{ }
static inline void security_commit_creds(struct cred *new,
const struct cred *old)
{
}
static inline int security_kernel_act_as(struct cred *cred, u32 secid)
{
return 0;
}
static inline int security_kernel_create_files_as(struct cred *cred,
struct inode *inode)
{
return 0;
}
static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2,
int flags)
@@ -2201,10 +2189,11 @@ static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2,
return 0;
}
static inline int security_task_post_setuid(uid_t old_ruid, uid_t old_euid,
uid_t old_suid, int flags)
static inline int security_task_fix_setuid(struct cred *new,
const struct cred *old,
int flags)
{
return cap_task_post_setuid(old_ruid, old_euid, old_suid, flags);
return cap_task_fix_setuid(new, old, flags);
}
static inline int security_task_setgid(gid_t id0, gid_t id1, gid_t id2,
@@ -2291,14 +2280,9 @@ static inline int security_task_wait(struct task_struct *p)
static inline int security_task_prctl(int option, unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
unsigned long arg5, long *rc_p)
unsigned long arg5)
{
return cap_task_prctl(option, arg2, arg3, arg4, arg5, rc_p);
}
static inline void security_task_reparent_to_init(struct task_struct *p)
{
cap_task_reparent_to_init(p);
return cap_task_prctl(option, arg2, arg3, arg4, arg5);
}
static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
@@ -2724,16 +2708,16 @@ static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi
#ifdef CONFIG_KEYS
#ifdef CONFIG_SECURITY
int security_key_alloc(struct key *key, struct task_struct *tsk, unsigned long flags);
int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags);
void security_key_free(struct key *key);
int security_key_permission(key_ref_t key_ref,
struct task_struct *context, key_perm_t perm);
const struct cred *cred, key_perm_t perm);
int security_key_getsecurity(struct key *key, char **_buffer);
#else
static inline int security_key_alloc(struct key *key,
struct task_struct *tsk,
const struct cred *cred,
unsigned long flags)
{
return 0;
@@ -2744,7 +2728,7 @@ static inline void security_key_free(struct key *key)
}
static inline int security_key_permission(key_ref_t key_ref,
struct task_struct *context,
const struct cred *cred,
key_perm_t perm)
{
return 0;

View File

@@ -34,6 +34,7 @@ struct seq_operations {
#define SEQ_SKIP 1
char *mangle_path(char *s, char *p, char *esc);
int seq_open(struct file *, const struct seq_operations *);
ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
loff_t seq_lseek(struct file *, loff_t, int);

View File

@@ -250,6 +250,9 @@ typedef unsigned char *sk_buff_data_t;
* @tc_verd: traffic control verdict
* @ndisc_nodetype: router type (from link layer)
* @do_not_encrypt: set to prevent encryption of this frame
* @requeue: set to indicate that the wireless core should attempt
* a software retry on this frame if we failed to
* receive an ACK for it
* @dma_cookie: a cookie to one of several possible DMA operations
* done by skb DMA functions
* @secmark: security marking
@@ -269,8 +272,9 @@ struct sk_buff {
struct dst_entry *dst;
struct rtable *rtable;
};
#ifdef CONFIG_XFRM
struct sec_path *sp;
#endif
/*
* This is the control buffer. It is free to use for every
* layer. Please put your private variables there. If you
@@ -325,6 +329,7 @@ struct sk_buff {
#endif
#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
__u8 do_not_encrypt:1;
__u8 requeue:1;
#endif
/* 0/13/14 bit hole */
@@ -487,6 +492,19 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
return (skb->next == (struct sk_buff *) list);
}
/**
* skb_queue_is_first - check if skb is the first entry in the queue
* @list: queue head
* @skb: buffer
*
* Returns true if @skb is the first buffer on the list.
*/
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
const struct sk_buff *skb)
{
return (skb->prev == (struct sk_buff *) list);
}
/**
* skb_queue_next - return the next packet in the queue
* @list: queue head
@@ -505,6 +523,24 @@ static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
return skb->next;
}
/**
* skb_queue_prev - return the prev packet in the queue
* @list: queue head
* @skb: current buffer
*
* Return the prev packet in @list before @skb. It is only valid to
* call this if skb_queue_is_first() evaluates to false.
*/
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
const struct sk_buff *skb)
{
/* This BUG_ON may seem severe, but if we just return then we
* are going to dereference garbage.
*/
BUG_ON(skb_queue_is_first(list, skb));
return skb->prev;
}
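For illustration only (a sketch, not part of the patch): the two helpers added above are meant to pair up when walking a queue from tail to head. handle_skb() and the rxq argument are assumed names, and the caller is presumed to hold the appropriate queue lock.
/* Sketch: traverse an sk_buff queue backwards with the new helpers. */
static void example_walk_backwards(struct sk_buff_head *rxq)
{
	struct sk_buff *skb = skb_peek_tail(rxq);

	while (skb != NULL) {
		handle_skb(skb);		/* hypothetical per-skb work */
		if (skb_queue_is_first(rxq, skb))
			break;
		skb = skb_queue_prev(rxq, skb);
	}
}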
/**
* skb_get - reference buffer
* @skb: buffer to reference
@@ -1647,8 +1683,12 @@ extern int skb_splice_bits(struct sk_buff *skb,
extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_split(struct sk_buff *skb,
struct sk_buff *skb1, const u32 len);
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
int shiftlen);
extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
extern int skb_gro_receive(struct sk_buff **head,
struct sk_buff *skb);
static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
int len, void *buffer)
@@ -1864,6 +1904,18 @@ static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_bu
to->queue_mapping = from->queue_mapping;
}
#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
return skb->sp;
}
#else
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
return NULL;
}
#endif
static inline int skb_is_gso(const struct sk_buff *skb)
{
return skb_shinfo(skb)->gso_size;

View File

@@ -149,6 +149,8 @@ static inline void smp_send_reschedule(int cpu) { }
})
#define smp_call_function_mask(mask, func, info, wait) \
(up_smp_call_function(func, info))
#define smp_call_function_many(mask, func, info, wait) \
(up_smp_call_function(func, info))
static inline void init_call_single_data(void)
{
}

include/linux/smsc911x.h (new file, 47 lines)
View File

@@ -0,0 +1,47 @@
/***************************************************************************
*
* Copyright (C) 2004-2008 SMSC
* Copyright (C) 2005-2008 ARM
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
***************************************************************************/
#ifndef __LINUX_SMSC911X_H__
#define __LINUX_SMSC911X_H__
#include <linux/phy.h>
/* platform_device configuration data; it should be assigned to
* the platform_device's dev.platform_data */
struct smsc911x_platform_config {
unsigned int irq_polarity;
unsigned int irq_type;
unsigned int flags;
phy_interface_t phy_interface;
};
/* Constants for platform_device irq polarity configuration */
#define SMSC911X_IRQ_POLARITY_ACTIVE_LOW 0
#define SMSC911X_IRQ_POLARITY_ACTIVE_HIGH 1
/* Constants for platform_device irq type configuration */
#define SMSC911X_IRQ_TYPE_OPEN_DRAIN 0
#define SMSC911X_IRQ_TYPE_PUSH_PULL 1
/* Constants for flags */
#define SMSC911X_USE_16BIT (BIT(0))
#define SMSC911X_USE_32BIT (BIT(1))
#endif /* __LINUX_SMSC911X_H__ */
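As an illustration of the comment above (not part of this commit), a board file would hang this config off dev.platform_data; the base address, IRQ number and PHY mode below are made-up example values.
/* Sketch only: assumes <linux/platform_device.h> and <linux/smsc911x.h>. */
static struct smsc911x_platform_config board_smsc911x_config = {
	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
	.irq_type	= SMSC911X_IRQ_TYPE_OPEN_DRAIN,
	.flags		= SMSC911X_USE_32BIT,
	.phy_interface	= PHY_INTERFACE_MODE_MII,
};

static struct resource board_smsc911x_resources[] = {
	{ .start = 0x4e000000, .end = 0x4e0000ff, .flags = IORESOURCE_MEM },
	{ .start = 40,         .end = 40,         .flags = IORESOURCE_IRQ  },
};

static struct platform_device board_smsc911x_device = {
	.name		= "smsc911x",
	.id		= -1,
	.resource	= board_smsc911x_resources,
	.num_resources	= ARRAY_SIZE(board_smsc911x_resources),
	.dev		= {
		.platform_data	= &board_smsc911x_config,	/* as the header comment requests */
	},
};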

View File

@@ -216,6 +216,9 @@ enum
LINUX_MIB_TCPSPURIOUSRTOS, /* TCPSpuriousRTOs */
LINUX_MIB_TCPMD5NOTFOUND, /* TCPMD5NotFound */
LINUX_MIB_TCPMD5UNEXPECTED, /* TCPMD5Unexpected */
LINUX_MIB_SACKSHIFTED,
LINUX_MIB_SACKMERGED,
LINUX_MIB_SACKSHIFTFALLBACK,
__LINUX_MIB_MAX
};

View File

@@ -15,9 +15,17 @@ extern void save_stack_trace_tsk(struct task_struct *tsk,
struct stack_trace *trace);
extern void print_stack_trace(struct stack_trace *trace, int spaces);
#ifdef CONFIG_USER_STACKTRACE_SUPPORT
extern void save_stack_trace_user(struct stack_trace *trace);
#else
# define save_stack_trace_user(trace) do { } while (0)
#endif
#else
# define save_stack_trace(trace) do { } while (0)
# define save_stack_trace_tsk(tsk, trace) do { } while (0)
# define save_stack_trace_user(trace) do { } while (0)
# define print_stack_trace(trace, spaces) do { } while (0)
#endif

View File

@@ -139,14 +139,14 @@ static inline char *__svc_print_addr(struct sockaddr *addr,
{
switch (addr->sa_family) {
case AF_INET:
snprintf(buf, len, "%u.%u.%u.%u, port=%u",
NIPQUAD(((struct sockaddr_in *) addr)->sin_addr),
snprintf(buf, len, "%pI4, port=%u",
&((struct sockaddr_in *)addr)->sin_addr,
ntohs(((struct sockaddr_in *) addr)->sin_port));
break;
case AF_INET6:
snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u",
NIP6(((struct sockaddr_in6 *) addr)->sin6_addr),
snprintf(buf, len, "%pI6, port=%u",
&((struct sockaddr_in6 *)addr)->sin6_addr,
ntohs(((struct sockaddr_in6 *) addr)->sin6_port));
break;
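The hunk above replaces NIPQUAD()/NIP6() with the %pI4/%pI6 printk extensions, which take a pointer to the address instead of expanded byte arguments. A hedged sketch of the same idiom outside this helper (function and message text are made up):
static void example_print_peer(const struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET) {
		const struct sockaddr_in *sin = (const struct sockaddr_in *)addr;

		printk(KERN_INFO "peer %pI4, port=%u\n",
		       &sin->sin_addr, ntohs(sin->sin_port));
	} else if (addr->sa_family == AF_INET6) {
		const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)addr;

		printk(KERN_INFO "peer %pI6, port=%u\n",
		       &sin6->sin6_addr, ntohs(sin6->sin6_port));
	}
}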

View File

@@ -49,7 +49,7 @@
for_each_online_node(node) \
if (nr_cpus_node(node))
void arch_update_cpu_topology(void);
int arch_update_cpu_topology(void);
/* Conform to ACPI 2.0 SLIT distance definitions */
#define LOCAL_DISTANCE 10

View File

@@ -24,8 +24,12 @@ struct tracepoint {
const char *name; /* Tracepoint name */
int state; /* State. */
void **funcs;
} __attribute__((aligned(8)));
} __attribute__((aligned(32))); /*
* Aligned on 32 bytes because it is
* globally visible and gcc happily
* aligns these on the structure size.
* Keep in sync with vmlinux.lds.h.
*/
#define TPPROTO(args...) args
#define TPARGS(args...) args
@@ -40,14 +44,14 @@ struct tracepoint {
do { \
void **it_func; \
\
rcu_read_lock_sched(); \
rcu_read_lock_sched_notrace(); \
it_func = rcu_dereference((tp)->funcs); \
if (it_func) { \
do { \
((void(*)(proto))(*it_func))(args); \
} while (*(++it_func)); \
} \
rcu_read_unlock_sched(); \
rcu_read_unlock_sched_notrace(); \
} while (0)
/*
@@ -55,35 +59,40 @@ struct tracepoint {
* not add unwanted padding between the beginning of the section and the
* structure. Force alignment to the same alignment as the section start.
*/
#define DEFINE_TRACE(name, proto, args) \
#define DECLARE_TRACE(name, proto, args) \
extern struct tracepoint __tracepoint_##name; \
static inline void trace_##name(proto) \
{ \
static const char __tpstrtab_##name[] \
__attribute__((section("__tracepoints_strings"))) \
= #name ":" #proto; \
static struct tracepoint __tracepoint_##name \
__attribute__((section("__tracepoints"), aligned(8))) = \
{ __tpstrtab_##name, 0, NULL }; \
if (unlikely(__tracepoint_##name.state)) \
__DO_TRACE(&__tracepoint_##name, \
TPPROTO(proto), TPARGS(args)); \
} \
static inline int register_trace_##name(void (*probe)(proto)) \
{ \
return tracepoint_probe_register(#name ":" #proto, \
(void *)probe); \
return tracepoint_probe_register(#name, (void *)probe); \
} \
static inline void unregister_trace_##name(void (*probe)(proto))\
static inline int unregister_trace_##name(void (*probe)(proto)) \
{ \
tracepoint_probe_unregister(#name ":" #proto, \
(void *)probe); \
return tracepoint_probe_unregister(#name, (void *)probe);\
}
#define DEFINE_TRACE(name) \
static const char __tpstrtab_##name[] \
__attribute__((section("__tracepoints_strings"))) = #name; \
struct tracepoint __tracepoint_##name \
__attribute__((section("__tracepoints"), aligned(32))) = \
{ __tpstrtab_##name, 0, NULL }
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \
EXPORT_SYMBOL_GPL(__tracepoint_##name)
#define EXPORT_TRACEPOINT_SYMBOL(name) \
EXPORT_SYMBOL(__tracepoint_##name)
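Under the split introduced here, DECLARE_TRACE() belongs in a header while DEFINE_TRACE() and EXPORT_TRACEPOINT_SYMBOL*() must appear in exactly one .c file. A hedged sketch of the resulting usage pattern, with a made-up tracepoint name and probe (not part of the patch):
/* normally in a header: declares trace_example_event() plus the
 * register/unregister helpers */
DECLARE_TRACE(example_event,
	TPPROTO(int value),
	TPARGS(value));

/* in exactly one .c file: emits the struct tracepoint itself */
DEFINE_TRACE(example_event);
EXPORT_TRACEPOINT_SYMBOL_GPL(example_event);

/* instrumentation site */
static void do_work(int value)
{
	trace_example_event(value);
}

/* a probe module attaches and detaches its handler */
static void probe_example_event(int value)
{
	pr_info("example_event: %d\n", value);
}

static int __init example_probe_init(void)
{
	return register_trace_example_event(probe_example_event);
}

static void __exit example_probe_exit(void)
{
	unregister_trace_example_event(probe_example_event);
	tracepoint_synchronize_unregister();
}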
extern void tracepoint_update_probe_range(struct tracepoint *begin,
struct tracepoint *end);
#else /* !CONFIG_TRACEPOINTS */
#define DEFINE_TRACE(name, proto, args) \
#define DECLARE_TRACE(name, proto, args) \
static inline void _do_trace_##name(struct tracepoint *tp, proto) \
{ } \
static inline void trace_##name(proto) \
@@ -92,8 +101,14 @@ extern void tracepoint_update_probe_range(struct tracepoint *begin,
{ \
return -ENOSYS; \
} \
static inline void unregister_trace_##name(void (*probe)(proto))\
{ }
static inline int unregister_trace_##name(void (*probe)(proto)) \
{ \
return -ENOSYS; \
}
#define DEFINE_TRACE(name)
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
#define EXPORT_TRACEPOINT_SYMBOL(name)
static inline void tracepoint_update_probe_range(struct tracepoint *begin,
struct tracepoint *end)
@@ -112,6 +127,10 @@ extern int tracepoint_probe_register(const char *name, void *probe);
*/
extern int tracepoint_probe_unregister(const char *name, void *probe);
extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
extern void tracepoint_probe_update_all(void);
struct tracepoint_iter {
struct module *module;
struct tracepoint *tracepoint;

View File

@@ -325,7 +325,7 @@ extern struct class *tty_class;
* go away
*/
extern inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
{
if (tty)
kref_get(&tty->kref);
@@ -442,6 +442,7 @@ extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
size_t size);
extern void tty_audit_exit(void);
extern void tty_audit_fork(struct signal_struct *sig);
extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
extern void tty_audit_push(struct tty_struct *tty);
extern void tty_audit_push_task(struct task_struct *tsk,
uid_t loginuid, u32 sessionid);
@@ -450,6 +451,9 @@ static inline void tty_audit_add_data(struct tty_struct *tty,
unsigned char *data, size_t size)
{
}
static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
{
}
static inline void tty_audit_exit(void)
{
}

View File

@@ -158,8 +158,12 @@ struct usb_ctrlrequest {
* (rarely) accepted by SET_DESCRIPTOR.
*
* Note that all multi-byte values here are encoded in little endian
* byte order "on the wire". But when exposed through Linux-USB APIs,
* they've been converted to cpu byte order.
* byte order "on the wire". Within the kernel and when exposed
* through the Linux-USB APIs, they are not converted to cpu byte
* order; it is the responsibility of the client code to do this.
* The single exception is when device and configuration descriptors (but
* not other descriptors) are read from usbfs (i.e. /proc/bus/usb/BBB/DDD);
* in this case the fields are converted to host endianness by the kernel.
*/
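Given the note above, descriptor fields stay in wire (little-endian) order inside the kernel, so drivers convert at the point of use. A minimal sketch (helper name is made up):
static unsigned int example_max_packet(const struct usb_endpoint_descriptor *ep)
{
	/* wMaxPacketSize is __le16 on the wire; convert before arithmetic */
	return le16_to_cpu(ep->wMaxPacketSize);
}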
/*

View File

@@ -12,7 +12,7 @@
struct user_namespace {
struct kref kref;
struct hlist_head uidhash_table[UIDHASH_SZ];
struct user_struct *root_user;
struct user_struct *creator;
};
extern struct user_namespace init_user_ns;
@@ -26,8 +26,7 @@ static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
return ns;
}
extern struct user_namespace *copy_user_ns(int flags,
struct user_namespace *old_ns);
extern int create_user_ns(struct cred *new);
extern void free_user_ns(struct kref *kref);
static inline void put_user_ns(struct user_namespace *ns)
@@ -43,13 +42,9 @@ static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
return &init_user_ns;
}
static inline struct user_namespace *copy_user_ns(int flags,
struct user_namespace *old_ns)
static inline int create_user_ns(struct cred *new)
{
if (flags & CLONE_NEWUSER)
return ERR_PTR(-EINVAL);
return old_ns;
return -EINVAL;
}
static inline void put_user_ns(struct user_namespace *ns)

View File

@@ -20,6 +20,7 @@
#define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */
#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */
#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */
#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
struct virtio_net_config
{
@@ -44,4 +45,12 @@ struct virtio_net_hdr
__u16 csum_start; /* Position to start checksumming from */
__u16 csum_offset; /* Offset after that to place checksum */
};
/* This is the version of the header to use when the MRG_RXBUF
* feature has been negotiated. */
struct virtio_net_hdr_mrg_rxbuf {
struct virtio_net_hdr hdr;
__u16 num_buffers; /* Number of merged rx buffers */
};
#endif /* _LINUX_VIRTIO_NET_H */
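A hedged sketch (not from this commit) of how a driver might pick the receive header size once VIRTIO_NET_F_MRG_RXBUF has been negotiated; the helper name is made up and <linux/virtio_config.h> is assumed.
static size_t example_rx_hdr_len(struct virtio_device *vdev)
{
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		return sizeof(struct virtio_net_hdr_mrg_rxbuf);

	return sizeof(struct virtio_net_hdr);
}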

View File

@@ -199,6 +199,9 @@ enum {
#define XFRM_MSG_NEWSPDINFO XFRM_MSG_NEWSPDINFO
XFRM_MSG_GETSPDINFO,
#define XFRM_MSG_GETSPDINFO XFRM_MSG_GETSPDINFO
XFRM_MSG_MAPPING,
#define XFRM_MSG_MAPPING XFRM_MSG_MAPPING
__XFRM_MSG_MAX
};
#define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1)
@@ -438,6 +441,15 @@ struct xfrm_user_migrate {
__u16 new_family;
};
struct xfrm_user_mapping {
struct xfrm_usersa_id id;
__u32 reqid;
xfrm_address_t old_saddr;
xfrm_address_t new_saddr;
__be16 old_sport;
__be16 new_sport;
};
#ifndef __KERNEL__
/* backwards compatibility for userspace */
#define XFRMGRP_ACQUIRE 1
@@ -464,6 +476,8 @@ enum xfrm_nlgroups {
#define XFRMNLGRP_REPORT XFRMNLGRP_REPORT
XFRMNLGRP_MIGRATE,
#define XFRMNLGRP_MIGRATE XFRMNLGRP_MIGRATE
XFRMNLGRP_MAPPING,
#define XFRMNLGRP_MAPPING XFRMNLGRP_MAPPING
__XFRMNLGRP_MAX
};
#define XFRMNLGRP_MAX (__XFRMNLGRP_MAX - 1)