Merge commit 'origin/master' into next

Conflicts:
	include/linux/kvm.h
Author: Benjamin Herrenschmidt
Date:   2009-12-09 17:14:38 +11:00
3651 changed files with 249180 additions and 115589 deletions


@@ -330,6 +330,7 @@ unifdef-y += scc.h
unifdef-y += sched.h
unifdef-y += screen_info.h
unifdef-y += sdla.h
unifdef-y += securebits.h
unifdef-y += selinux_netlink.h
unifdef-y += sem.h
unifdef-y += serial_core.h


@@ -75,6 +75,7 @@ enum {
ATA_ID_EIDE_DMA_TIME = 66,
ATA_ID_EIDE_PIO = 67,
ATA_ID_EIDE_PIO_IORDY = 68,
ATA_ID_ADDITIONAL_SUPP = 69,
ATA_ID_QUEUE_DEPTH = 75,
ATA_ID_MAJOR_VER = 80,
ATA_ID_COMMAND_SET_1 = 82,
@@ -87,6 +88,7 @@ enum {
ATA_ID_HW_CONFIG = 93,
ATA_ID_SPG = 98,
ATA_ID_LBA_CAPACITY_2 = 100,
ATA_ID_SECTOR_SIZE = 106,
ATA_ID_LAST_LUN = 126,
ATA_ID_DLF = 128,
ATA_ID_CSFO = 129,
@@ -638,6 +640,18 @@ static inline int ata_id_flush_ext_enabled(const u16 *id)
return (id[ATA_ID_CFS_ENABLE_2] & 0x2400) == 0x2400;
}
static inline int ata_id_has_large_logical_sectors(const u16 *id)
{
if ((id[ATA_ID_SECTOR_SIZE] & 0xc000) != 0x4000)
return 0;
return id[ATA_ID_SECTOR_SIZE] & (1 << 13);
}
static inline u8 ata_id_logical_per_physical_sectors(const u16 *id)
{
return id[ATA_ID_SECTOR_SIZE] & 0xf;
}
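For illustration, a hedged sketch of how these two helpers could be combined by a caller (assumed example, not part of this diff): per ATA-8, bits 3:0 of IDENTIFY word 106 hold the log2 of logical sectors per physical sector, so the helper above returns an exponent.
static inline u32 example_physical_sector_size(const u16 *id, u32 logical_size)
{
	if (!ata_id_has_large_logical_sectors(id))
		return logical_size;
	return logical_size << ata_id_logical_per_physical_sectors(id);
}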
static inline int ata_id_has_lba48(const u16 *id)
{
if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
@@ -803,6 +817,16 @@ static inline int ata_id_has_trim(const u16 *id)
return 0;
}
static inline int ata_id_has_zero_after_trim(const u16 *id)
{
/* DSM supported, deterministic read, and read zero after trim set */
if (ata_id_has_trim(id) &&
(id[ATA_ID_ADDITIONAL_SUPP] & 0x4020) == 0x4020)
return 1;
return 0;
}
static inline int ata_id_current_chs_valid(const u16 *id)
{
/* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
@@ -958,17 +982,17 @@ static inline void ata_id_to_hd_driveid(u16 *id)
}
/*
* Write up to 'max' LBA Range Entries to the buffer that will cover the
* extent from sector to sector + count. This is used for TRIM and for
* ADD LBA(S) TO NV CACHE PINNED SET.
* Write LBA Range Entries to the buffer that will cover the extent from
* sector to sector + count. This is used for TRIM and for ADD LBA(S)
* TO NV CACHE PINNED SET.
*/
static inline unsigned ata_set_lba_range_entries(void *_buffer, unsigned max,
u64 sector, unsigned long count)
static inline unsigned ata_set_lba_range_entries(void *_buffer,
unsigned buf_size, u64 sector, unsigned long count)
{
__le64 *buffer = _buffer;
unsigned i = 0;
unsigned i = 0, used_bytes;
while (i < max) {
while (i < buf_size / 8) { /* 6-byte LBA + 2-byte range per entry */
u64 entry = sector |
((u64)(count > 0xffff ? 0xffff : count) << 48);
buffer[i++] = __cpu_to_le64(entry);
@@ -978,9 +1002,9 @@ static inline unsigned ata_set_lba_range_entries(void *_buffer, unsigned max,
sector += 0xffff;
}
max = ALIGN(i * 8, 512);
memset(buffer + i, 0, max - i * 8);
return max;
used_bytes = ALIGN(i * 8, 512);
memset(buffer + i, 0, used_bytes - i * 8);
return used_bytes;
}
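A minimal usage sketch (hypothetical caller; variable names are illustrative): the helper now takes the buffer size in bytes rather than an entry count, rounds the payload up to a 512-byte boundary and zero-fills the tail, so a caller can simply do:
	u8 trim_buf[512];
	unsigned payload_bytes;

	/* describe one discard range starting at start_lba, nr_sectors long */
	payload_bytes = ata_set_lba_range_entries(trim_buf, sizeof(trim_buf),
						  start_lba, nr_sectors);
	/* payload_bytes (here 512) is what gets transferred with the TRIM command */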
static inline int is_multi_taskfile(struct ata_taskfile *tf)


@@ -331,4 +331,17 @@ static inline int bdi_sched_wait(void *word)
return 0;
}
static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
struct page *page)
{
if (bdi && bdi->unplug_io_fn)
bdi->unplug_io_fn(bdi, page);
}
static inline void blk_run_address_space(struct address_space *mapping)
{
if (mapping)
blk_run_backing_dev(mapping->backing_dev_info, NULL);
}
#endif /* _LINUX_BACKING_DEV_H */


@@ -391,6 +391,18 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif
extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
@@ -450,11 +462,8 @@ extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly;
/*
* remember never ever reenable interrupts between a bvec_kmap_irq and
* bvec_kunmap_irq!
*
* This function MUST be inlined - it plays with the CPU interrupt flags.
*/
static __always_inline char *bvec_kmap_irq(struct bio_vec *bvec,
unsigned long *flags)
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
unsigned long addr;
@@ -470,8 +479,7 @@ static __always_inline char *bvec_kmap_irq(struct bio_vec *bvec,
return (char *) addr + bvec->bv_offset;
}
static __always_inline void bvec_kunmap_irq(char *buffer,
unsigned long *flags)
static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
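As the comment above stresses, the mapping may disable interrupts, so every caller has to follow the same bracketed pattern (illustrative sketch, assumed caller code):
	unsigned long flags;
	char *ptr;

	ptr = bvec_kmap_irq(bvec, &flags);	/* may save and disable interrupts */
	memcpy(ptr, src, bvec->bv_len);		/* no sleeping or irq enabling here */
	bvec_kunmap_irq(ptr, &flags);		/* unmaps and restores the irq state */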


@@ -312,13 +312,17 @@ struct queue_limits {
unsigned int io_min;
unsigned int io_opt;
unsigned int max_discard_sectors;
unsigned int discard_granularity;
unsigned int discard_alignment;
unsigned short logical_block_size;
unsigned short max_hw_segments;
unsigned short max_phys_segments;
unsigned char misaligned;
unsigned char discard_misaligned;
unsigned char no_cluster;
signed char discard_zeroes_data;
};
struct request_queue
@@ -749,6 +753,17 @@ struct req_iterator {
#define rq_iter_last(rq, _iter) \
(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
@@ -823,19 +838,6 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
return bdev->bd_disk->queue;
}
static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
struct page *page)
{
if (bdi && bdi->unplug_io_fn)
bdi->unplug_io_fn(bdi, page);
}
static inline void blk_run_address_space(struct address_space *mapping)
{
if (mapping)
blk_run_backing_dev(mapping->backing_dev_info, NULL);
}
/*
* blk_rq_pos() : the current sector
* blk_rq_bytes() : bytes left in the entire request
@@ -1134,6 +1136,34 @@ static inline int bdev_alignment_offset(struct block_device *bdev)
return q->limits.alignment_offset;
}
static inline int queue_discard_alignment(struct request_queue *q)
{
if (q->limits.discard_misaligned)
return -1;
return q->limits.discard_alignment;
}
static inline int queue_sector_discard_alignment(struct request_queue *q,
sector_t sector)
{
return ((sector << 9) - q->limits.discard_alignment)
& (q->limits.discard_granularity - 1);
}
static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
if (q->limits.discard_zeroes_data == 1)
return 1;
return 0;
}
static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
return queue_discard_zeroes_data(bdev_get_queue(bdev));
}
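For context, a hedged sketch of how a consumer might combine the new discard helpers (assumed caller code, not part of this diff):
	static bool example_discard_reads_zeroes(struct block_device *bdev,
						 sector_t sector)
	{
		struct request_queue *q = bdev_get_queue(bdev);

		/* only rely on zero-after-discard for ranges aligned to the
		 * discard granularity */
		if (queue_sector_discard_alignment(q, sector))
			return false;
		return queue_discard_zeroes_data(q) != 0;
	}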
static inline int queue_dma_alignment(struct request_queue *q)
{
return q ? q->dma_alignment : 511;


@@ -53,6 +53,7 @@ extern void free_bootmem_node(pg_data_t *pgdat,
unsigned long addr,
unsigned long size);
extern void free_bootmem(unsigned long addr, unsigned long size);
extern void free_bootmem_late(unsigned long addr, unsigned long size);
/*
* Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,


@@ -1,6 +1,13 @@
#define PHY_BRCM_WIRESPEED_ENABLE 0x00000001
#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000002
#define PHY_BRCM_APD_CLK125_ENABLE 0x00000004
#define PHY_BRCM_STD_IBND_DISABLE 0x00000008
#define PHY_BRCM_EXT_IBND_RX_ENABLE 0x00000010
#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00000020
#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001
#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002
#define PHY_BCM_FLAGS_INTF_SGMII 0x00000010
#define PHY_BCM_FLAGS_INTF_XAUI 0x00000020
#define PHY_BRCM_WIRESPEED_ENABLE 0x00000100
#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000200
#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000400
#define PHY_BRCM_STD_IBND_DISABLE 0x00000800
#define PHY_BRCM_EXT_IBND_RX_ENABLE 0x00001000
#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000
#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000
#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000
#define PHY_BCM_FLAGS_VALID 0x80000000


@@ -32,14 +32,12 @@
* struct can_proto - CAN protocol structure
* @type: type argument in socket() syscall, e.g. SOCK_DGRAM.
* @protocol: protocol number in socket() syscall.
* @capability: capability needed to open the socket, or -1 for no restriction.
* @ops: pointer to struct proto_ops for sock->ops.
* @prot: pointer to struct proto structure.
*/
struct can_proto {
int type;
int protocol;
int capability;
struct proto_ops *ops;
struct proto *prot;
};


@@ -29,8 +29,6 @@ enum can_mode {
/*
* CAN common private data
*/
#define CAN_ECHO_SKB_MAX 4
struct can_priv {
struct can_device_stats can_stats;
@@ -44,15 +42,16 @@ struct can_priv {
int restart_ms;
struct timer_list restart_timer;
struct sk_buff *echo_skb[CAN_ECHO_SKB_MAX];
int (*do_set_bittiming)(struct net_device *dev);
int (*do_set_mode)(struct net_device *dev, enum can_mode mode);
int (*do_get_state)(const struct net_device *dev,
enum can_state *state);
unsigned int echo_skb_max;
struct sk_buff **echo_skb;
};
struct net_device *alloc_candev(int sizeof_priv);
struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max);
void free_candev(struct net_device *dev);
int open_candev(struct net_device *dev);
@@ -64,8 +63,13 @@ void unregister_candev(struct net_device *dev);
int can_restart_now(struct net_device *dev);
void can_bus_off(struct net_device *dev);
void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, int idx);
void can_get_echo_skb(struct net_device *dev, int idx);
void can_free_echo_skb(struct net_device *dev, int idx);
void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
unsigned int idx);
void can_get_echo_skb(struct net_device *dev, unsigned int idx);
void can_free_echo_skb(struct net_device *dev, unsigned int idx);
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
struct sk_buff *alloc_can_err_skb(struct net_device *dev,
struct can_frame **cf);
#endif /* CAN_DEV_H */
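For illustration, the driver-side pattern these signature changes support, as a hedged sketch (hypothetical driver; "mbox" is an assumed per-mailbox index):
	/* probe: the echo-skb array is now sized at allocation time */
	dev = alloc_candev(sizeof(struct mydrv_priv), MYDRV_ECHO_SKB_MAX);

	/* ndo_start_xmit: remember the frame before handing it to the hardware */
	can_put_echo_skb(skb, dev, mbox);

	/* TX-done interrupt: loop the frame back to the stack for local echo ... */
	can_get_echo_skb(dev, mbox);

	/* ... or, if the transmission was aborted, just drop the stored copy */
	can_free_echo_skb(dev, mbox);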


@@ -0,0 +1,36 @@
#ifndef __CAN_PLATFORM_MCP251X_H__
#define __CAN_PLATFORM_MCP251X_H__
/*
*
* CAN bus driver for Microchip 251x CAN Controller with SPI Interface
*
*/
#include <linux/spi/spi.h>
/**
* struct mcp251x_platform_data - MCP251X SPI CAN controller platform data
* @oscillator_frequency: - oscillator frequency in Hz
* @model: - actual type of chip
* @board_specific_setup: - called before probing the chip (power,reset)
* @transceiver_enable: - called to power on/off the transceiver
* @power_enable: - called to power on/off the mcp *and* the
* transceiver
*
* Please note that you should define either power_enable or transceiver_enable,
* or neither of them; defining both of them is of no use.
*
*/
struct mcp251x_platform_data {
unsigned long oscillator_frequency;
int model;
#define CAN_MCP251X_MCP2510 0
#define CAN_MCP251X_MCP2515 1
int (*board_specific_setup)(struct spi_device *spi);
int (*transceiver_enable)(int enable);
int (*power_enable) (int enable);
};
#endif /* __CAN_PLATFORM_MCP251X_H__ */
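A minimal, hypothetical example of board code wiring this up over SPI (bus number, chip select and clock rate are placeholders, not taken from this diff):
	static struct mcp251x_platform_data mcp251x_info = {
		.oscillator_frequency	= 16000000,
		.model			= CAN_MCP251X_MCP2515,
	};

	static struct spi_board_info board_spi_devices[] __initdata = {
		{
			.modalias	= "mcp251x",
			.platform_data	= &mcp251x_info,
			.max_speed_hz	= 10000000,
			.bus_num	= 1,
			.chip_select	= 0,
		},
	};
	/* registered from board init with spi_register_board_info() */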


@@ -0,0 +1,40 @@
/*
* TI HECC (High End CAN Controller) driver platform header
*
* Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed as is WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
/**
* struct hecc_platform_data - HECC Platform Data
*
* @scc_hecc_offset: mostly 0 - should really never change
* @scc_ram_offset: SCC RAM offset
* @hecc_ram_offset: HECC RAM offset
* @mbx_offset: Mailbox RAM offset
* @int_line: Interrupt line to use - 0 or 1
* @version: version for future use
*
* Platform data structure carrying all platform-specific settings.
* This structure also accounts for the fact that the IP may have different
* RAM and mailbox offsets on different SoCs.
*/
struct ti_hecc_platform_data {
u32 scc_hecc_offset;
u32 scc_ram_offset;
u32 hecc_ram_offset;
u32 mbx_offset;
u32 int_line;
u32 version;
};
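A similarly hypothetical board-level instance (all offsets are placeholders; the real values are SoC specific):
	static struct ti_hecc_platform_data board_hecc_pdata = {
		.scc_hecc_offset	= 0,
		.scc_ram_offset		= 0x3000,
		.hecc_ram_offset	= 0x3000,
		.mbx_offset		= 0x2000,
		.int_line		= 0,
		.version		= 1,
	};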


@@ -92,9 +92,7 @@ struct vfs_cap_data {
#define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3
#define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3
#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
extern int file_caps_enabled;
#endif
typedef struct kernel_cap_struct {
__u32 cap[_KERNEL_CAPABILITY_U32S];


@@ -60,3 +60,9 @@ SUBSYS(net_cls)
#endif
/* */
#ifdef CONFIG_BLK_CGROUP
SUBSYS(blkio)
#endif
/* */


@@ -10,6 +10,8 @@
#include <linux/stat.h>
#include <linux/param.h> /* for HZ */
#include <linux/sem.h>
#include <linux/socket.h>
#include <linux/if.h>
#include <asm/compat.h>
#include <asm/siginfo.h>
@@ -154,6 +156,48 @@ typedef struct compat_sigevent {
} _sigev_un;
} compat_sigevent_t;
struct compat_ifmap {
compat_ulong_t mem_start;
compat_ulong_t mem_end;
unsigned short base_addr;
unsigned char irq;
unsigned char dma;
unsigned char port;
};
struct compat_if_settings
{
unsigned int type; /* Type of physical device or protocol */
unsigned int size; /* Size of the data allocated by the caller */
compat_uptr_t ifs_ifsu; /* union of pointers */
};
struct compat_ifreq {
union {
char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */
} ifr_ifrn;
union {
struct sockaddr ifru_addr;
struct sockaddr ifru_dstaddr;
struct sockaddr ifru_broadaddr;
struct sockaddr ifru_netmask;
struct sockaddr ifru_hwaddr;
short ifru_flags;
compat_int_t ifru_ivalue;
compat_int_t ifru_mtu;
struct compat_ifmap ifru_map;
char ifru_slave[IFNAMSIZ]; /* Just fits the size */
char ifru_newname[IFNAMSIZ];
compat_caddr_t ifru_data;
struct compat_if_settings ifru_settings;
} ifr_ifru;
};
struct compat_ifconf {
compat_int_t ifc_len; /* size of buffer */
compat_caddr_t ifcbuf;
};
struct compat_robust_list {
compat_uptr_t next;
};


@@ -79,6 +79,7 @@
#define noinline __attribute__((noinline))
#define __attribute_const__ __attribute__((__const__))
#define __maybe_unused __attribute__((unused))
#define __always_unused __attribute__((unused))
#define __gcc_header(x) #x
#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)


@@ -36,4 +36,26 @@
the kernel context */
#define __cold __attribute__((__cold__))
#if __GNUC_MINOR__ >= 5
/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
* control elsewhere.
*
* Early snapshots of gcc 4.5 don't support this and we can't detect
* this in the preprocessor, but we can live with this because they're
* unreleased. Really, we need to have autoconf for the kernel.
*/
#define unreachable() __builtin_unreachable()
#endif
#endif
#if __GNUC_MINOR__ > 0
#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
#endif
#if __GNUC_MINOR__ >= 4
#define __compiletime_warning(message) __attribute__((warning(message)))
#define __compiletime_error(message) __attribute__((error(message)))
#endif
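A sketch of the intended use of unreachable() (assumed example, not from this diff): after an asm statement that traps or otherwise never falls through, it suppresses the "control reaches end of non-void function" warning on gcc >= 4.5, while the fallback in compiler.h turns it into an infinite loop on older compilers.
	static int always_traps(void)
	{
		asm volatile("ud2");	/* x86: raises an invalid-opcode exception */
		unreachable();
	}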


@@ -144,6 +144,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define barrier() __memory_barrier()
#endif
/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
#endif
#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off) \
({ unsigned long __ptr; \
@@ -213,6 +218,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define __maybe_unused /* unimplemented */
#endif
#ifndef __always_unused
# define __always_unused /* unimplemented */
#endif
#ifndef noinline
#define noinline
#endif
@@ -266,6 +275,17 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif
/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif
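A hedged sketch of the kind of build-time check these annotations enable (hypothetical helper; the error fires only when the compiler can prove both sizes):
	extern void __example_copy_overflow(void)
		__compiletime_error("destination buffer is too small");

	static inline void example_copy(void *dst, const void *src, size_t len)
	{
		size_t dst_size = __compiletime_object_size(dst);

		if (dst_size != (size_t)-1 && dst_size < len)
			__example_copy_overflow();
		memcpy(dst, src, len);
	}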
/*
* Prevent the compiler from merging or refetching accesses. The compiler
* is also forbidden from reordering successive instances of ACCESS_ONCE(),


@@ -43,6 +43,8 @@
#define CN_DST_VAL 0x1
#define CN_IDX_DM 0x7 /* Device Mapper */
#define CN_VAL_DM_USERSPACE_LOG 0x1
#define CN_IDX_DRBD 0x8
#define CN_VAL_DRBD 0x1
#define CN_NETLINK_USERS 8


@@ -2,6 +2,7 @@
#define __CRYPTOHASH_H
#define SHA_DIGEST_WORDS 5
#define SHA_MESSAGE_BYTES (512 /*bits*/ / 8)
#define SHA_WORKSPACE_WORDS 80
void sha_init(__u32 *buf);


@@ -208,16 +208,9 @@ struct dmar_atsr_unit {
u8 include_all:1; /* include all ports */
};
/* Intel DMAR initialization functions */
extern int intel_iommu_init(void);
#else
static inline int intel_iommu_init(void)
{
#ifdef CONFIG_INTR_REMAP
return dmar_dev_scope_init();
#else
return -ENODEV;
#endif
}
#endif /* !CONFIG_DMAR */
#else /* !CONFIG_DMAR: */
static inline int intel_iommu_init(void) { return -ENODEV; }
#endif /* CONFIG_DMAR */
#endif /* __DMAR_H__ */


@@ -71,14 +71,12 @@
/* Structures */
struct dn_naddr
{
struct dn_naddr {
__le16 a_len;
__u8 a_addr[DN_MAXADDL]; /* Two bytes little endian */
};
struct sockaddr_dn
{
struct sockaddr_dn {
__u16 sdn_family;
__u8 sdn_flags;
__u8 sdn_objnum;
@@ -101,8 +99,7 @@ struct optdata_dn {
__u8 opt_data[16]; /* User data */
};
struct accessdata_dn
{
struct accessdata_dn {
__u8 acc_accl;
__u8 acc_acc[DN_MAXACCL];
__u8 acc_passl;

include/linux/drbd.h (new file, 343 lines)

@@ -0,0 +1,343 @@
/*
drbd.h
Kernel module for 2.6.x Kernels
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
Copyright (C) 2001-2008, Philipp Reisner <philipp.reisner@linbit.com>.
Copyright (C) 2001-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
drbd is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
drbd is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with drbd; see the file COPYING. If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef DRBD_H
#define DRBD_H
#include <linux/connector.h>
#include <asm/types.h>
#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/byteorder.h>
#else
#include <sys/types.h>
#include <sys/wait.h>
#include <limits.h>
/* Although the Linux source code distinguishes between
generic endianness and the bitfields' endianness, there is no
architecture as of Linux-2.6.24-rc4 where the bitfields' endianness
does not match the generic endianness. */
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define __LITTLE_ENDIAN_BITFIELD
#elif __BYTE_ORDER == __BIG_ENDIAN
#define __BIG_ENDIAN_BITFIELD
#else
# error "sorry, weird endianness on this box"
#endif
#endif
extern const char *drbd_buildtag(void);
#define REL_VERSION "8.3.6"
#define API_VERSION 88
#define PRO_VERSION_MIN 86
#define PRO_VERSION_MAX 91
enum drbd_io_error_p {
EP_PASS_ON, /* FIXME should this better be named "Ignore"? */
EP_CALL_HELPER,
EP_DETACH
};
enum drbd_fencing_p {
FP_DONT_CARE,
FP_RESOURCE,
FP_STONITH
};
enum drbd_disconnect_p {
DP_RECONNECT,
DP_DROP_NET_CONF,
DP_FREEZE_IO
};
enum drbd_after_sb_p {
ASB_DISCONNECT,
ASB_DISCARD_YOUNGER_PRI,
ASB_DISCARD_OLDER_PRI,
ASB_DISCARD_ZERO_CHG,
ASB_DISCARD_LEAST_CHG,
ASB_DISCARD_LOCAL,
ASB_DISCARD_REMOTE,
ASB_CONSENSUS,
ASB_DISCARD_SECONDARY,
ASB_CALL_HELPER,
ASB_VIOLENTLY
};
/* KEEP the order, do not delete or insert. Only append. */
enum drbd_ret_codes {
ERR_CODE_BASE = 100,
NO_ERROR = 101,
ERR_LOCAL_ADDR = 102,
ERR_PEER_ADDR = 103,
ERR_OPEN_DISK = 104,
ERR_OPEN_MD_DISK = 105,
ERR_DISK_NOT_BDEV = 107,
ERR_MD_NOT_BDEV = 108,
ERR_DISK_TO_SMALL = 111,
ERR_MD_DISK_TO_SMALL = 112,
ERR_BDCLAIM_DISK = 114,
ERR_BDCLAIM_MD_DISK = 115,
ERR_MD_IDX_INVALID = 116,
ERR_IO_MD_DISK = 118,
ERR_MD_INVALID = 119,
ERR_AUTH_ALG = 120,
ERR_AUTH_ALG_ND = 121,
ERR_NOMEM = 122,
ERR_DISCARD = 123,
ERR_DISK_CONFIGURED = 124,
ERR_NET_CONFIGURED = 125,
ERR_MANDATORY_TAG = 126,
ERR_MINOR_INVALID = 127,
ERR_INTR = 129, /* EINTR */
ERR_RESIZE_RESYNC = 130,
ERR_NO_PRIMARY = 131,
ERR_SYNC_AFTER = 132,
ERR_SYNC_AFTER_CYCLE = 133,
ERR_PAUSE_IS_SET = 134,
ERR_PAUSE_IS_CLEAR = 135,
ERR_PACKET_NR = 137,
ERR_NO_DISK = 138,
ERR_NOT_PROTO_C = 139,
ERR_NOMEM_BITMAP = 140,
ERR_INTEGRITY_ALG = 141, /* DRBD 8.2 only */
ERR_INTEGRITY_ALG_ND = 142, /* DRBD 8.2 only */
ERR_CPU_MASK_PARSE = 143, /* DRBD 8.2 only */
ERR_CSUMS_ALG = 144, /* DRBD 8.2 only */
ERR_CSUMS_ALG_ND = 145, /* DRBD 8.2 only */
ERR_VERIFY_ALG = 146, /* DRBD 8.2 only */
ERR_VERIFY_ALG_ND = 147, /* DRBD 8.2 only */
ERR_CSUMS_RESYNC_RUNNING= 148, /* DRBD 8.2 only */
ERR_VERIFY_RUNNING = 149, /* DRBD 8.2 only */
ERR_DATA_NOT_CURRENT = 150,
ERR_CONNECTED = 151, /* DRBD 8.3 only */
ERR_PERM = 152,
/* insert new ones above this line */
AFTER_LAST_ERR_CODE
};
#define DRBD_PROT_A 1
#define DRBD_PROT_B 2
#define DRBD_PROT_C 3
enum drbd_role {
R_UNKNOWN = 0,
R_PRIMARY = 1, /* role */
R_SECONDARY = 2, /* role */
R_MASK = 3,
};
/* The order of these constants is important.
* The lower ones (<C_WF_REPORT_PARAMS) indicate
* that there is no socket!
* >=C_WF_REPORT_PARAMS ==> There is a socket
*/
enum drbd_conns {
C_STANDALONE,
C_DISCONNECTING, /* Temporal state on the way to StandAlone. */
C_UNCONNECTED, /* >= C_UNCONNECTED -> inc_net() succeeds */
/* These temporal states are all used on the way
* from >= C_CONNECTED to Unconnected.
* The 'disconnect reason' states
* I do not allow changing between them. */
C_TIMEOUT,
C_BROKEN_PIPE,
C_NETWORK_FAILURE,
C_PROTOCOL_ERROR,
C_TEAR_DOWN,
C_WF_CONNECTION,
C_WF_REPORT_PARAMS, /* we have a socket */
C_CONNECTED, /* we have introduced each other */
C_STARTING_SYNC_S, /* starting full sync by admin request. */
C_STARTING_SYNC_T, /* starting full sync by admin request. */
C_WF_BITMAP_S,
C_WF_BITMAP_T,
C_WF_SYNC_UUID,
/* All SyncStates are tested with this comparison
* xx >= C_SYNC_SOURCE && xx <= C_PAUSED_SYNC_T */
C_SYNC_SOURCE,
C_SYNC_TARGET,
C_VERIFY_S,
C_VERIFY_T,
C_PAUSED_SYNC_S,
C_PAUSED_SYNC_T,
C_MASK = 31
};
enum drbd_disk_state {
D_DISKLESS,
D_ATTACHING, /* In the process of reading the meta-data */
D_FAILED, /* Becomes D_DISKLESS as soon as we told it the peer */
/* when >= D_FAILED it is legal to access mdev->bc */
D_NEGOTIATING, /* Late attaching state, we need to talk to the peer */
D_INCONSISTENT,
D_OUTDATED,
D_UNKNOWN, /* Only used for the peer, never for myself */
D_CONSISTENT, /* Might be D_OUTDATED, might be D_UP_TO_DATE ... */
D_UP_TO_DATE, /* Only this disk state allows applications' IO ! */
D_MASK = 15
};
union drbd_state {
/* According to gcc's docs, the order of allocation of bit-fields
* within a unit (C90 6.5.2.1, C99 6.7.2.1) is determined by the ABI.
* As pointed out by Maxim Uvarov <muvarov@ru.mvista.com>:
* even though we transmit as "cpu_to_be32(state)",
* the offsets of the bitfields still need to be swapped
* on different endianness.
*/
struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
unsigned role:2 ; /* 3/4 primary/secondary/unknown */
unsigned peer:2 ; /* 3/4 primary/secondary/unknown */
unsigned conn:5 ; /* 17/32 cstates */
unsigned disk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
unsigned pdsk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
unsigned susp:1 ; /* 2/2 IO suspended no/yes */
unsigned aftr_isp:1 ; /* isp .. imposed sync pause */
unsigned peer_isp:1 ;
unsigned user_isp:1 ;
unsigned _pad:11; /* 0 unused */
#elif defined(__BIG_ENDIAN_BITFIELD)
unsigned _pad:11; /* 0 unused */
unsigned user_isp:1 ;
unsigned peer_isp:1 ;
unsigned aftr_isp:1 ; /* isp .. imposed sync pause */
unsigned susp:1 ; /* 2/2 IO suspended no/yes */
unsigned pdsk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
unsigned disk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
unsigned conn:5 ; /* 17/32 cstates */
unsigned peer:2 ; /* 3/4 primary/secondary/unknown */
unsigned role:2 ; /* 3/4 primary/secondary/unknown */
#else
# error "this endianess is not supported"
#endif
};
unsigned int i;
};
enum drbd_state_ret_codes {
SS_CW_NO_NEED = 4,
SS_CW_SUCCESS = 3,
SS_NOTHING_TO_DO = 2,
SS_SUCCESS = 1,
SS_UNKNOWN_ERROR = 0, /* Used to sleep longer in _drbd_request_state */
SS_TWO_PRIMARIES = -1,
SS_NO_UP_TO_DATE_DISK = -2,
SS_NO_LOCAL_DISK = -4,
SS_NO_REMOTE_DISK = -5,
SS_CONNECTED_OUTDATES = -6,
SS_PRIMARY_NOP = -7,
SS_RESYNC_RUNNING = -8,
SS_ALREADY_STANDALONE = -9,
SS_CW_FAILED_BY_PEER = -10,
SS_IS_DISKLESS = -11,
SS_DEVICE_IN_USE = -12,
SS_NO_NET_CONFIG = -13,
SS_NO_VERIFY_ALG = -14, /* drbd-8.2 only */
SS_NEED_CONNECTION = -15, /* drbd-8.2 only */
SS_LOWER_THAN_OUTDATED = -16,
SS_NOT_SUPPORTED = -17, /* drbd-8.2 only */
SS_IN_TRANSIENT_STATE = -18, /* Retry after the next state change */
SS_CONCURRENT_ST_CHG = -19, /* Concurrent cluster side state change! */
SS_AFTER_LAST_ERROR = -20, /* Keep this at bottom */
};
/* from drbd_strings.c */
extern const char *drbd_conn_str(enum drbd_conns);
extern const char *drbd_role_str(enum drbd_role);
extern const char *drbd_disk_str(enum drbd_disk_state);
extern const char *drbd_set_st_err_str(enum drbd_state_ret_codes);
#define SHARED_SECRET_MAX 64
#define MDF_CONSISTENT (1 << 0)
#define MDF_PRIMARY_IND (1 << 1)
#define MDF_CONNECTED_IND (1 << 2)
#define MDF_FULL_SYNC (1 << 3)
#define MDF_WAS_UP_TO_DATE (1 << 4)
#define MDF_PEER_OUT_DATED (1 << 5)
#define MDF_CRASHED_PRIMARY (1 << 6)
enum drbd_uuid_index {
UI_CURRENT,
UI_BITMAP,
UI_HISTORY_START,
UI_HISTORY_END,
UI_SIZE, /* nl-packet: number of dirty bits */
UI_FLAGS, /* nl-packet: flags */
UI_EXTENDED_SIZE /* Everything. */
};
enum drbd_timeout_flag {
UT_DEFAULT = 0,
UT_DEGRADED = 1,
UT_PEER_OUTDATED = 2,
};
#define UUID_JUST_CREATED ((__u64)4)
#define DRBD_MAGIC 0x83740267
#define BE_DRBD_MAGIC __constant_cpu_to_be32(DRBD_MAGIC)
/* these are of type "int" */
#define DRBD_MD_INDEX_INTERNAL -1
#define DRBD_MD_INDEX_FLEX_EXT -2
#define DRBD_MD_INDEX_FLEX_INT -3
/* Start of the new netlink/connector stuff */
#define DRBD_NL_CREATE_DEVICE 0x01
#define DRBD_NL_SET_DEFAULTS 0x02
/* For searching a vacant cn_idx value */
#define CN_IDX_STEP 6977
struct drbd_nl_cfg_req {
int packet_type;
unsigned int drbd_minor;
int flags;
unsigned short tag_list[];
};
struct drbd_nl_cfg_reply {
int packet_type;
unsigned int minor;
int ret_code; /* enum ret_code or set_st_err_t */
unsigned short tag_list[]; /* only used with get_* calls */
};
#endif

include/linux/drbd_limits.h (new file, 137 lines)

@@ -0,0 +1,137 @@
/*
drbd_limits.h
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
*/
/*
* Our current limitations.
* Some of them are hard limits,
* some of them are arbitrary range limits that make it easier to provide
* feedback about nonsense settings for certain configurable values.
*/
#ifndef DRBD_LIMITS_H
#define DRBD_LIMITS_H 1
#define DEBUG_RANGE_CHECK 0
#define DRBD_MINOR_COUNT_MIN 1
#define DRBD_MINOR_COUNT_MAX 255
#define DRBD_DIALOG_REFRESH_MIN 0
#define DRBD_DIALOG_REFRESH_MAX 600
/* valid port number */
#define DRBD_PORT_MIN 1
#define DRBD_PORT_MAX 0xffff
/* startup { */
/* if you want more than 3.4 days, disable */
#define DRBD_WFC_TIMEOUT_MIN 0
#define DRBD_WFC_TIMEOUT_MAX 300000
#define DRBD_WFC_TIMEOUT_DEF 0
#define DRBD_DEGR_WFC_TIMEOUT_MIN 0
#define DRBD_DEGR_WFC_TIMEOUT_MAX 300000
#define DRBD_DEGR_WFC_TIMEOUT_DEF 0
#define DRBD_OUTDATED_WFC_TIMEOUT_MIN 0
#define DRBD_OUTDATED_WFC_TIMEOUT_MAX 300000
#define DRBD_OUTDATED_WFC_TIMEOUT_DEF 0
/* }*/
/* net { */
/* timeout, unit: centiseconds;
* a timeout of more than one minute is not useful */
#define DRBD_TIMEOUT_MIN 1
#define DRBD_TIMEOUT_MAX 600
#define DRBD_TIMEOUT_DEF 60 /* 6 seconds */
/* active connection retries when C_WF_CONNECTION */
#define DRBD_CONNECT_INT_MIN 1
#define DRBD_CONNECT_INT_MAX 120
#define DRBD_CONNECT_INT_DEF 10 /* seconds */
/* keep-alive probes when idle */
#define DRBD_PING_INT_MIN 1
#define DRBD_PING_INT_MAX 120
#define DRBD_PING_INT_DEF 10
/* timeout for the ping packets.*/
#define DRBD_PING_TIMEO_MIN 1
#define DRBD_PING_TIMEO_MAX 100
#define DRBD_PING_TIMEO_DEF 5
/* max number of write requests between write barriers */
#define DRBD_MAX_EPOCH_SIZE_MIN 1
#define DRBD_MAX_EPOCH_SIZE_MAX 20000
#define DRBD_MAX_EPOCH_SIZE_DEF 2048
/* I don't think that a TCP send buffer of more than 10M is useful */
#define DRBD_SNDBUF_SIZE_MIN 0
#define DRBD_SNDBUF_SIZE_MAX (10<<20)
#define DRBD_SNDBUF_SIZE_DEF 0
#define DRBD_RCVBUF_SIZE_MIN 0
#define DRBD_RCVBUF_SIZE_MAX (10<<20)
#define DRBD_RCVBUF_SIZE_DEF 0
/* @4k PageSize -> 128kB - 512MB */
#define DRBD_MAX_BUFFERS_MIN 32
#define DRBD_MAX_BUFFERS_MAX 131072
#define DRBD_MAX_BUFFERS_DEF 2048
/* @4k PageSize -> 4kB - 512MB */
#define DRBD_UNPLUG_WATERMARK_MIN 1
#define DRBD_UNPLUG_WATERMARK_MAX 131072
#define DRBD_UNPLUG_WATERMARK_DEF (DRBD_MAX_BUFFERS_DEF/16)
/* 0 is disabled.
* 200 should be more than enough even for very short timeouts */
#define DRBD_KO_COUNT_MIN 0
#define DRBD_KO_COUNT_MAX 200
#define DRBD_KO_COUNT_DEF 0
/* } */
/* syncer { */
/* FIXME allow rate to be zero? */
#define DRBD_RATE_MIN 1
/* channel bonding 10 GbE, or other hardware */
#define DRBD_RATE_MAX (4 << 20)
#define DRBD_RATE_DEF 250 /* kb/second */
/* less than 7 would hit performance unnecessarily.
* 3833 is the largest prime that still fits
* into 64 sectors of activity log */
#define DRBD_AL_EXTENTS_MIN 7
#define DRBD_AL_EXTENTS_MAX 3833
#define DRBD_AL_EXTENTS_DEF 127
#define DRBD_AFTER_MIN -1
#define DRBD_AFTER_MAX 255
#define DRBD_AFTER_DEF -1
/* } */
/* drbdsetup XY resize -d Z
* you are free to reduce the device size to nothing, if you want to.
* the upper limit with 64bit kernel, enough ram and flexible meta data
* is 16 TB, currently. */
/* DRBD_MAX_SECTORS */
#define DRBD_DISK_SIZE_SECT_MIN 0
#define DRBD_DISK_SIZE_SECT_MAX (16 * (2LLU << 30))
#define DRBD_DISK_SIZE_SECT_DEF 0 /* = disabled = no user size... */
#define DRBD_ON_IO_ERROR_DEF EP_PASS_ON
#define DRBD_FENCING_DEF FP_DONT_CARE
#define DRBD_AFTER_SB_0P_DEF ASB_DISCONNECT
#define DRBD_AFTER_SB_1P_DEF ASB_DISCONNECT
#define DRBD_AFTER_SB_2P_DEF ASB_DISCONNECT
#define DRBD_RR_CONFLICT_DEF ASB_DISCONNECT
#define DRBD_MAX_BIO_BVECS_MIN 0
#define DRBD_MAX_BIO_BVECS_MAX 128
#define DRBD_MAX_BIO_BVECS_DEF 0
#undef RANGE
#endif

include/linux/drbd_nl.h (new file, 137 lines)

@@ -0,0 +1,137 @@
/*
PAKET( name,
TYPE ( pn, pr, member )
...
)
You may never reissue one of the pn arguments
*/
#if !defined(NL_PACKET) || !defined(NL_STRING) || !defined(NL_INTEGER) || !defined(NL_BIT) || !defined(NL_INT64)
#error "The macros NL_PACKET, NL_STRING, NL_INTEGER, NL_INT64 and NL_BIT needs to be defined"
#endif
NL_PACKET(primary, 1,
NL_BIT( 1, T_MAY_IGNORE, overwrite_peer)
)
NL_PACKET(secondary, 2, )
NL_PACKET(disk_conf, 3,
NL_INT64( 2, T_MAY_IGNORE, disk_size)
NL_STRING( 3, T_MANDATORY, backing_dev, 128)
NL_STRING( 4, T_MANDATORY, meta_dev, 128)
NL_INTEGER( 5, T_MANDATORY, meta_dev_idx)
NL_INTEGER( 6, T_MAY_IGNORE, on_io_error)
NL_INTEGER( 7, T_MAY_IGNORE, fencing)
NL_BIT( 37, T_MAY_IGNORE, use_bmbv)
NL_BIT( 53, T_MAY_IGNORE, no_disk_flush)
NL_BIT( 54, T_MAY_IGNORE, no_md_flush)
/* 55 max_bio_size was available in 8.2.6rc2 */
NL_INTEGER( 56, T_MAY_IGNORE, max_bio_bvecs)
NL_BIT( 57, T_MAY_IGNORE, no_disk_barrier)
NL_BIT( 58, T_MAY_IGNORE, no_disk_drain)
)
NL_PACKET(detach, 4, )
NL_PACKET(net_conf, 5,
NL_STRING( 8, T_MANDATORY, my_addr, 128)
NL_STRING( 9, T_MANDATORY, peer_addr, 128)
NL_STRING( 10, T_MAY_IGNORE, shared_secret, SHARED_SECRET_MAX)
NL_STRING( 11, T_MAY_IGNORE, cram_hmac_alg, SHARED_SECRET_MAX)
NL_STRING( 44, T_MAY_IGNORE, integrity_alg, SHARED_SECRET_MAX)
NL_INTEGER( 14, T_MAY_IGNORE, timeout)
NL_INTEGER( 15, T_MANDATORY, wire_protocol)
NL_INTEGER( 16, T_MAY_IGNORE, try_connect_int)
NL_INTEGER( 17, T_MAY_IGNORE, ping_int)
NL_INTEGER( 18, T_MAY_IGNORE, max_epoch_size)
NL_INTEGER( 19, T_MAY_IGNORE, max_buffers)
NL_INTEGER( 20, T_MAY_IGNORE, unplug_watermark)
NL_INTEGER( 21, T_MAY_IGNORE, sndbuf_size)
NL_INTEGER( 22, T_MAY_IGNORE, ko_count)
NL_INTEGER( 24, T_MAY_IGNORE, after_sb_0p)
NL_INTEGER( 25, T_MAY_IGNORE, after_sb_1p)
NL_INTEGER( 26, T_MAY_IGNORE, after_sb_2p)
NL_INTEGER( 39, T_MAY_IGNORE, rr_conflict)
NL_INTEGER( 40, T_MAY_IGNORE, ping_timeo)
NL_INTEGER( 67, T_MAY_IGNORE, rcvbuf_size)
/* 59 addr_family was available in GIT, never released */
NL_BIT( 60, T_MANDATORY, mind_af)
NL_BIT( 27, T_MAY_IGNORE, want_lose)
NL_BIT( 28, T_MAY_IGNORE, two_primaries)
NL_BIT( 41, T_MAY_IGNORE, always_asbp)
NL_BIT( 61, T_MAY_IGNORE, no_cork)
NL_BIT( 62, T_MANDATORY, auto_sndbuf_size)
)
NL_PACKET(disconnect, 6, )
NL_PACKET(resize, 7,
NL_INT64( 29, T_MAY_IGNORE, resize_size)
)
NL_PACKET(syncer_conf, 8,
NL_INTEGER( 30, T_MAY_IGNORE, rate)
NL_INTEGER( 31, T_MAY_IGNORE, after)
NL_INTEGER( 32, T_MAY_IGNORE, al_extents)
NL_STRING( 52, T_MAY_IGNORE, verify_alg, SHARED_SECRET_MAX)
NL_STRING( 51, T_MAY_IGNORE, cpu_mask, 32)
NL_STRING( 64, T_MAY_IGNORE, csums_alg, SHARED_SECRET_MAX)
NL_BIT( 65, T_MAY_IGNORE, use_rle)
)
NL_PACKET(invalidate, 9, )
NL_PACKET(invalidate_peer, 10, )
NL_PACKET(pause_sync, 11, )
NL_PACKET(resume_sync, 12, )
NL_PACKET(suspend_io, 13, )
NL_PACKET(resume_io, 14, )
NL_PACKET(outdate, 15, )
NL_PACKET(get_config, 16, )
NL_PACKET(get_state, 17,
NL_INTEGER( 33, T_MAY_IGNORE, state_i)
)
NL_PACKET(get_uuids, 18,
NL_STRING( 34, T_MAY_IGNORE, uuids, (UI_SIZE*sizeof(__u64)))
NL_INTEGER( 35, T_MAY_IGNORE, uuids_flags)
)
NL_PACKET(get_timeout_flag, 19,
NL_BIT( 36, T_MAY_IGNORE, use_degraded)
)
NL_PACKET(call_helper, 20,
NL_STRING( 38, T_MAY_IGNORE, helper, 32)
)
/* Tag nr 42 already allocated in drbd-8.1 development. */
NL_PACKET(sync_progress, 23,
NL_INTEGER( 43, T_MAY_IGNORE, sync_progress)
)
NL_PACKET(dump_ee, 24,
NL_STRING( 45, T_MAY_IGNORE, dump_ee_reason, 32)
NL_STRING( 46, T_MAY_IGNORE, seen_digest, SHARED_SECRET_MAX)
NL_STRING( 47, T_MAY_IGNORE, calc_digest, SHARED_SECRET_MAX)
NL_INT64( 48, T_MAY_IGNORE, ee_sector)
NL_INT64( 49, T_MAY_IGNORE, ee_block_id)
NL_STRING( 50, T_MAY_IGNORE, ee_data, 32 << 10)
)
NL_PACKET(start_ov, 25,
NL_INT64( 66, T_MAY_IGNORE, start_sector)
)
NL_PACKET(new_c_uuid, 26,
NL_BIT( 63, T_MANDATORY, clear_bm)
)
#undef NL_PACKET
#undef NL_INTEGER
#undef NL_INT64
#undef NL_BIT
#undef NL_STRING


@@ -0,0 +1,83 @@
#ifndef DRBD_TAG_MAGIC_H
#define DRBD_TAG_MAGIC_H
#define TT_END 0
#define TT_REMOVED 0xE000
/* declare packet_type enums */
enum packet_types {
#define NL_PACKET(name, number, fields) P_ ## name = number,
#define NL_INTEGER(pn, pr, member)
#define NL_INT64(pn, pr, member)
#define NL_BIT(pn, pr, member)
#define NL_STRING(pn, pr, member, len)
#include "drbd_nl.h"
P_nl_after_last_packet,
};
/* These structs are used to deduce the size of the tag lists: */
#define NL_PACKET(name, number, fields) \
struct name ## _tag_len_struct { fields };
#define NL_INTEGER(pn, pr, member) \
int member; int tag_and_len ## member;
#define NL_INT64(pn, pr, member) \
__u64 member; int tag_and_len ## member;
#define NL_BIT(pn, pr, member) \
unsigned char member:1; int tag_and_len ## member;
#define NL_STRING(pn, pr, member, len) \
unsigned char member[len]; int member ## _len; \
int tag_and_len ## member;
#include "linux/drbd_nl.h"
/* declare tag-list sizes */
static const int tag_list_sizes[] = {
#define NL_PACKET(name, number, fields) 2 fields ,
#define NL_INTEGER(pn, pr, member) + 4 + 4
#define NL_INT64(pn, pr, member) + 4 + 8
#define NL_BIT(pn, pr, member) + 4 + 1
#define NL_STRING(pn, pr, member, len) + 4 + (len)
#include "drbd_nl.h"
};
/* The two highest bits are used for the tag type */
#define TT_MASK 0xC000
#define TT_INTEGER 0x0000
#define TT_INT64 0x4000
#define TT_BIT 0x8000
#define TT_STRING 0xC000
/* The next bit indicates if processing of the tag is mandatory */
#define T_MANDATORY 0x2000
#define T_MAY_IGNORE 0x0000
#define TN_MASK 0x1fff
/* The remaining 13 bits are used to enumerate the tags */
#define tag_type(T) ((T) & TT_MASK)
#define tag_number(T) ((T) & TN_MASK)
/* declare tag enums */
#define NL_PACKET(name, number, fields) fields
enum drbd_tags {
#define NL_INTEGER(pn, pr, member) T_ ## member = pn | TT_INTEGER | pr ,
#define NL_INT64(pn, pr, member) T_ ## member = pn | TT_INT64 | pr ,
#define NL_BIT(pn, pr, member) T_ ## member = pn | TT_BIT | pr ,
#define NL_STRING(pn, pr, member, len) T_ ## member = pn | TT_STRING | pr ,
#include "drbd_nl.h"
};
struct tag {
const char *name;
int type_n_flags;
int max_len;
};
/* declare tag names */
#define NL_PACKET(name, number, fields) fields
static const struct tag tag_descriptions[] = {
#define NL_INTEGER(pn, pr, member) [ pn ] = { #member, TT_INTEGER | pr, sizeof(int) },
#define NL_INT64(pn, pr, member) [ pn ] = { #member, TT_INT64 | pr, sizeof(__u64) },
#define NL_BIT(pn, pr, member) [ pn ] = { #member, TT_BIT | pr, sizeof(int) },
#define NL_STRING(pn, pr, member, len) [ pn ] = { #member, TT_STRING | pr, (len) },
#include "drbd_nl.h"
};
#endif


@@ -3,8 +3,7 @@
#include <linux/types.h>
struct sock_extended_err
{
struct sock_extended_err {
__u32 ee_errno;
__u8 ee_origin;
__u8 ee_type;
@@ -31,8 +30,7 @@ struct sock_extended_err
#define SKB_EXT_ERR(skb) ((struct sock_exterr_skb *) ((skb)->cb))
struct sock_exterr_skb
{
struct sock_exterr_skb {
union {
struct inet_skb_parm h4;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)


@@ -49,13 +49,14 @@ static inline __u32 ethtool_cmd_speed(struct ethtool_cmd *ep)
return (ep->speed_hi << 16) | ep->speed;
}
#define ETHTOOL_FWVERS_LEN 32
#define ETHTOOL_BUSINFO_LEN 32
/* these strings are set to whatever the driver author decides... */
struct ethtool_drvinfo {
__u32 cmd;
char driver[32]; /* driver short name, "tulip", "eepro100" */
char version[32]; /* driver version string */
char fw_version[32]; /* firmware version string, if applicable */
char fw_version[ETHTOOL_FWVERS_LEN]; /* firmware version string */
char bus_info[ETHTOOL_BUSINFO_LEN]; /* Bus info for this IF. */
/* For PCI devices, use pci_name(pci_dev). */
char reserved1[32];
@@ -495,13 +496,10 @@ struct ethtool_ops {
u32 (*get_priv_flags)(struct net_device *);
int (*set_priv_flags)(struct net_device *, u32);
int (*get_sset_count)(struct net_device *, int);
/* the following hooks are obsolete */
int (*self_test_count)(struct net_device *);/* use get_sset_count */
int (*get_stats_count)(struct net_device *);/* use get_sset_count */
int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, void *);
int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
int (*flash_device)(struct net_device *, struct ethtool_flash *);
int (*reset)(struct net_device *, u32 *);
};
#endif /* __KERNEL__ */
@@ -559,6 +557,7 @@ struct ethtool_ops {
#define ETHTOOL_SRXCLSRLDEL 0x00000031 /* Delete RX classification rule */
#define ETHTOOL_SRXCLSRLINS 0x00000032 /* Insert RX classification rule */
#define ETHTOOL_FLASHDEV 0x00000033 /* Flash firmware to device */
#define ETHTOOL_RESET 0x00000034 /* Reset hardware */
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
@@ -633,6 +632,8 @@ struct ethtool_ops {
#define PORT_MII 0x02
#define PORT_FIBRE 0x03
#define PORT_BNC 0x04
#define PORT_DA 0x05
#define PORT_NONE 0xef
#define PORT_OTHER 0xff
/* Which transceiver to use. */
@@ -676,6 +677,8 @@ struct ethtool_ops {
#define AH_V6_FLOW 0x0b
#define ESP_V6_FLOW 0x0c
#define IP_USER_FLOW 0x0d
#define IPV4_FLOW 0x10
#define IPV6_FLOW 0x11
/* L3-L4 network traffic flow hash options */
#define RXH_L2DA (1 << 1)
@@ -689,4 +692,34 @@ struct ethtool_ops {
#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL
/* Reset flags */
/* The reset() operation must clear the flags for the components which
* were actually reset. On successful return, the flags indicate the
* components which were not reset, either because they do not exist
* in the hardware or because they cannot be reset independently. The
* driver must never reset any components that were not requested.
*/
enum ethtool_reset_flags {
/* These flags represent components dedicated to the interface
* the command is addressed to. Shift any flag left by
* ETH_RESET_SHARED_SHIFT to reset a shared component of the
* same type.
*/
ETH_RESET_MGMT = 1 << 0, /* Management processor */
ETH_RESET_IRQ = 1 << 1, /* Interrupt requester */
ETH_RESET_DMA = 1 << 2, /* DMA engine */
ETH_RESET_FILTER = 1 << 3, /* Filtering/flow direction */
ETH_RESET_OFFLOAD = 1 << 4, /* Protocol offload */
ETH_RESET_MAC = 1 << 5, /* Media access controller */
ETH_RESET_PHY = 1 << 6, /* Transceiver/PHY */
ETH_RESET_RAM = 1 << 7, /* RAM shared between
* multiple components */
ETH_RESET_DEDICATED = 0x0000ffff, /* All components dedicated to
* this interface */
ETH_RESET_ALL = 0xffffffff, /* All components used by this
* interface, even if shared */
};
#define ETH_RESET_SHARED_SHIFT 16
#endif /* _LINUX_ETHTOOL_H */
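For illustration, an assumed example of how a tool might compose the flag word for ETHTOOL_RESET (a shared component is requested by shifting the same bit left by ETH_RESET_SHARED_SHIFT; on return the driver clears the bits it actually reset):
	struct ethtool_value reset = {
		.cmd  = ETHTOOL_RESET,
		/* this port's PHY plus the RAM shared with other ports */
		.data = ETH_RESET_PHY | (ETH_RESET_RAM << ETH_RESET_SHARED_SHIFT),
	};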


@@ -8,13 +8,14 @@
#define FIB_RULE_PERMANENT 0x00000001
#define FIB_RULE_INVERT 0x00000002
#define FIB_RULE_UNRESOLVED 0x00000004
#define FIB_RULE_DEV_DETACHED 0x00000008
#define FIB_RULE_IIF_DETACHED 0x00000008
#define FIB_RULE_DEV_DETACHED FIB_RULE_IIF_DETACHED
#define FIB_RULE_OIF_DETACHED 0x00000010
/* try to find source address in routing lookups */
#define FIB_RULE_FIND_SADDR 0x00010000
struct fib_rule_hdr
{
struct fib_rule_hdr {
__u8 family;
__u8 dst_len;
__u8 src_len;
@@ -28,12 +29,12 @@ struct fib_rule_hdr
__u32 flags;
};
enum
{
enum {
FRA_UNSPEC,
FRA_DST, /* destination address */
FRA_SRC, /* source address */
FRA_IFNAME, /* interface name */
FRA_IIFNAME, /* interface name */
#define FRA_IFNAME FRA_IIFNAME
FRA_GOTO, /* target to jump to (FR_ACT_GOTO) */
FRA_UNUSED2,
FRA_PRIORITY, /* priority/preference */
@@ -47,13 +48,13 @@ enum
FRA_UNUSED8,
FRA_TABLE, /* Extended table id */
FRA_FWMASK, /* mask for netfilter mark */
FRA_OIFNAME,
__FRA_MAX
};
#define FRA_MAX (__FRA_MAX - 1)
enum
{
enum {
FR_ACT_UNSPEC,
FR_ACT_TO_TBL, /* Pass to fixed table */
FR_ACT_GOTO, /* Jump to another rule */


@@ -23,16 +23,14 @@
* the BPF code definitions which need to match so you can share filters
*/
struct sock_filter /* Filter block */
{
struct sock_filter { /* Filter block */
__u16 code; /* Actual filter code */
__u8 jt; /* Jump true */
__u8 jf; /* Jump false */
__u32 k; /* Generic multiuse field */
};
struct sock_fprog /* Required for SO_ATTACH_FILTER. */
{
struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
unsigned short len; /* Number of filter blocks */
struct sock_filter __user *filter;
};
@@ -123,7 +121,9 @@ struct sock_fprog /* Required for SO_ATTACH_FILTER. */
#define SKF_AD_IFINDEX 8
#define SKF_AD_NLATTR 12
#define SKF_AD_NLATTR_NEST 16
#define SKF_AD_MAX 20
#define SKF_AD_MARK 20
#define SKF_AD_QUEUE 24
#define SKF_AD_MAX 28
#define SKF_NET_OFF (-0x100000)
#define SKF_LL_OFF (-0x200000)
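A hypothetical userspace sketch using the new SKF_AD_MARK ancillary offset: loads from SKF_AD_OFF + ... fetch packet metadata rather than packet bytes, so this four-instruction filter accepts only packets whose skb->mark equals 1 (assumed example):
	struct sock_filter insns[] = {
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_MARK),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 1, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* mark == 1: accept */
		BPF_STMT(BPF_RET | BPF_K, 0),		/* otherwise: drop */
	};
	struct sock_fprog prog = { .len = 4, .filter = insns };
	/* attached with setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)) */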


@@ -20,20 +20,6 @@
#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
{
u32 *dst = _dst;
__be32 *src = _src;
int i;
for (i = 0; i < size / 4; i++)
dst[i] = be32_to_cpu(src[i]);
}
static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
{
fw_memcpy_from_be32(_dst, _src, size);
}
#define CSR_REGISTER_BASE 0xfffff0000000ULL
/* register offsets are relative to CSR_REGISTER_BASE */
@@ -131,7 +117,7 @@ struct fw_card {
bool broadcast_channel_allocated;
u32 broadcast_channel;
u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
__be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
};
struct fw_attribute_group {
@@ -281,6 +267,7 @@ struct fw_packet {
void *payload;
size_t payload_length;
dma_addr_t payload_bus;
bool payload_mapped;
u32 timestamp;
/*


@@ -129,7 +129,7 @@ struct inodes_stat_t {
* WRITE_SYNC Like WRITE_SYNC_PLUG, but also unplugs the device
* immediately after submission. The write equivalent
* of READ_SYNC.
* WRITE_ODIRECT Special case write for O_DIRECT only.
* WRITE_ODIRECT_PLUG Special case write for O_DIRECT only.
* SWRITE_SYNC
* SWRITE_SYNC_PLUG Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer.
* See SWRITE.
@@ -151,7 +151,7 @@ struct inodes_stat_t {
#define READ_META (READ | (1 << BIO_RW_META))
#define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
#define WRITE_SYNC (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
#define WRITE_ODIRECT (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
#define WRITE_ODIRECT_PLUG (WRITE | (1 << BIO_RW_SYNCIO))
#define SWRITE_SYNC_PLUG \
(SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
#define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
@@ -304,6 +304,7 @@ struct inodes_stat_t {
#define BLKIOOPT _IO(0x12,121)
#define BLKALIGNOFF _IO(0x12,122)
#define BLKPBSZGET _IO(0x12,123)
#define BLKDISCARDZEROES _IO(0x12,124)
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */


@@ -91,6 +91,8 @@ struct fscache_operation {
#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
#define FSCACHE_OP_DEAD 6 /* op is now dead */
#define FSCACHE_OP_DEC_READ_CNT 7 /* decrement object->n_reads on destruction */
#define FSCACHE_OP_KEEP_FLAGS 0xc0 /* flags to keep when repurposing an op */
atomic_t usage;
unsigned debug_id; /* debugging ID */
@@ -102,6 +104,16 @@ struct fscache_operation {
/* operation releaser */
fscache_operation_release_t release;
#ifdef CONFIG_SLOW_WORK_PROC
const char *name; /* operation name */
const char *state; /* operation state */
#define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0)
#define fscache_set_op_state(OP, S) do { (OP)->state = (S); } while(0)
#else
#define fscache_set_op_name(OP, N) do { } while(0)
#define fscache_set_op_state(OP, S) do { } while(0)
#endif
};
extern atomic_t fscache_op_debug_id;
@@ -125,6 +137,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
op->debug_id = atomic_inc_return(&fscache_op_debug_id);
op->release = release;
INIT_LIST_HEAD(&op->pend_link);
fscache_set_op_state(op, "Init");
}
/**
@@ -221,8 +234,10 @@ struct fscache_cache_ops {
struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
struct fscache_cookie *cookie);
/* look up the object for a cookie */
void (*lookup_object)(struct fscache_object *object);
/* look up the object for a cookie
* - return -ETIMEDOUT to be requeued
*/
int (*lookup_object)(struct fscache_object *object);
/* finished looking up */
void (*lookup_complete)(struct fscache_object *object);
@@ -297,12 +312,14 @@ struct fscache_cookie {
atomic_t usage; /* number of users of this cookie */
atomic_t n_children; /* number of children of this cookie */
spinlock_t lock;
spinlock_t stores_lock; /* lock on page store tree */
struct hlist_head backing_objects; /* object(s) backing this file/index */
const struct fscache_cookie_def *def; /* definition */
struct fscache_cookie *parent; /* parent of this entry */
void *netfs_data; /* back pointer to netfs */
struct radix_tree_root stores; /* pages to be stored on this cookie */
#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */
unsigned long flags;
#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
@@ -337,6 +354,7 @@ struct fscache_object {
FSCACHE_OBJECT_RECYCLING, /* retiring object */
FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */
FSCACHE_OBJECT_DEAD, /* object is now dead */
FSCACHE_OBJECT__NSTATES
} state;
int debug_id; /* debugging ID */
@@ -345,6 +363,7 @@ struct fscache_object {
int n_obj_ops; /* number of object ops outstanding on object */
int n_in_progress; /* number of ops in progress */
int n_exclusive; /* number of exclusive ops queued */
atomic_t n_reads; /* number of read ops in progress */
spinlock_t lock; /* state and operations lock */
unsigned long lookup_jif; /* time at which lookup started */
@@ -358,6 +377,7 @@ struct fscache_object {
#define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */
#define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */
#define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */
#define FSCACHE_OBJECT_EVENTS_MASK 0x7f /* mask of all events */
unsigned long flags;
#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
@@ -373,7 +393,11 @@ struct fscache_object {
struct list_head dependents; /* FIFO of dependent objects */
struct list_head dep_link; /* link in parent's dependents list */
struct list_head pending_ops; /* unstarted operations on this object */
#ifdef CONFIG_FSCACHE_OBJECT_LIST
struct rb_node objlist_link; /* link in global object list */
#endif
pgoff_t store_limit; /* current storage limit */
loff_t store_limit_l; /* current storage limit */
};
extern const char *fscache_object_states[];
@@ -383,6 +407,10 @@ extern const char *fscache_object_states[];
(obj)->state >= FSCACHE_OBJECT_AVAILABLE && \
(obj)->state < FSCACHE_OBJECT_DYING)
#define fscache_object_is_dead(obj) \
(test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \
(obj)->state >= FSCACHE_OBJECT_DYING)
extern const struct slow_work_ops fscache_object_slow_work_ops;
/**
@@ -414,6 +442,7 @@ void fscache_object_init(struct fscache_object *object,
object->events = object->event_mask = 0;
object->flags = 0;
object->store_limit = 0;
object->store_limit_l = 0;
object->cache = cache;
object->cookie = cookie;
object->parent = NULL;
@@ -422,6 +451,12 @@ void fscache_object_init(struct fscache_object *object,
extern void fscache_object_lookup_negative(struct fscache_object *object);
extern void fscache_obtained_object(struct fscache_object *object);
#ifdef CONFIG_FSCACHE_OBJECT_LIST
extern void fscache_object_destroy(struct fscache_object *object);
#else
#define fscache_object_destroy(object) do {} while(0)
#endif
/**
* fscache_object_destroyed - Note destruction of an object in a cache
* @cache: The cache from which the object came
@@ -460,6 +495,7 @@ static inline void fscache_object_lookup_error(struct fscache_object *object)
static inline
void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
{
object->store_limit_l = i_size;
object->store_limit = i_size >> PAGE_SHIFT;
if (i_size & ~PAGE_MASK)
object->store_limit++;


@@ -202,6 +202,8 @@ extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *,
gfp_t);
/**
* fscache_register_netfs - Register a filesystem as desiring caching services
@@ -615,4 +617,29 @@ void fscache_wait_on_page_write(struct fscache_cookie *cookie,
__fscache_wait_on_page_write(cookie, page);
}
/**
* fscache_maybe_release_page - Consider releasing a page, cancelling a store
* @cookie: The cookie representing the cache object
* @page: The netfs page that is being cached.
* @gfp: The gfp flags passed to releasepage()
*
* Consider releasing a page for the vmscan algorithm, on behalf of the netfs's
* releasepage() call. A storage request on the page may be cancelled if it is
* not currently being processed.
*
* The function returns true if the page no longer has a storage request on it,
* and false if a storage request is left in place. If true is returned, the
* page will have been passed to fscache_uncache_page(). If false is returned
* the page cannot be freed yet.
*/
static inline
bool fscache_maybe_release_page(struct fscache_cookie *cookie,
struct page *page,
gfp_t gfp)
{
if (fscache_cookie_valid(cookie) && PageFsCache(page))
return __fscache_maybe_release_page(cookie, page, gfp);
return false;
}
#endif /* _LINUX_FSCACHE_H */
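A hedged sketch of how a network filesystem's ->releasepage() might use the new helper (hypothetical netfs code, assuming a per-inode fscache cookie field):
	static int mynetfs_releasepage(struct page *page, gfp_t gfp)
	{
		struct mynetfs_inode *ni = MYNETFS_I(page->mapping->host);

		/* false means a store to the cache is still pending and could
		 * not be cancelled, so the page must be kept for now */
		return fscache_maybe_release_page(ni->fscache_cookie, page, gfp);
	}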


@@ -117,12 +117,12 @@ struct ftrace_event_call {
struct dentry *dir;
struct trace_event *event;
int enabled;
int (*regfunc)(void *);
void (*unregfunc)(void *);
int (*regfunc)(struct ftrace_event_call *);
void (*unregfunc)(struct ftrace_event_call *);
int id;
int (*raw_init)(void);
int (*show_format)(struct ftrace_event_call *call,
struct trace_seq *s);
int (*raw_init)(struct ftrace_event_call *);
int (*show_format)(struct ftrace_event_call *,
struct trace_seq *);
int (*define_fields)(struct ftrace_event_call *);
struct list_head fields;
int filter_active;
@@ -131,20 +131,20 @@ struct ftrace_event_call {
void *data;
atomic_t profile_count;
int (*profile_enable)(void);
void (*profile_disable)(void);
int (*profile_enable)(struct ftrace_event_call *);
void (*profile_disable)(struct ftrace_event_call *);
};
#define FTRACE_MAX_PROFILE_SIZE 2048
extern char *trace_profile_buf;
extern char *trace_profile_buf_nmi;
extern char *perf_trace_buf;
extern char *perf_trace_buf_nmi;
#define MAX_FILTER_PRED 32
#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
extern void destroy_preds(struct ftrace_event_call *call);
extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
extern int filter_match_preds(struct event_filter *filter, void *rec);
extern int filter_current_check_discard(struct ring_buffer *buffer,
struct ftrace_event_call *call,
void *rec,
@@ -157,11 +157,12 @@ enum {
FILTER_PTR_STRING,
};
extern int trace_define_field(struct ftrace_event_call *call,
const char *type, const char *name,
int offset, int size, int is_signed,
int filter_type);
extern int trace_define_common_fields(struct ftrace_event_call *call);
extern int trace_define_field(struct ftrace_event_call *call, const char *type,
const char *name, int offset, int size,
int is_signed, int filter_type);
extern int trace_add_event_call(struct ftrace_event_call *call);
extern void trace_remove_event_call(struct ftrace_event_call *call);
#define is_signed_type(type) (((type)(-1)) < 0)
@@ -186,4 +187,13 @@ do { \
__trace_printk(ip, fmt, ##args); \
} while (0)
#ifdef CONFIG_EVENT_PROFILE
struct perf_event;
extern int ftrace_profile_enable(int event_id);
extern void ftrace_profile_disable(int event_id);
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
#endif
#endif /* _LINUX_FTRACE_EVENT_H */


@@ -18,13 +18,11 @@ enum {
* @bytes: number of seen bytes
* @packets: number of seen packets
*/
struct gnet_stats_basic
{
struct gnet_stats_basic {
__u64 bytes;
__u32 packets;
};
struct gnet_stats_basic_packed
{
struct gnet_stats_basic_packed {
__u64 bytes;
__u32 packets;
} __attribute__ ((packed));
@@ -34,8 +32,7 @@ struct gnet_stats_basic_packed
* @bps: current byte rate
* @pps: current packet rate
*/
struct gnet_stats_rate_est
{
struct gnet_stats_rate_est {
__u32 bps;
__u32 pps;
};
@@ -48,8 +45,7 @@ struct gnet_stats_rate_est
* @requeues: number of requeues
* @overlimits: number of enqueues over the limit
*/
struct gnet_stats_queue
{
struct gnet_stats_queue {
__u32 qlen;
__u32 backlog;
__u32 drops;
@@ -62,8 +58,7 @@ struct gnet_stats_queue
* @interval: sampling period
* @ewma_log: the log of measurement window weight
*/
struct gnet_estimator
{
struct gnet_estimator {
signed char interval;
unsigned char ewma_log;
};

View File

@@ -91,6 +91,7 @@ struct hd_struct {
sector_t start_sect;
sector_t nr_sects;
sector_t alignment_offset;
unsigned int discard_alignment;
struct device __dev;
struct kobject *holder_dir;
int policy, partno;


@@ -81,7 +81,11 @@ struct gfs2_meta_header {
__be32 mh_type;
__be64 __pad0; /* Was generation number in gfs1 */
__be32 mh_format;
__be32 __pad1; /* Was incarnation number in gfs1 */
/* This union is to keep userspace happy */
union {
__be32 mh_jid; /* Was incarnation number in gfs1 */
__be32 __pad1;
};
};
/*


@@ -139,10 +139,34 @@ static inline void account_system_vtime(struct task_struct *tsk)
#endif
#if defined(CONFIG_NO_HZ)
#if defined(CONFIG_TINY_RCU)
extern void rcu_enter_nohz(void);
extern void rcu_exit_nohz(void);
static inline void rcu_irq_enter(void)
{
rcu_exit_nohz();
}
static inline void rcu_irq_exit(void)
{
rcu_enter_nohz();
}
static inline void rcu_nmi_enter(void)
{
}
static inline void rcu_nmi_exit(void)
{
}
#else
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif
#else
# define rcu_irq_enter() do { } while (0)
# define rcu_irq_exit() do { } while (0)

View File

@@ -0,0 +1,131 @@
#ifndef _LINUX_HW_BREAKPOINT_H
#define _LINUX_HW_BREAKPOINT_H
enum {
HW_BREAKPOINT_LEN_1 = 1,
HW_BREAKPOINT_LEN_2 = 2,
HW_BREAKPOINT_LEN_4 = 4,
HW_BREAKPOINT_LEN_8 = 8,
};
enum {
HW_BREAKPOINT_R = 1,
HW_BREAKPOINT_W = 2,
HW_BREAKPOINT_X = 4,
};
#ifdef __KERNEL__
#include <linux/perf_event.h>
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/* As it's for in-kernel or ptrace use, we want it to be pinned */
#define DEFINE_BREAKPOINT_ATTR(name) \
struct perf_event_attr name = { \
.type = PERF_TYPE_BREAKPOINT, \
.size = sizeof(name), \
.pinned = 1, \
};
static inline void hw_breakpoint_init(struct perf_event_attr *attr)
{
attr->type = PERF_TYPE_BREAKPOINT;
attr->size = sizeof(*attr);
attr->pinned = 1;
}
static inline unsigned long hw_breakpoint_addr(struct perf_event *bp)
{
return bp->attr.bp_addr;
}
static inline int hw_breakpoint_type(struct perf_event *bp)
{
return bp->attr.bp_type;
}
static inline int hw_breakpoint_len(struct perf_event *bp)
{
return bp->attr.bp_len;
}
extern struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
perf_callback_t triggered,
struct task_struct *tsk);
/* FIXME: only change from the attr, and don't unregister */
extern struct perf_event *
modify_user_hw_breakpoint(struct perf_event *bp,
struct perf_event_attr *attr,
perf_callback_t triggered,
struct task_struct *tsk);
/*
* Kernel breakpoints are not associated with any particular thread.
*/
extern struct perf_event *
register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
perf_callback_t triggered,
int cpu);
extern struct perf_event **
register_wide_hw_breakpoint(struct perf_event_attr *attr,
perf_callback_t triggered);
extern int register_perf_hw_breakpoint(struct perf_event *bp);
extern int __register_perf_hw_breakpoint(struct perf_event *bp);
extern void unregister_hw_breakpoint(struct perf_event *bp);
extern void unregister_wide_hw_breakpoint(struct perf_event **cpu_events);
extern int reserve_bp_slot(struct perf_event *bp);
extern void release_bp_slot(struct perf_event *bp);
extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp)
{
return &bp->hw.info;
}
#else /* !CONFIG_HAVE_HW_BREAKPOINT */
static inline struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
perf_callback_t triggered,
struct task_struct *tsk) { return NULL; }
static inline struct perf_event *
modify_user_hw_breakpoint(struct perf_event *bp,
struct perf_event_attr *attr,
perf_callback_t triggered,
struct task_struct *tsk) { return NULL; }
static inline struct perf_event *
register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
perf_callback_t triggered,
int cpu) { return NULL; }
static inline struct perf_event **
register_wide_hw_breakpoint(struct perf_event_attr *attr,
perf_callback_t triggered) { return NULL; }
static inline int
register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
static inline int
__register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
static inline void unregister_hw_breakpoint(struct perf_event *bp) { }
static inline void
unregister_wide_hw_breakpoint(struct perf_event **cpu_events) { }
static inline int
reserve_bp_slot(struct perf_event *bp) {return -ENOSYS; }
static inline void release_bp_slot(struct perf_event *bp) { }
static inline void flush_ptrace_hw_breakpoint(struct task_struct *tsk) { }
static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp)
{
return NULL;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* __KERNEL__ */
#endif /* _LINUX_HW_BREAKPOINT_H */
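
For illustration, a minimal sketch of how a kernel-wide data breakpoint could be armed through this new interface from a module. The watched symbol, the callback body, and the exact perf_callback_t signature are assumptions for the sketch, not taken from this header; error handling assumes the usual ERR_PTR convention.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/kallsyms.h>
#include <linux/hw_breakpoint.h>

static struct perf_event **wide_bp;

/* The exact perf_callback_t signature is assumed here for the sketch. */
static void wide_bp_handler(struct perf_event *bp, void *data)
{
        pr_info("write hit at %lx\n", hw_breakpoint_addr(bp));
}

static int __init wide_bp_init(void)
{
        struct perf_event_attr attr;

        hw_breakpoint_init(&attr);
        attr.bp_addr = kallsyms_lookup_name("jiffies");  /* illustrative target */
        attr.bp_len  = HW_BREAKPOINT_LEN_4;
        attr.bp_type = HW_BREAKPOINT_W;

        wide_bp = register_wide_hw_breakpoint(&attr, wide_bp_handler);
        return IS_ERR(wide_bp) ? PTR_ERR(wide_bp) : 0;
}

static void __exit wide_bp_exit(void)
{
        unregister_wide_hw_breakpoint(wide_bp);
}

module_init(wide_bp_init);
module_exit(wide_bp_exit);
MODULE_LICENSE("GPL");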

View File

@@ -110,7 +110,7 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
* @driver: Device driver model driver
* @id_table: List of I2C devices supported by this driver
* @detect: Callback for device detection
* @address_data: The I2C addresses to probe, ignore or force (for detect)
* @address_data: The I2C addresses to probe (for detect)
* @clients: List of detected clients we created (for i2c-core use only)
*
* The driver.owner field should be set to the module owner of this driver.
@@ -338,8 +338,7 @@ struct i2c_adapter {
void *algo_data;
/* data fields that are valid for all devices */
u8 level; /* nesting level for lockdep */
struct mutex bus_lock;
struct rt_mutex bus_lock;
int timeout; /* in jiffies */
int retries;
@@ -367,7 +366,7 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
*/
static inline void i2c_lock_adapter(struct i2c_adapter *adapter)
{
mutex_lock(&adapter->bus_lock);
rt_mutex_lock(&adapter->bus_lock);
}
/**
@@ -376,7 +375,7 @@ static inline void i2c_lock_adapter(struct i2c_adapter *adapter)
*/
static inline void i2c_unlock_adapter(struct i2c_adapter *adapter)
{
mutex_unlock(&adapter->bus_lock);
rt_mutex_unlock(&adapter->bus_lock);
}
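
For illustration, a minimal sketch of the new lock helpers, assuming a board file that flips a GPIO-controlled bus switch; the GPIO number is made up. No i2c_transfer()/SMBus call may run inside the locked region, since those take the same bus_lock internally.

#include <linux/i2c.h>
#include <linux/gpio.h>

#define BUS_SWITCH_GPIO 42      /* illustrative pin, not a real define */

static void select_bus_branch(struct i2c_adapter *adap, int branch)
{
        i2c_lock_adapter(adap);         /* now an rt_mutex underneath */
        gpio_set_value(BUS_SWITCH_GPIO, branch);
        i2c_unlock_adapter(adap);
}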
/*flags for the client struct: */
@@ -398,9 +397,6 @@ static inline void i2c_unlock_adapter(struct i2c_adapter *adapter)
*/
struct i2c_client_address_data {
const unsigned short *normal_i2c;
const unsigned short *probe;
const unsigned short *ignore;
const unsigned short * const *forces;
};
/* Internal numbers to terminate lists */
@@ -614,134 +610,48 @@ union i2c_smbus_data {
module_param_array(var, short, &var##_num, 0); \
MODULE_PARM_DESC(var, desc)
#define I2C_CLIENT_MODULE_PARM_FORCE(name) \
I2C_CLIENT_MODULE_PARM(force_##name, \
"List of adapter,address pairs which are " \
"unquestionably assumed to contain a `" \
# name "' chip")
#define I2C_CLIENT_INSMOD_COMMON \
I2C_CLIENT_MODULE_PARM(probe, "List of adapter,address pairs to scan " \
"additionally"); \
I2C_CLIENT_MODULE_PARM(ignore, "List of adapter,address pairs not to " \
"scan"); \
static const struct i2c_client_address_data addr_data = { \
.normal_i2c = normal_i2c, \
.probe = probe, \
.ignore = ignore, \
.forces = forces, \
}
#define I2C_CLIENT_FORCE_TEXT \
"List of adapter,address pairs to boldly assume to be present"
/* These are the ones you want to use in your own drivers. Pick the one
which matches the number of devices the driver differentiates between. */
#define I2C_CLIENT_INSMOD \
I2C_CLIENT_MODULE_PARM(force, I2C_CLIENT_FORCE_TEXT); \
static const unsigned short * const forces[] = { force, NULL }; \
I2C_CLIENT_INSMOD_COMMON
#define I2C_CLIENT_INSMOD_1(chip1) \
enum chips { any_chip, chip1 }; \
I2C_CLIENT_MODULE_PARM(force, I2C_CLIENT_FORCE_TEXT); \
I2C_CLIENT_MODULE_PARM_FORCE(chip1); \
static const unsigned short * const forces[] = { force, \
force_##chip1, NULL }; \
I2C_CLIENT_INSMOD_COMMON
#define I2C_CLIENT_INSMOD_2(chip1, chip2) \
enum chips { any_chip, chip1, chip2 }; \
I2C_CLIENT_MODULE_PARM(force, I2C_CLIENT_FORCE_TEXT); \
I2C_CLIENT_MODULE_PARM_FORCE(chip1); \
I2C_CLIENT_MODULE_PARM_FORCE(chip2); \
static const unsigned short * const forces[] = { force, \
force_##chip1, force_##chip2, NULL }; \
I2C_CLIENT_INSMOD_COMMON
#define I2C_CLIENT_INSMOD_3(chip1, chip2, chip3) \
enum chips { any_chip, chip1, chip2, chip3 }; \
I2C_CLIENT_MODULE_PARM(force, I2C_CLIENT_FORCE_TEXT); \
I2C_CLIENT_MODULE_PARM_FORCE(chip1); \
I2C_CLIENT_MODULE_PARM_FORCE(chip2); \
I2C_CLIENT_MODULE_PARM_FORCE(chip3); \
static const unsigned short * const forces[] = { force, \
force_##chip1, force_##chip2, force_##chip3, NULL }; \
I2C_CLIENT_INSMOD_COMMON
#define I2C_CLIENT_INSMOD_4(chip1, chip2, chip3, chip4) \
enum chips { any_chip, chip1, chip2, chip3, chip4 }; \
I2C_CLIENT_MODULE_PARM(force, I2C_CLIENT_FORCE_TEXT); \
I2C_CLIENT_MODULE_PARM_FORCE(chip1); \
I2C_CLIENT_MODULE_PARM_FORCE(chip2); \
I2C_CLIENT_MODULE_PARM_FORCE(chip3); \
I2C_CLIENT_MODULE_PARM_FORCE(chip4); \
static const unsigned short * const forces[] = { force, \
force_##chip1, force_##chip2, force_##chip3, \
force_##chip4, NULL}; \
I2C_CLIENT_INSMOD_COMMON
#define I2C_CLIENT_INSMOD_5(chip1, chip2, chip3, chip4, chip5) \
enum chips { any_chip, chip1, chip2, chip3, chip4, chip5 }; \
I2C_CLIENT_MODULE_PARM(force, I2C_CLIENT_FORCE_TEXT); \
I2C_CLIENT_MODULE_PARM_FORCE(chip1); \
I2C_CLIENT_MODULE_PARM_FORCE(chip2); \
I2C_CLIENT_MODULE_PARM_FORCE(chip3); \
I2C_CLIENT_MODULE_PARM_FORCE(chip4); \
I2C_CLIENT_MODULE_PARM_FORCE(chip5); \
static const unsigned short * const forces[] = { force, \
force_##chip1, force_##chip2, force_##chip3, \
force_##chip4, force_##chip5, NULL }; \
I2C_CLIENT_INSMOD_COMMON
#define I2C_CLIENT_INSMOD_6(chip1, chip2, chip3, chip4, chip5, chip6) \
enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6 }; \
I2C_CLIENT_MODULE_PARM(force, I2C_CLIENT_FORCE_TEXT); \
I2C_CLIENT_MODULE_PARM_FORCE(chip1); \
I2C_CLIENT_MODULE_PARM_FORCE(chip2); \
I2C_CLIENT_MODULE_PARM_FORCE(chip3); \
I2C_CLIENT_MODULE_PARM_FORCE(chip4); \
I2C_CLIENT_MODULE_PARM_FORCE(chip5); \
I2C_CLIENT_MODULE_PARM_FORCE(chip6); \
static const unsigned short * const forces[] = { force, \
force_##chip1, force_##chip2, force_##chip3, \
force_##chip4, force_##chip5, force_##chip6, NULL }; \
I2C_CLIENT_INSMOD_COMMON
#define I2C_CLIENT_INSMOD_7(chip1, chip2, chip3, chip4, chip5, chip6, chip7) \
enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \
chip7 }; \
I2C_CLIENT_MODULE_PARM(force, I2C_CLIENT_FORCE_TEXT); \
I2C_CLIENT_MODULE_PARM_FORCE(chip1); \
I2C_CLIENT_MODULE_PARM_FORCE(chip2); \
I2C_CLIENT_MODULE_PARM_FORCE(chip3); \
I2C_CLIENT_MODULE_PARM_FORCE(chip4); \
I2C_CLIENT_MODULE_PARM_FORCE(chip5); \
I2C_CLIENT_MODULE_PARM_FORCE(chip6); \
I2C_CLIENT_MODULE_PARM_FORCE(chip7); \
static const unsigned short * const forces[] = { force, \
force_##chip1, force_##chip2, force_##chip3, \
force_##chip4, force_##chip5, force_##chip6, \
force_##chip7, NULL }; \
I2C_CLIENT_INSMOD_COMMON
#define I2C_CLIENT_INSMOD_8(chip1, chip2, chip3, chip4, chip5, chip6, chip7, chip8) \
enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \
chip7, chip8 }; \
I2C_CLIENT_MODULE_PARM(force, I2C_CLIENT_FORCE_TEXT); \
I2C_CLIENT_MODULE_PARM_FORCE(chip1); \
I2C_CLIENT_MODULE_PARM_FORCE(chip2); \
I2C_CLIENT_MODULE_PARM_FORCE(chip3); \
I2C_CLIENT_MODULE_PARM_FORCE(chip4); \
I2C_CLIENT_MODULE_PARM_FORCE(chip5); \
I2C_CLIENT_MODULE_PARM_FORCE(chip6); \
I2C_CLIENT_MODULE_PARM_FORCE(chip7); \
I2C_CLIENT_MODULE_PARM_FORCE(chip8); \
static const unsigned short * const forces[] = { force, \
force_##chip1, force_##chip2, force_##chip3, \
force_##chip4, force_##chip5, force_##chip6, \
force_##chip7, force_##chip8, NULL }; \
I2C_CLIENT_INSMOD_COMMON
#endif /* __KERNEL__ */
#endif /* _LINUX_I2C_H */

View File

@@ -401,6 +401,24 @@ struct twl4030_power_data {
extern void twl4030_power_init(struct twl4030_power_data *triton2_scripts);
struct twl4030_codec_audio_data {
unsigned int audio_mclk;
unsigned int ramp_delay_value;
unsigned int hs_extmute:1;
void (*set_hs_extmute)(int mute);
};
struct twl4030_codec_vibra_data {
unsigned int audio_mclk;
unsigned int coexist;
};
struct twl4030_codec_data {
unsigned int audio_mclk;
struct twl4030_codec_audio_data *audio;
struct twl4030_codec_vibra_data *vibra;
};
struct twl4030_platform_data {
unsigned irq_base, irq_end;
struct twl4030_bci_platform_data *bci;
@@ -409,6 +427,7 @@ struct twl4030_platform_data {
struct twl4030_keypad_data *keypad;
struct twl4030_usb_data *usb;
struct twl4030_power_data *power;
struct twl4030_codec_data *codec;
/* LDO regulators */
struct regulator_init_data *vdac;

include/linux/i82593.h (new file, 229 lines)
View File

@@ -0,0 +1,229 @@
/*
* Definitions for Intel 82593 CSMA/CD Core LAN Controller
* The definitions are taken from the 1992 users manual with Intel
* order number 297125-001.
*
* /usr/src/pc/RCS/i82593.h,v 1.1 1996/07/17 15:23:12 root Exp
*
* Copyright 1994, Anders Klemets <klemets@it.kth.se>
*
* HISTORY
* i82593.h,v
* Revision 1.4 2005/11/4 09:15:00 baroniunas
* Modified copyright with permission of author as follows:
*
* "If I82539.H is the only file with my copyright statement
* that is included in the Source Forge project, then you have
* my approval to change the copyright statement to be a GPL
* license, in the way you proposed on October 10."
*
* Revision 1.1 1996/07/17 15:23:12 root
* Initial revision
*
* Revision 1.3 1995/04/05 15:13:58 adj
* Initial alpha release
*
* Revision 1.2 1994/06/16 23:57:31 klemets
* Mirrored all the fields in the configuration block.
*
* Revision 1.1 1994/06/02 20:25:34 klemets
* Initial revision
*
*
*/
#ifndef _I82593_H
#define _I82593_H
/* Intel 82593 CSMA/CD Core LAN Controller */
/* Port 0 Command Register definitions */
/* Execution operations */
#define OP0_NOP 0 /* CHNL = 0 */
#define OP0_SWIT_TO_PORT_1 0 /* CHNL = 1 */
#define OP0_IA_SETUP 1
#define OP0_CONFIGURE 2
#define OP0_MC_SETUP 3
#define OP0_TRANSMIT 4
#define OP0_TDR 5
#define OP0_DUMP 6
#define OP0_DIAGNOSE 7
#define OP0_TRANSMIT_NO_CRC 9
#define OP0_RETRANSMIT 12
#define OP0_ABORT 13
/* Reception operations */
#define OP0_RCV_ENABLE 8
#define OP0_RCV_DISABLE 10
#define OP0_STOP_RCV 11
/* Status pointer control operations */
#define OP0_FIX_PTR 15 /* CHNL = 1 */
#define OP0_RLS_PTR 15 /* CHNL = 0 */
#define OP0_RESET 14
#define CR0_CHNL (1 << 4) /* 0=Channel 0, 1=Channel 1 */
#define CR0_STATUS_0 0x00
#define CR0_STATUS_1 0x20
#define CR0_STATUS_2 0x40
#define CR0_STATUS_3 0x60
#define CR0_INT_ACK (1 << 7) /* 0=No ack, 1=acknowledge */
/* Port 0 Status Register definitions */
#define SR0_NO_RESULT 0 /* dummy */
#define SR0_EVENT_MASK 0x0f
#define SR0_IA_SETUP_DONE 1
#define SR0_CONFIGURE_DONE 2
#define SR0_MC_SETUP_DONE 3
#define SR0_TRANSMIT_DONE 4
#define SR0_TDR_DONE 5
#define SR0_DUMP_DONE 6
#define SR0_DIAGNOSE_PASSED 7
#define SR0_TRANSMIT_NO_CRC_DONE 9
#define SR0_RETRANSMIT_DONE 12
#define SR0_EXECUTION_ABORTED 13
#define SR0_END_OF_FRAME 8
#define SR0_RECEPTION_ABORTED 10
#define SR0_DIAGNOSE_FAILED 15
#define SR0_STOP_REG_HIT 11
#define SR0_CHNL (1 << 4)
#define SR0_EXECUTION (1 << 5)
#define SR0_RECEPTION (1 << 6)
#define SR0_INTERRUPT (1 << 7)
#define SR0_BOTH_RX_TX (SR0_EXECUTION | SR0_RECEPTION)
#define SR3_EXEC_STATE_MASK 0x03
#define SR3_EXEC_IDLE 0
#define SR3_TX_ABORT_IN_PROGRESS 1
#define SR3_EXEC_ACTIVE 2
#define SR3_ABORT_IN_PROGRESS 3
#define SR3_EXEC_CHNL (1 << 2)
#define SR3_STP_ON_NO_RSRC (1 << 3)
#define SR3_RCVING_NO_RSRC (1 << 4)
#define SR3_RCV_STATE_MASK 0x60
#define SR3_RCV_IDLE 0x00
#define SR3_RCV_READY 0x20
#define SR3_RCV_ACTIVE 0x40
#define SR3_RCV_STOP_IN_PROG 0x60
#define SR3_RCV_CHNL (1 << 7)
/* Port 1 Command Register definitions */
#define OP1_NOP 0
#define OP1_SWIT_TO_PORT_0 1
#define OP1_INT_DISABLE 2
#define OP1_INT_ENABLE 3
#define OP1_SET_TS 5
#define OP1_RST_TS 7
#define OP1_POWER_DOWN 8
#define OP1_RESET_RING_MNGMT 11
#define OP1_RESET 14
#define OP1_SEL_RST 15
#define CR1_STATUS_4 0x00
#define CR1_STATUS_5 0x20
#define CR1_STATUS_6 0x40
#define CR1_STOP_REG_UPDATE (1 << 7)
/* Receive frame status bits */
#define RX_RCLD (1 << 0)
#define RX_IA_MATCH (1 << 1)
#define RX_NO_AD_MATCH (1 << 2)
#define RX_NO_SFD (1 << 3)
#define RX_SRT_FRM (1 << 7)
#define RX_OVRRUN (1 << 8)
#define RX_ALG_ERR (1 << 10)
#define RX_CRC_ERR (1 << 11)
#define RX_LEN_ERR (1 << 12)
#define RX_RCV_OK (1 << 13)
#define RX_TYP_LEN (1 << 15)
/* Transmit status bits */
#define TX_NCOL_MASK 0x0f
#define TX_FRTL (1 << 4)
#define TX_MAX_COL (1 << 5)
#define TX_HRT_BEAT (1 << 6)
#define TX_DEFER (1 << 7)
#define TX_UND_RUN (1 << 8)
#define TX_LOST_CTS (1 << 9)
#define TX_LOST_CRS (1 << 10)
#define TX_LTCOL (1 << 11)
#define TX_OK (1 << 13)
#define TX_COLL (1 << 15)
struct i82593_conf_block {
u_char fifo_limit : 4,
forgnesi : 1,
fifo_32 : 1,
d6mod : 1,
throttle_enb : 1;
u_char throttle : 6,
cntrxint : 1,
contin : 1;
u_char addr_len : 3,
acloc : 1,
preamb_len : 2,
loopback : 2;
u_char lin_prio : 3,
tbofstop : 1,
exp_prio : 3,
bof_met : 1;
u_char : 4,
ifrm_spc : 4;
u_char : 5,
slottim_low : 3;
u_char slottim_hi : 3,
: 1,
max_retr : 4;
u_char prmisc : 1,
bc_dis : 1,
: 1,
crs_1 : 1,
nocrc_ins : 1,
crc_1632 : 1,
: 1,
crs_cdt : 1;
u_char cs_filter : 3,
crs_src : 1,
cd_filter : 3,
: 1;
u_char : 2,
min_fr_len : 6;
u_char lng_typ : 1,
lng_fld : 1,
rxcrc_xf : 1,
artx : 1,
sarec : 1,
tx_jabber : 1, /* why is this called max_len in the manual? */
hash_1 : 1,
lbpkpol : 1;
u_char : 6,
fdx : 1,
: 1;
u_char dummy_6 : 6, /* supposed to be ones */
mult_ia : 1,
dis_bof : 1;
u_char dummy_1 : 1, /* supposed to be one */
tx_ifs_retrig : 2,
mc_all : 1,
rcv_mon : 2,
frag_acpt : 1,
tstrttrs : 1;
u_char fretx : 1,
runt_eop : 1,
hw_sw_pin : 1,
big_endn : 1,
syncrqs : 1,
sttlen : 1,
tx_eop : 1,
rx_eop : 1;
u_char rbuf_size : 5,
rcvstop : 1,
: 2;
};
#define I82593_MAX_MULTICAST_ADDRESSES 128 /* Hardware hashed filter */
#endif /* _I82593_H */

View File

@@ -115,7 +115,6 @@
#define IEEE80211_MAX_SSID_LEN 32
#define IEEE80211_MAX_MESH_ID_LEN 32
#define IEEE80211_MESH_CONFIG_LEN 24
#define IEEE80211_QOS_CTL_LEN 2
#define IEEE80211_QOS_CTL_TID_MASK 0x000F
@@ -472,7 +471,7 @@ static inline int ieee80211_is_cfendack(__le16 fc)
}
/**
* ieee80211_is_nullfunc - check if FTYPE=IEEE80211_FTYPE_DATA and STYPE=IEEE80211_STYPE_NULLFUNC
* ieee80211_is_nullfunc - check if frame is a regular (non-QoS) nullfunc frame
* @fc: frame control bytes in little-endian byteorder
*/
static inline int ieee80211_is_nullfunc(__le16 fc)
@@ -481,6 +480,16 @@ static inline int ieee80211_is_nullfunc(__le16 fc)
cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC);
}
/**
* ieee80211_is_qos_nullfunc - check if frame is a QoS nullfunc frame
* @fc: frame control bytes in little-endian byteorder
*/
static inline int ieee80211_is_qos_nullfunc(__le16 fc)
{
return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
}
struct ieee80211s_hdr {
u8 flags;
u8 ttl;
@@ -544,6 +553,35 @@ struct ieee80211_tim_ie {
u8 virtual_map[1];
} __attribute__ ((packed));
/**
* struct ieee80211_meshconf_ie
*
* This structure refers to "Mesh Configuration information element"
*/
struct ieee80211_meshconf_ie {
u8 meshconf_psel;
u8 meshconf_pmetric;
u8 meshconf_congest;
u8 meshconf_synch;
u8 meshconf_auth;
u8 meshconf_form;
u8 meshconf_cap;
} __attribute__ ((packed));
/**
* struct ieee80211_rann_ie
*
* This structure refers to "Root Announcement information element"
*/
struct ieee80211_rann_ie {
u8 rann_flags;
u8 rann_hopcount;
u8 rann_ttl;
u8 rann_addr[6];
u32 rann_seq;
u32 rann_metric;
} __attribute__ ((packed));
#define WLAN_SA_QUERY_TR_ID_LEN 2
struct ieee80211_mgmt {
@@ -1060,6 +1098,7 @@ enum ieee80211_eid {
WLAN_EID_PREQ = 68,
WLAN_EID_PREP = 69,
WLAN_EID_PERR = 70,
WLAN_EID_RANN = 49, /* compatible with FreeBSD */
/* 802.11h */
WLAN_EID_PWR_CONSTRAINT = 32,
WLAN_EID_PWR_CAPABILITY = 33,
@@ -1227,6 +1266,8 @@ enum ieee80211_sa_query_action {
#define WLAN_MAX_KEY_LEN 32
#define WLAN_PMKID_LEN 16
/**
* ieee80211_get_qos_ctl - get pointer to qos control bytes
* @hdr: the frame

View File

@@ -70,6 +70,7 @@
#define IFF_XMIT_DST_RELEASE 0x400 /* dev_hard_start_xmit() is allowed to
* release skb->dst
*/
#define IFF_DONT_BRIDGE 0x800 /* disallow bridging this ether dev */
#define IF_GET_IFACE 0x0001 /* for querying only */
#define IF_GET_PROTO 0x0002
@@ -125,8 +126,7 @@ enum {
* being very small might be worth keeping for clean configuration.
*/
struct ifmap
{
struct ifmap {
unsigned long mem_start;
unsigned long mem_end;
unsigned short base_addr;
@@ -136,8 +136,7 @@ struct ifmap
/* 3 bytes spare */
};
struct if_settings
{
struct if_settings {
unsigned int type; /* Type of physical device or protocol */
unsigned int size; /* Size of the data allocated by the caller */
union {
@@ -161,8 +160,7 @@ struct if_settings
* remainder may be interface specific.
*/
struct ifreq
{
struct ifreq {
#define IFHWADDRLEN 6
union
{
@@ -211,11 +209,9 @@ struct ifreq
* must know all networks accessible).
*/
struct ifconf
{
struct ifconf {
int ifc_len; /* size of buffer */
union
{
union {
char __user *ifcu_buf;
struct ifreq __user *ifcu_req;
} ifc_ifcu;

View File

@@ -4,8 +4,7 @@
#include <linux/types.h>
#include <linux/netlink.h>
struct ifaddrmsg
{
struct ifaddrmsg {
__u8 ifa_family;
__u8 ifa_prefixlen; /* The prefix length */
__u8 ifa_flags; /* Flags */
@@ -20,8 +19,7 @@ struct ifaddrmsg
* but for point-to-point IFA_ADDRESS is DESTINATION address,
* local address is supplied in IFA_LOCAL attribute.
*/
enum
{
enum {
IFA_UNSPEC,
IFA_ADDRESS,
IFA_LOCAL,
@@ -47,8 +45,7 @@ enum
#define IFA_F_TENTATIVE 0x40
#define IFA_F_PERMANENT 0x80
struct ifa_cacheinfo
{
struct ifa_cacheinfo {
__u32 ifa_prefered;
__u32 ifa_valid;
__u32 cstamp; /* created timestamp, hundredths of seconds */

View File

@@ -12,8 +12,7 @@
#include <linux/types.h>
struct ifaddrlblmsg
{
struct ifaddrlblmsg {
__u8 ifal_family; /* Address family */
__u8 __ifal_reserved; /* Reserved */
__u8 ifal_prefixlen; /* Prefix length */
@@ -22,8 +21,7 @@ struct ifaddrlblmsg
__u32 ifal_seq; /* sequence number */
};
enum
{
enum {
IFAL_ADDRESS = 1,
IFAL_LABEL = 2,
__IFAL_MAX

View File

@@ -56,8 +56,7 @@
/*
* The RFC1201-specific components of an arcnet packet header.
*/
struct arc_rfc1201
{
struct arc_rfc1201 {
__u8 proto; /* protocol ID field - varies */
__u8 split_flag; /* for use with split packets */
__be16 sequence; /* sequence number */
@@ -69,8 +68,7 @@ struct arc_rfc1201
/*
* The RFC1051-specific components.
*/
struct arc_rfc1051
{
struct arc_rfc1051 {
__u8 proto; /* ARC_P_RFC1051_ARP/RFC1051_IP */
__u8 payload[0]; /* 507 bytes */
};
@@ -81,8 +79,7 @@ struct arc_rfc1051
* The ethernet-encap-specific components. We have a real ethernet header
* and some data.
*/
struct arc_eth_encap
{
struct arc_eth_encap {
__u8 proto; /* Always ARC_P_ETHER */
struct ethhdr eth; /* standard ethernet header (yuck!) */
__u8 payload[0]; /* 493 bytes */
@@ -90,8 +87,7 @@ struct arc_eth_encap
#define ETH_ENCAP_HDR_SIZE 14
struct arc_cap
{
struct arc_cap {
__u8 proto;
__u8 cookie[sizeof(int)]; /* Actually NOT sent over the network */
union {
@@ -108,8 +104,7 @@ struct arc_cap
* the _end_ of the 512-byte buffer. We hide this complexity inside the
* driver.
*/
struct arc_hardware
{
struct arc_hardware {
__u8 source, /* source ARCnet - filled in automagically */
dest, /* destination ARCnet - 0 for broadcast */
offset[2]; /* offset bytes (some weird semantics) */
@@ -120,8 +115,7 @@ struct arc_hardware
* This is an ARCnet frame header, as seen by the kernel (and userspace,
* when you do a raw packet capture).
*/
struct archdr
{
struct archdr {
/* hardware requirements */
struct arc_hardware hard;

View File

@@ -133,8 +133,7 @@ struct arpreq_old {
* This structure defines an ethernet arp header.
*/
struct arphdr
{
struct arphdr {
__be16 ar_hrd; /* format of hardware address */
__be16 ar_pro; /* format of protocol address */
unsigned char ar_hln; /* length of hardware address */

View File

@@ -94,8 +94,7 @@ typedef struct ifbond {
__s32 miimon;
} ifbond;
typedef struct ifslave
{
typedef struct ifslave {
__s32 slave_id; /* Used as an IN param to the BOND_SLAVE_INFO_QUERY ioctl */
char slave_name[IFNAMSIZ];
__s8 link;

View File

@@ -49,8 +49,7 @@
#define BR_STATE_FORWARDING 3
#define BR_STATE_BLOCKING 4
struct __bridge_info
{
struct __bridge_info {
__u64 designated_root;
__u64 bridge_id;
__u32 root_path_cost;
@@ -72,8 +71,7 @@ struct __bridge_info
__u32 gc_timer_value;
};
struct __port_info
{
struct __port_info {
__u64 designated_root;
__u64 designated_bridge;
__u16 port_id;
@@ -89,8 +87,7 @@ struct __port_info
__u32 hold_timer_value;
};
struct __fdb_entry
{
struct __fdb_entry {
__u8 mac_addr[6];
__u8 port_no;
__u8 is_local;

View File

@@ -5,14 +5,12 @@
/* User visible stuff. Glibc provides its own but libc5 folk will use these */
struct ec_addr
{
struct ec_addr {
unsigned char station; /* Station number. */
unsigned char net; /* Network number. */
};
struct sockaddr_ec
{
struct sockaddr_ec {
unsigned short sec_family;
unsigned char port; /* Port number. */
unsigned char cb; /* Control/flag byte. */
@@ -37,8 +35,7 @@ struct sockaddr_ec
#define EC_HLEN 6
/* This is what an Econet frame looks like on the wire. */
struct ec_framehdr
{
struct ec_framehdr {
unsigned char dst_stn;
unsigned char dst_net;
unsigned char src_stn;
@@ -62,8 +59,7 @@ static inline struct econet_sock *ec_sk(const struct sock *sk)
return (struct econet_sock *)sk;
}
struct ec_device
{
struct ec_device {
unsigned char station, net; /* Econet protocol address */
};

View File

@@ -136,10 +136,6 @@ extern struct ctl_table ether_table[];
extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
/*
* Display a 6 byte device address (MAC) in a readable format.
*/
extern char *print_mac(char *buf, const unsigned char *addr) __deprecated;
#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
#define MAC_BUF_SIZE 18
#define DECLARE_MAC_BUF(var) char var[MAC_BUF_SIZE]

View File

@@ -63,36 +63,32 @@
#define FDDI_UI_CMD 0x03
/* Define 802.2 Type 1 header */
struct fddi_8022_1_hdr
{
struct fddi_8022_1_hdr {
__u8 dsap; /* destination service access point */
__u8 ssap; /* source service access point */
__u8 ctrl; /* control byte #1 */
} __attribute__ ((packed));
} __attribute__ ((packed));
/* Define 802.2 Type 2 header */
struct fddi_8022_2_hdr
{
struct fddi_8022_2_hdr {
__u8 dsap; /* destination service access point */
__u8 ssap; /* source service access point */
__u8 ctrl_1; /* control byte #1 */
__u8 ctrl_2; /* control byte #2 */
} __attribute__ ((packed));
} __attribute__ ((packed));
/* Define 802.2 SNAP header */
#define FDDI_K_OUI_LEN 3
struct fddi_snap_hdr
{
struct fddi_snap_hdr {
__u8 dsap; /* always 0xAA */
__u8 ssap; /* always 0xAA */
__u8 ctrl; /* always 0x03 */
__u8 oui[FDDI_K_OUI_LEN]; /* organizational universal id */
__be16 ethertype; /* packet type ID field */
} __attribute__ ((packed));
} __attribute__ ((packed));
/* Define FDDI LLC frame header */
struct fddihdr
{
struct fddihdr {
__u8 fc; /* frame control */
__u8 daddr[FDDI_K_ALEN]; /* destination address */
__u8 saddr[FDDI_K_ALEN]; /* source address */
@@ -102,7 +98,7 @@ struct fddihdr
struct fddi_8022_2_hdr llc_8022_2;
struct fddi_snap_hdr llc_snap;
} hdr;
} __attribute__ ((packed));
} __attribute__ ((packed));
#ifdef __KERNEL__
#include <linux/netdevice.h>
@@ -197,7 +193,7 @@ struct fddi_statistics {
__u32 port_pc_withhold[2];
__u32 port_ler_flag[2];
__u32 port_hardware_present[2];
};
};
#endif /* __KERNEL__ */
#endif /* _LINUX_IF_FDDI_H */

View File

@@ -51,8 +51,7 @@
* HIPPI statistics collection data.
*/
struct hipnet_statistics
{
struct hipnet_statistics {
int rx_packets; /* total packets received */
int tx_packets; /* total packets transmitted */
int rx_errors; /* bad packets received */
@@ -77,8 +76,7 @@ struct hipnet_statistics
};
struct hippi_fp_hdr
{
struct hippi_fp_hdr {
#if 0
__u8 ulp; /* must contain 4 */
#if defined (__BIG_ENDIAN_BITFIELD)
@@ -108,8 +106,7 @@ struct hippi_fp_hdr
__be32 d2_size;
} __attribute__ ((packed));
struct hippi_le_hdr
{
struct hippi_le_hdr {
#if defined (__BIG_ENDIAN_BITFIELD)
__u8 fc:3;
__u8 double_wide:1;
@@ -139,8 +136,7 @@ struct hippi_le_hdr
* Looks like the dsap and ssap fields have been swapped by mistake in
* RFC 2067 "IP over HIPPI".
*/
struct hippi_snap_hdr
{
struct hippi_snap_hdr {
__u8 dsap; /* always 0xAA */
__u8 ssap; /* always 0xAA */
__u8 ctrl; /* always 0x03 */
@@ -148,8 +144,7 @@ struct hippi_snap_hdr
__be16 ethertype; /* packet type ID field */
} __attribute__ ((packed));
struct hippi_hdr
{
struct hippi_hdr {
struct hippi_fp_hdr fp;
struct hippi_le_hdr le;
struct hippi_snap_hdr snap;

View File

@@ -5,8 +5,7 @@
#include <linux/netlink.h>
/* The struct should be in sync with struct net_device_stats */
struct rtnl_link_stats
{
struct rtnl_link_stats {
__u32 rx_packets; /* total packets received */
__u32 tx_packets; /* total packets transmitted */
__u32 rx_bytes; /* total bytes received */
@@ -39,8 +38,7 @@ struct rtnl_link_stats
};
/* The struct should be in sync with struct ifmap */
struct rtnl_link_ifmap
{
struct rtnl_link_ifmap {
__u64 mem_start;
__u64 mem_end;
__u64 base_addr;
@@ -49,8 +47,7 @@ struct rtnl_link_ifmap
__u8 port;
};
enum
{
enum {
IFLA_UNSPEC,
IFLA_ADDRESS,
IFLA_BROADCAST,
@@ -123,8 +120,7 @@ enum
*/
/* Subtype attributes for IFLA_PROTINFO */
enum
{
enum {
IFLA_INET6_UNSPEC,
IFLA_INET6_FLAGS, /* link flags */
IFLA_INET6_CONF, /* sysctl parameters */
@@ -137,16 +133,14 @@ enum
#define IFLA_INET6_MAX (__IFLA_INET6_MAX - 1)
struct ifla_cacheinfo
{
struct ifla_cacheinfo {
__u32 max_reasm_len;
__u32 tstamp; /* ipv6InterfaceTable updated timestamp */
__u32 reachable_time;
__u32 retrans_time;
};
enum
{
enum {
IFLA_INFO_UNSPEC,
IFLA_INFO_KIND,
IFLA_INFO_DATA,
@@ -158,8 +152,7 @@ enum
/* VLAN section */
enum
{
enum {
IFLA_VLAN_UNSPEC,
IFLA_VLAN_ID,
IFLA_VLAN_FLAGS,
@@ -175,8 +168,7 @@ struct ifla_vlan_flags {
__u32 mask;
};
enum
{
enum {
IFLA_VLAN_QOS_UNSPEC,
IFLA_VLAN_QOS_MAPPING,
__IFLA_VLAN_QOS_MAX
@@ -184,10 +176,24 @@ enum
#define IFLA_VLAN_QOS_MAX (__IFLA_VLAN_QOS_MAX - 1)
struct ifla_vlan_qos_mapping
{
struct ifla_vlan_qos_mapping {
__u32 from;
__u32 to;
};
/* MACVLAN section */
enum {
IFLA_MACVLAN_UNSPEC,
IFLA_MACVLAN_MODE,
__IFLA_MACVLAN_MAX,
};
#define IFLA_MACVLAN_MAX (__IFLA_MACVLAN_MAX - 1)
enum macvlan_mode {
MACVLAN_MODE_PRIVATE = 1, /* don't talk to other macvlans */
MACVLAN_MODE_VEPA = 2, /* talk to other ports through ext bridge */
MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */
};
#endif /* _LINUX_IF_LINK_H */

View File

@@ -3,15 +3,13 @@
#include <linux/types.h>
struct sockaddr_pkt
{
struct sockaddr_pkt {
unsigned short spkt_family;
unsigned char spkt_device[14];
__be16 spkt_protocol;
};
struct sockaddr_ll
{
struct sockaddr_ll {
unsigned short sll_family;
__be16 sll_protocol;
int sll_ifindex;
@@ -49,14 +47,12 @@ struct sockaddr_ll
#define PACKET_TX_RING 13
#define PACKET_LOSS 14
struct tpacket_stats
{
struct tpacket_stats {
unsigned int tp_packets;
unsigned int tp_drops;
};
struct tpacket_auxdata
{
struct tpacket_auxdata {
__u32 tp_status;
__u32 tp_len;
__u32 tp_snaplen;
@@ -78,8 +74,7 @@ struct tpacket_auxdata
#define TP_STATUS_SENDING 0x2
#define TP_STATUS_WRONG_FORMAT 0x4
struct tpacket_hdr
{
struct tpacket_hdr {
unsigned long tp_status;
unsigned int tp_len;
unsigned int tp_snaplen;
@@ -93,8 +88,7 @@ struct tpacket_hdr
#define TPACKET_ALIGN(x) (((x)+TPACKET_ALIGNMENT-1)&~(TPACKET_ALIGNMENT-1))
#define TPACKET_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket_hdr)) + sizeof(struct sockaddr_ll))
struct tpacket2_hdr
{
struct tpacket2_hdr {
__u32 tp_status;
__u32 tp_len;
__u32 tp_snaplen;
@@ -107,8 +101,7 @@ struct tpacket2_hdr
#define TPACKET2_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + sizeof(struct sockaddr_ll))
enum tpacket_versions
{
enum tpacket_versions {
TPACKET_V1,
TPACKET_V2,
};
@@ -126,16 +119,14 @@ enum tpacket_versions
- Pad to align to TPACKET_ALIGNMENT=16
*/
struct tpacket_req
{
struct tpacket_req {
unsigned int tp_block_size; /* Minimal size of contiguous block */
unsigned int tp_block_nr; /* Number of blocks */
unsigned int tp_frame_size; /* Size of frame */
unsigned int tp_frame_nr; /* Total number of frames */
};
struct packet_mreq
{
struct packet_mreq {
int mr_ifindex;
unsigned short mr_type;
unsigned short mr_alen;

View File

@@ -15,8 +15,7 @@
#define SIOCDEVPLIP SIOCDEVPRIVATE
struct plipconf
{
struct plipconf {
unsigned short pcmd;
unsigned long nibble;
unsigned long trigger;

View File

@@ -24,8 +24,7 @@
/* Structure used to connect() the socket to a particular tunnel UDP
* socket.
*/
struct pppol2tp_addr
{
struct pppol2tp_addr {
__kernel_pid_t pid; /* pid that owns the fd.
* 0 => current */
int fd; /* FD of UDP socket to use */

View File

@@ -5,6 +5,7 @@
#ifdef __KERNEL__
#include <linux/ip.h>
#include <linux/in6.h>
#endif
#define SIOCGETTUNNEL (SIOCDEVPRIVATE + 0)
@@ -15,6 +16,10 @@
#define SIOCADDPRL (SIOCDEVPRIVATE + 5)
#define SIOCDELPRL (SIOCDEVPRIVATE + 6)
#define SIOCCHGPRL (SIOCDEVPRIVATE + 7)
#define SIOCGET6RD (SIOCDEVPRIVATE + 8)
#define SIOCADD6RD (SIOCDEVPRIVATE + 9)
#define SIOCDEL6RD (SIOCDEVPRIVATE + 10)
#define SIOCCHG6RD (SIOCDEVPRIVATE + 11)
#define GRE_CSUM __cpu_to_be16(0x8000)
#define GRE_ROUTING __cpu_to_be16(0x4000)
@@ -25,8 +30,7 @@
#define GRE_FLAGS __cpu_to_be16(0x00F8)
#define GRE_VERSION __cpu_to_be16(0x0007)
struct ip_tunnel_parm
{
struct ip_tunnel_parm {
char name[IFNAMSIZ];
int link;
__be16 i_flags;
@@ -51,8 +55,14 @@ struct ip_tunnel_prl {
/* PRL flags */
#define PRL_DEFAULT 0x0001
enum
{
struct ip_tunnel_6rd {
struct in6_addr prefix;
__be32 relay_prefix;
__u16 prefixlen;
__u16 relay_prefixlen;
};
enum {
IFLA_GRE_UNSPEC,
IFLA_GRE_LINK,
IFLA_GRE_IFLAGS,

View File

@@ -63,7 +63,11 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
return (struct vlan_ethhdr *)skb_mac_header(skb);
}
#define VLAN_VID_MASK 0xfff
#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT 13
#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */
#define VLAN_TAG_PRESENT VLAN_CFI_MASK
#define VLAN_VID_MASK 0x0fff /* VLAN Identifier */
/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
@@ -81,6 +85,7 @@ struct vlan_group {
* the vlan is attached to.
*/
unsigned int nr_vlans;
int killall;
struct hlist_node hlist; /* linked list */
struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
struct rcu_head rcu;
@@ -105,8 +110,8 @@ static inline void vlan_group_set_device(struct vlan_group *vg,
array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
}
#define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci)
#define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci)
#define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
#define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
@@ -115,10 +120,12 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
u16 vlan_tci, int polling);
extern int vlan_hwaccel_do_receive(struct sk_buff *skb);
extern int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
unsigned int vlan_tci, struct sk_buff *skb);
extern int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
unsigned int vlan_tci);
extern gro_result_t
vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
unsigned int vlan_tci, struct sk_buff *skb);
extern gro_result_t
vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
unsigned int vlan_tci);
#else
static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
@@ -145,17 +152,18 @@ static inline int vlan_hwaccel_do_receive(struct sk_buff *skb)
return 0;
}
static inline int vlan_gro_receive(struct napi_struct *napi,
struct vlan_group *grp,
unsigned int vlan_tci, struct sk_buff *skb)
static inline gro_result_t
vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
unsigned int vlan_tci, struct sk_buff *skb)
{
return NET_RX_DROP;
return GRO_DROP;
}
static inline int vlan_gro_frags(struct napi_struct *napi,
struct vlan_group *grp, unsigned int vlan_tci)
static inline gro_result_t
vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
unsigned int vlan_tci)
{
return NET_RX_DROP;
return GRO_DROP;
}
#endif
@@ -231,7 +239,7 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
u16 vlan_tci)
{
skb->vlan_tci = vlan_tci;
skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
return skb;
}
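
For illustration, a sketch of what the new encoding means for a driver's receive path: the CFI bit now doubles as the tag-present marker, so the 12-bit VID must be masked out of the value returned by vlan_tx_tag_get().

#include <linux/if_vlan.h>

static u16 rx_vlan_id(const struct sk_buff *skb)
{
        if (!vlan_tx_tag_present(skb))
                return 0;                               /* untagged frame */
        return vlan_tx_tag_get(skb) & VLAN_VID_MASK;    /* keep only the VID */
}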
@@ -284,7 +292,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
u16 *vlan_tci)
{
if (vlan_tx_tag_present(skb)) {
*vlan_tci = skb->vlan_tci;
*vlan_tci = vlan_tx_tag_get(skb);
return 0;
} else {
*vlan_tci = 0;
@@ -331,6 +339,7 @@ enum vlan_ioctl_cmds {
enum vlan_flags {
VLAN_FLAG_REORDER_HDR = 0x1,
VLAN_FLAG_GVRP = 0x2,
VLAN_FLAG_LOOSE_BINDING = 0x4,
};
enum vlan_name_types {

View File

@@ -27,8 +27,7 @@
* Header in on cable format
*/
struct igmphdr
{
struct igmphdr {
__u8 type;
__u8 code; /* For newer IGMP */
__sum16 csum;
@@ -151,8 +150,7 @@ static inline struct igmpv3_query *
extern int sysctl_igmp_max_memberships;
extern int sysctl_igmp_max_msf;
struct ip_sf_socklist
{
struct ip_sf_socklist {
unsigned int sl_max;
unsigned int sl_count;
__be32 sl_addr[0];
@@ -167,16 +165,14 @@ struct ip_sf_socklist
this list never used in fast path code
*/
struct ip_mc_socklist
{
struct ip_mc_socklist {
struct ip_mc_socklist *next;
struct ip_mreqn multi;
unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */
struct ip_sf_socklist *sflist;
};
struct ip_sf_list
{
struct ip_sf_list {
struct ip_sf_list *sf_next;
__be32 sf_inaddr;
unsigned long sf_count[2]; /* include/exclude counts */
@@ -185,8 +181,7 @@ struct ip_sf_list
unsigned char sf_crcount; /* retrans. left to send */
};
struct ip_mc_list
{
struct ip_mc_list {
struct in_device *interface;
__be32 multiaddr;
struct ip_sf_list *sources;

View File

@@ -118,14 +118,12 @@ struct in_addr {
/* Request struct for multicast socket ops */
struct ip_mreq
{
struct ip_mreq {
struct in_addr imr_multiaddr; /* IP multicast address of group */
struct in_addr imr_interface; /* local IP address of interface */
};
struct ip_mreqn
{
struct ip_mreqn {
struct in_addr imr_multiaddr; /* IP multicast address of group */
struct in_addr imr_address; /* local IP address of interface */
int imr_ifindex; /* Interface index */
@@ -149,21 +147,18 @@ struct ip_msfilter {
(sizeof(struct ip_msfilter) - sizeof(__u32) \
+ (numsrc) * sizeof(__u32))
struct group_req
{
struct group_req {
__u32 gr_interface; /* interface index */
struct __kernel_sockaddr_storage gr_group; /* group address */
};
struct group_source_req
{
struct group_source_req {
__u32 gsr_interface; /* interface index */
struct __kernel_sockaddr_storage gsr_group; /* group address */
struct __kernel_sockaddr_storage gsr_source; /* source address */
};
struct group_filter
{
struct group_filter {
__u32 gf_interface; /* interface index */
struct __kernel_sockaddr_storage gf_group; /* multicast address */
__u32 gf_fmode; /* filter mode */
@@ -175,8 +170,7 @@ struct group_filter
(sizeof(struct group_filter) - sizeof(struct __kernel_sockaddr_storage) \
+ (numsrc) * sizeof(struct __kernel_sockaddr_storage))
struct in_pktinfo
{
struct in_pktinfo {
int ipi_ifindex;
struct in_addr ipi_spec_dst;
struct in_addr ipi_addr;

View File

@@ -27,10 +27,8 @@
* IPv6 address structure
*/
struct in6_addr
{
union
{
struct in6_addr {
union {
__u8 u6_addr8[16];
__be16 u6_addr16[8];
__be32 u6_addr32[4];
@@ -75,8 +73,7 @@ struct ipv6_mreq {
#define ipv6mr_acaddr ipv6mr_multiaddr
struct in6_flowlabel_req
{
struct in6_flowlabel_req {
struct in6_addr flr_dst;
__be32 flr_label;
__u8 flr_action;

View File

@@ -10,15 +10,13 @@
#include <linux/timer.h>
#include <linux/sysctl.h>
struct ipv4_devconf
{
struct ipv4_devconf {
void *sysctl;
int data[__NET_IPV4_CONF_MAX - 1];
DECLARE_BITMAP(state, __NET_IPV4_CONF_MAX - 1);
};
struct in_device
{
struct in_device {
struct net_device *dev;
atomic_t refcnt;
int dead;
@@ -85,6 +83,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
ACCEPT_SOURCE_ROUTE)
#define IN_DEV_ACCEPT_LOCAL(in_dev) IN_DEV_ORCONF((in_dev), ACCEPT_LOCAL)
#define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY)
#define IN_DEV_LOG_MARTIANS(in_dev) IN_DEV_ORCONF((in_dev), LOG_MARTIANS)
@@ -110,8 +109,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE)
#define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
struct in_ifaddr
{
struct in_ifaddr {
struct in_ifaddr *ifa_next;
struct in_device *ifa_dev;
struct rcu_head rcu_head;

View File

@@ -83,16 +83,12 @@ extern struct group_info init_groups;
#define INIT_IDS
#endif
#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
/*
* Because of the reduced scope of CAP_SETPCAP when filesystem
* capabilities are in effect, it is safe to allow CAP_SETPCAP to
* be available in the default configuration.
*/
# define CAP_INIT_BSET CAP_FULL_SET
#else
# define CAP_INIT_BSET CAP_INIT_EFF_SET
#endif
#ifdef CONFIG_TREE_PREEMPT_RCU
#define INIT_TASK_RCU_PREEMPT(tsk) \

View File

@@ -595,6 +595,8 @@ struct input_absinfo {
#define KEY_NUMERIC_STAR 0x20a
#define KEY_NUMERIC_POUND 0x20b
#define KEY_CAMERA_FOCUS 0x210
/* We avoid low common keys in module aliases so they don't get huge. */
#define KEY_MIN_INTERESTING KEY_MUTE
#define KEY_MAX 0x2ff
@@ -677,6 +679,9 @@ struct input_absinfo {
#define SW_LINEOUT_INSERT 0x06 /* set = inserted */
#define SW_JACK_PHYSICAL_INSERT 0x07 /* set = mechanical switch set */
#define SW_VIDEOOUT_INSERT 0x08 /* set = inserted */
#define SW_CAMERA_LENS_COVER 0x09 /* set = lens covered */
#define SW_KEYPAD_SLIDE 0x0a /* set = keypad slide out */
#define SW_FRONT_PROXIMITY 0x0b /* set = front proximity sensor active */
#define SW_MAX 0x0f
#define SW_CNT (SW_MAX+1)
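
For illustration, a minimal sketch of a driver reporting one of the new switch codes; the device is assumed to have been set up elsewhere (e.g. with input_set_capability(dev, EV_SW, SW_CAMERA_LENS_COVER) at probe time), and only the reporting call is shown.

#include <linux/input.h>

/* 'covered' would come from reading the hypothetical lens-cover sensor */
static void report_lens_cover(struct input_dev *dev, bool covered)
{
        input_report_switch(dev, SW_CAMERA_LENS_COVER, covered);
        input_sync(dev);
}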

View File

@@ -603,12 +603,6 @@ static inline void init_irq_proc(void)
}
#endif
#if defined(CONFIG_GENERIC_HARDIRQS) && defined(CONFIG_DEBUG_SHIRQ)
extern void debug_poll_all_shared_irqs(void);
#else
static inline void debug_poll_all_shared_irqs(void) { }
#endif
struct seq_file;
int show_interrupts(struct seq_file *p, void *v);

View File

@@ -40,16 +40,11 @@ struct cfq_io_context {
struct io_context *ioc;
unsigned long last_end_request;
sector_t last_request_pos;
unsigned long ttime_total;
unsigned long ttime_samples;
unsigned long ttime_mean;
unsigned int seek_samples;
u64 seek_total;
sector_t seek_mean;
struct list_head queue_list;
struct hlist_node cic_list;
@@ -73,6 +68,10 @@ struct io_context {
unsigned short ioprio;
unsigned short ioprio_changed;
#ifdef CONFIG_BLK_CGROUP
unsigned short cgroup_changed;
#endif
/*
* For request batching
*/
@@ -99,14 +98,15 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
return NULL;
}
struct task_struct;
#ifdef CONFIG_BLOCK
int put_io_context(struct io_context *ioc);
void exit_io_context(void);
void exit_io_context(struct task_struct *task);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
#else
static inline void exit_io_context(void)
static inline void exit_io_context(struct task_struct *task)
{
}

View File

@@ -127,8 +127,7 @@ struct ip_vs_dest_user {
/*
* IPVS statistics object (for user space)
*/
struct ip_vs_stats_user
{
struct ip_vs_stats_user {
__u32 conns; /* connections scheduled */
__u32 inpkts; /* incoming packets */
__u32 outpkts; /* outgoing packets */

View File

@@ -167,6 +167,7 @@ struct ipv6_devconf {
#endif
__s32 disable_ipv6;
__s32 accept_dad;
__s32 force_tllao;
void *sysctl;
};
@@ -207,6 +208,7 @@ enum {
DEVCONF_MC_FORWARDING,
DEVCONF_DISABLE_IPV6,
DEVCONF_ACCEPT_DAD,
DEVCONF_FORCE_TLLAO,
DEVCONF_MAX
};
@@ -503,7 +505,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
#define INET6_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif)\
(((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net) && \
((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports)) && \
((*((__portpair *)&(inet_sk(__sk)->inet_dport))) == (__ports)) && \
((__sk)->sk_family == AF_INET6) && \
ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \
ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \

View File

@@ -124,6 +124,6 @@
typecheck(unsigned long, flags); \
raw_irqs_disabled_flags(flags); \
})
#endif /* CONFIG_X86 */
#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
#endif

View File

@@ -307,6 +307,7 @@ extern clock_t jiffies_to_clock_t(long x);
extern unsigned long clock_t_to_jiffies(unsigned long x);
extern u64 jiffies_64_to_clock_t(u64 x);
extern u64 nsec_to_clock_t(u64 x);
extern unsigned long nsecs_to_jiffies(u64 n);
#define TIMESTAMP_SIZE 30

View File

@@ -15,7 +15,6 @@
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/typecheck.h>
#include <linux/ratelimit.h>
#include <linux/dynamic_debug.h>
#include <asm/byteorder.h>
#include <asm/bug.h>
@@ -241,8 +240,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
asmlinkage int printk(const char * fmt, ...)
__attribute__ ((format (printf, 1, 2))) __cold;
extern struct ratelimit_state printk_ratelimit_state;
extern int printk_ratelimit(void);
extern int __printk_ratelimit(const char *func);
#define printk_ratelimit() __printk_ratelimit(__func__)
extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
unsigned int interval_msec);
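
Callers are unchanged by this; for illustration, the usual pattern still reads as below, with the reporting site now identified automatically via __func__.

#include <linux/kernel.h>

static void note_overrun(int queue)
{
        if (printk_ratelimit())
                printk(KERN_WARNING "queue %d overrun\n", queue);
}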

View File

@@ -25,6 +25,7 @@ struct cpu_usage_stat {
cputime64_t iowait;
cputime64_t steal;
cputime64_t guest;
cputime64_t guest_nice;
};
struct kernel_stat {

View File

@@ -296,6 +296,8 @@ void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
int disable_kprobe(struct kprobe *kp);
int enable_kprobe(struct kprobe *kp);
void dump_kprobe(struct kprobe *kp);
#else /* !CONFIG_KPROBES: */
static inline int kprobes_built_in(void)

View File

@@ -14,12 +14,76 @@
#define KVM_API_VERSION 12
/* for KVM_TRACE_ENABLE, deprecated */
/* *** Deprecated interfaces *** */
#define KVM_TRC_SHIFT 16
#define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT)
#define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1))
#define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01)
#define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02)
#define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01)
#define KVM_TRC_HEAD_SIZE 12
#define KVM_TRC_CYCLE_SIZE 8
#define KVM_TRC_EXTRA_MAX 7
#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02)
#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03)
#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04)
#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05)
#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06)
#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07)
#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08)
#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09)
#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A)
#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B)
#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C)
#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D)
#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E)
#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F)
#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10)
#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11)
#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15)
#define KVM_TRC_GTLB_WRITE (KVM_TRC_HANDLER + 0x16)
#define KVM_TRC_STLB_WRITE (KVM_TRC_HANDLER + 0x17)
#define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18)
#define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19)
struct kvm_user_trace_setup {
__u32 buf_size; /* sub_buffer size of each per-cpu */
__u32 buf_nr; /* the number of sub_buffers of each per-cpu */
__u32 buf_size;
__u32 buf_nr;
};
#define __KVM_DEPRECATED_MAIN_W_0x06 \
_IOW(KVMIO, 0x06, struct kvm_user_trace_setup)
#define __KVM_DEPRECATED_MAIN_0x07 _IO(KVMIO, 0x07)
#define __KVM_DEPRECATED_MAIN_0x08 _IO(KVMIO, 0x08)
#define __KVM_DEPRECATED_VM_R_0x70 _IOR(KVMIO, 0x70, struct kvm_assigned_irq)
struct kvm_breakpoint {
__u32 enabled;
__u32 padding;
__u64 address;
};
struct kvm_debug_guest {
__u32 enabled;
__u32 pad;
struct kvm_breakpoint breakpoints[4];
__u32 singlestep;
};
#define __KVM_DEPRECATED_VCPU_W_0x87 _IOW(KVMIO, 0x87, struct kvm_debug_guest)
/* *** End of deprecated interfaces *** */
/* for KVM_CREATE_MEMORY_REGION */
struct kvm_memory_region {
__u32 slot;
@@ -99,6 +163,7 @@ struct kvm_pit_config {
/* For KVM_EXIT_INTERNAL_ERROR */
#define KVM_INTERNAL_ERROR_EMULATION 1
#define KVM_INTERNAL_ERROR_SIMUL_EX 2
/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
struct kvm_run {
@@ -116,6 +181,11 @@ struct kvm_run {
__u64 cr8;
__u64 apic_base;
#ifdef __KVM_S390
/* the processor status word for s390 */
__u64 psw_mask; /* psw upper half */
__u64 psw_addr; /* psw lower half */
#endif
union {
/* KVM_EXIT_UNKNOWN */
struct {
@@ -167,8 +237,6 @@ struct kvm_run {
/* KVM_EXIT_S390_SIEIC */
struct {
__u8 icptcode;
__u64 mask; /* psw upper half */
__u64 addr; /* psw lower half */
__u16 ipa;
__u32 ipb;
} s390_sieic;
@@ -187,6 +255,9 @@ struct kvm_run {
} dcr;
struct {
__u32 suberror;
/* Available with KVM_CAP_INTERNAL_ERROR_DATA: */
__u32 ndata;
__u64 data[16];
} internal;
/* Fix the size of the union. */
char padding[256];
@@ -329,24 +400,6 @@ struct kvm_ioeventfd {
__u8 pad[36];
};
#define KVM_TRC_SHIFT 16
/*
* kvm trace categories
*/
#define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT)
#define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1)) /* only 12 bits */
/*
* kvm trace action
*/
#define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01)
#define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02)
#define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01)
#define KVM_TRC_HEAD_SIZE 12
#define KVM_TRC_CYCLE_SIZE 8
#define KVM_TRC_EXTRA_MAX 7
#define KVMIO 0xAE
/*
@@ -367,12 +420,10 @@ struct kvm_ioeventfd {
*/
#define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */
#define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2)
/*
* ioctls for kvm trace
*/
#define KVM_TRACE_ENABLE _IOW(KVMIO, 0x06, struct kvm_user_trace_setup)
#define KVM_TRACE_PAUSE _IO(KVMIO, 0x07)
#define KVM_TRACE_DISABLE _IO(KVMIO, 0x08)
#define KVM_TRACE_ENABLE __KVM_DEPRECATED_MAIN_W_0x06
#define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07
#define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08
/*
* Extension capability list.
*/
@@ -436,8 +487,15 @@ struct kvm_ioeventfd {
#endif
#define KVM_CAP_IOEVENTFD 36
#define KVM_CAP_SET_IDENTITY_MAP_ADDR 37
/* KVM upstream has more features, but we synched this number.
Linux, please remove this comment on rebase. */
#ifdef __KVM_HAVE_XEN_HVM
#define KVM_CAP_XEN_HVM 38
#endif
#define KVM_CAP_ADJUST_CLOCK 39
#define KVM_CAP_INTERNAL_ERROR_DATA 40
#ifdef __KVM_HAVE_VCPU_EVENTS
#define KVM_CAP_VCPU_EVENTS 41
#endif
#define KVM_CAP_S390_PSW 42
#define KVM_CAP_PPC_SEGSTATE 43
#ifdef KVM_CAP_IRQ_ROUTING
@@ -491,6 +549,18 @@ struct kvm_x86_mce {
};
#endif
#ifdef KVM_CAP_XEN_HVM
struct kvm_xen_hvm_config {
__u32 flags;
__u32 msr;
__u64 blob_addr_32;
__u64 blob_addr_64;
__u8 blob_size_32;
__u8 blob_size_64;
__u8 pad2[30];
};
#endif
#define KVM_IRQFD_FLAG_DEASSIGN (1 << 0)
struct kvm_irqfd {
@@ -500,55 +570,66 @@ struct kvm_irqfd {
__u8 pad[20];
};
struct kvm_clock_data {
__u64 clock;
__u32 flags;
__u32 pad[9];
};
/*
* ioctls for VM fds
*/
#define KVM_SET_MEMORY_REGION _IOW(KVMIO, 0x40, struct kvm_memory_region)
#define KVM_SET_MEMORY_REGION _IOW(KVMIO, 0x40, struct kvm_memory_region)
/*
* KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns
* a vcpu fd.
*/
#define KVM_CREATE_VCPU _IO(KVMIO, 0x41)
#define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log)
#define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias)
#define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44)
#define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45)
#define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46,\
#define KVM_CREATE_VCPU _IO(KVMIO, 0x41)
#define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log)
#define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias)
#define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44)
#define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45)
#define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46, \
struct kvm_userspace_memory_region)
#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
#define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
#define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
/* Device model IOC */
#define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60)
#define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level)
#define KVM_GET_IRQCHIP _IOWR(KVMIO, 0x62, struct kvm_irqchip)
#define KVM_SET_IRQCHIP _IOR(KVMIO, 0x63, struct kvm_irqchip)
#define KVM_CREATE_PIT _IO(KVMIO, 0x64)
#define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state)
#define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state)
#define KVM_IRQ_LINE_STATUS _IOWR(KVMIO, 0x67, struct kvm_irq_level)
#define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60)
#define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level)
#define KVM_GET_IRQCHIP _IOWR(KVMIO, 0x62, struct kvm_irqchip)
#define KVM_SET_IRQCHIP _IOR(KVMIO, 0x63, struct kvm_irqchip)
#define KVM_CREATE_PIT _IO(KVMIO, 0x64)
#define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state)
#define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state)
#define KVM_IRQ_LINE_STATUS _IOWR(KVMIO, 0x67, struct kvm_irq_level)
#define KVM_REGISTER_COALESCED_MMIO \
_IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone)
#define KVM_UNREGISTER_COALESCED_MMIO \
_IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone)
#define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \
struct kvm_assigned_pci_dev)
#define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing)
#define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \
struct kvm_assigned_pci_dev)
#define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing)
/* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */
#define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \
struct kvm_assigned_irq)
#define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq)
#define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71)
#define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \
struct kvm_assigned_pci_dev)
#define KVM_ASSIGN_SET_MSIX_NR \
_IOW(KVMIO, 0x73, struct kvm_assigned_msix_nr)
#define KVM_ASSIGN_SET_MSIX_ENTRY \
_IOW(KVMIO, 0x74, struct kvm_assigned_msix_entry)
#define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
#define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd)
#define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
#define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
#define KVM_IOEVENTFD _IOW(KVMIO, 0x79, struct kvm_ioeventfd)
#define KVM_ASSIGN_IRQ __KVM_DEPRECATED_VM_R_0x70
#define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq)
#define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71)
#define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \
struct kvm_assigned_pci_dev)
#define KVM_ASSIGN_SET_MSIX_NR _IOW(KVMIO, 0x73, \
struct kvm_assigned_msix_nr)
#define KVM_ASSIGN_SET_MSIX_ENTRY _IOW(KVMIO, 0x74, \
struct kvm_assigned_msix_entry)
#define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
#define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd)
#define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
#define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
#define KVM_IOEVENTFD _IOW(KVMIO, 0x79, struct kvm_ioeventfd)
#define KVM_XEN_HVM_CONFIG _IOW(KVMIO, 0x7a, struct kvm_xen_hvm_config)
#define KVM_SET_CLOCK _IOW(KVMIO, 0x7b, struct kvm_clock_data)
#define KVM_GET_CLOCK _IOR(KVMIO, 0x7c, struct kvm_clock_data)
/* Available with KVM_CAP_PIT_STATE2 */
#define KVM_GET_PIT2 _IOR(KVMIO, 0x9f, struct kvm_pit_state2)
#define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2)
/*
* ioctls for vcpu fds
@@ -561,7 +642,7 @@ struct kvm_irqfd {
#define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation)
#define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt)
/* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */
#define KVM_DEBUG_GUEST __KVM_DEPRECATED_DEBUG_GUEST
#define KVM_DEBUG_GUEST __KVM_DEPRECATED_VCPU_W_0x87
#define KVM_GET_MSRS _IOWR(KVMIO, 0x88, struct kvm_msrs)
#define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs)
#define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid)
@@ -573,7 +654,7 @@ struct kvm_irqfd {
#define KVM_SET_CPUID2 _IOW(KVMIO, 0x90, struct kvm_cpuid2)
#define KVM_GET_CPUID2 _IOWR(KVMIO, 0x91, struct kvm_cpuid2)
/* Available with KVM_CAP_VAPIC */
#define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl)
#define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl)
/* Available with KVM_CAP_VAPIC */
#define KVM_SET_VAPIC_ADDR _IOW(KVMIO, 0x93, struct kvm_vapic_addr)
/* valid for virtual machine (for floating interrupt)_and_ vcpu */
@@ -585,66 +666,23 @@ struct kvm_irqfd {
/* initial ipl psw for s390 */
#define KVM_S390_SET_INITIAL_PSW _IOW(KVMIO, 0x96, struct kvm_s390_psw)
/* initial reset for s390 */
#define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97)
#define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97)
#define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state)
#define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state)
/* Available with KVM_CAP_NMI */
#define KVM_NMI _IO(KVMIO, 0x9a)
#define KVM_NMI _IO(KVMIO, 0x9a)
/* Available with KVM_CAP_SET_GUEST_DEBUG */
#define KVM_SET_GUEST_DEBUG _IOW(KVMIO, 0x9b, struct kvm_guest_debug)
/* MCE for x86 */
#define KVM_X86_SETUP_MCE _IOW(KVMIO, 0x9c, __u64)
#define KVM_X86_GET_MCE_CAP_SUPPORTED _IOR(KVMIO, 0x9d, __u64)
#define KVM_X86_SET_MCE _IOW(KVMIO, 0x9e, struct kvm_x86_mce)
/*
* Deprecated interfaces
*/
struct kvm_breakpoint {
__u32 enabled;
__u32 padding;
__u64 address;
};
struct kvm_debug_guest {
__u32 enabled;
__u32 pad;
struct kvm_breakpoint breakpoints[4];
__u32 singlestep;
};
#define __KVM_DEPRECATED_DEBUG_GUEST _IOW(KVMIO, 0x87, struct kvm_debug_guest)
/* IA64 stack access */
#define KVM_IA64_VCPU_GET_STACK _IOR(KVMIO, 0x9a, void *)
#define KVM_IA64_VCPU_SET_STACK _IOW(KVMIO, 0x9b, void *)
#define KVM_GET_PIT2 _IOR(KVMIO, 0x9f, struct kvm_pit_state2)
#define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2)
#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02)
#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03)
#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04)
#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05)
#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06)
#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07)
#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08)
#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09)
#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A)
#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B)
#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C)
#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D)
#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E)
#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F)
#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10)
#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11)
#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15)
#define KVM_TRC_GTLB_WRITE (KVM_TRC_HANDLER + 0x16)
#define KVM_TRC_STLB_WRITE (KVM_TRC_HANDLER + 0x17)
#define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18)
#define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19)
/* Available with KVM_CAP_VCPU_EVENTS */
#define KVM_GET_VCPU_EVENTS _IOR(KVMIO, 0x9f, struct kvm_vcpu_events)
#define KVM_SET_VCPU_EVENTS _IOW(KVMIO, 0xa0, struct kvm_vcpu_events)
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
@@ -699,4 +737,4 @@ struct kvm_assigned_msix_entry {
__u16 padding[3];
};
#endif
#endif /* __LINUX_KVM_H */

View File

@@ -120,7 +120,7 @@ struct kvm_kernel_irq_routing_entry {
u32 gsi;
u32 type;
int (*set)(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int level);
struct kvm *kvm, int irq_source_id, int level);
union {
struct {
unsigned irqchip;
@@ -128,9 +128,28 @@ struct kvm_kernel_irq_routing_entry {
} irqchip;
struct msi_msg msi;
};
struct list_head link;
struct hlist_node link;
};
#ifdef __KVM_HAVE_IOAPIC
struct kvm_irq_routing_table {
int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
struct kvm_kernel_irq_routing_entry *rt_entries;
u32 nr_rt_entries;
/*
* Array indexed by gsi. Each entry contains list of irq chips
* the gsi is connected to.
*/
struct hlist_head map[0];
};
#else
struct kvm_irq_routing_table {};
#endif
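The routing table above fans a GSI out to one or more chips: map[] is a flexible array indexed by GSI, each slot holding an hlist of kvm_kernel_irq_routing_entry. A minimal sketch of how a consumer could walk one GSI's list (the helper name is invented for illustration and assumes the caller already holds the routing lock / RCU read side):

/* Illustrative only: iterate all routing entries attached to a GSI. */
static void example_walk_gsi(struct kvm_irq_routing_table *table, u32 gsi)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct hlist_node *n;

	if (gsi >= table->nr_rt_entries)
		return;
	hlist_for_each_entry(e, n, &table->map[gsi], link) {
		/* each entry's e->set(e, kvm, irq_source_id, level)
		 * would be invoked here */
	}
}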
struct kvm {
spinlock_t mmu_lock;
spinlock_t requests_lock;
@@ -166,8 +185,9 @@ struct kvm {
struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
struct list_head irq_routing; /* of kvm_kernel_irq_routing_entry */
struct kvm_irq_routing_table *irq_routing;
struct hlist_head mask_notifier_list;
struct hlist_head irq_ack_notifier_list;
#endif
#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
@@ -266,6 +286,7 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
@@ -325,7 +346,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_arch_hardware_enable(void *garbage);
int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
@@ -390,7 +411,12 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
#ifdef __KVM_HAVE_IOAPIC
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
union kvm_ioapic_redirect_entry *entry,
unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
@@ -552,4 +578,21 @@ static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}
#endif
#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
unsigned long arg);
#else
static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
unsigned long arg)
{
return -ENOTTY;
}
#endif
#endif

View File

@@ -365,7 +365,7 @@ enum {
/* This should match the actual table size of
* ata_eh_cmd_timeout_table in libata-eh.c.
*/
ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 5,
ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 6,
/* Horkage types. May be set by libata or controller on drives
(some horkage may be drive/controller pair dependent) */
@@ -595,6 +595,7 @@ struct ata_device {
unsigned int horkage; /* List of broken features */
unsigned long flags; /* ATA_DFLAG_xxx */
struct scsi_device *sdev; /* attached SCSI device */
void *private_data;
#ifdef CONFIG_ATA_ACPI
acpi_handle acpi_handle;
union acpi_object *gtf_cache;

294
include/linux/lru_cache.h Normal file
View File

@@ -0,0 +1,294 @@
/*
lru_cache.c
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
drbd is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
drbd is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with drbd; see the file COPYING. If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef LRU_CACHE_H
#define LRU_CACHE_H
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/string.h> /* for memset */
#include <linux/seq_file.h>
/*
This header file (and its .c file; see there for kernel-doc of the functions)
defines a helper framework to easily keep track of index:label associations,
and of changes to an "active set" of objects, as well as pending transactions,
to persistently record those changes.
We use an LRU policy if it is necessary to "cool down" a region currently in
the active set before we can "heat" a previously unused region.
Because of this latter property, it is called "lru_cache".
As it actually Tracks Objects in an Active SeT, we could also call it
toast (incidentally that is what may happen to the data on the
backend storage upon next resync, if we don't get it right).
What for?
We replicate IO (more or less synchronously) to local and remote disk.
For crash recovery after replication node failure,
we need to resync all regions that have been target of in-flight WRITE IO
(in use, or "hot", regions), as we don't know whether or not those WRITEs have
made it to stable storage.
To avoid a "full resync", we need to persistently track these regions.
This is known as "write intent log", and can be implemented as on-disk
(coarse or fine grained) bitmap, or other meta data.
To avoid the overhead of frequent extra writes to this meta data area,
usually the condition is softened to regions that _may_ have been target of
in-flight WRITE IO, e.g. by only lazily clearing the on-disk write-intent
bitmap, trading frequency of meta data transactions against amount of
(possibly unnecessary) resync traffic.
If we set a hard limit on the area that may be "hot" at any given time, we
limit the amount of resync traffic needed for crash recovery.
For recovery after replication link failure,
we need to resync all blocks that have been changed on the other replica
in the meantime, or, if both replicas have been changed independently [*],
all blocks that have been changed on either replica in the meantime.
[*] usually as a result of a cluster split-brain and insufficient protection,
but there are valid use cases to do this on purpose.
Tracking those blocks can be implemented as "dirty bitmap".
Having it fine-grained reduces the amount of resync traffic.
It should also be persistent, to allow for reboots (or crashes)
while the replication link is down.
There are various possible implementations for persistently storing
write intent log information, three of which are mentioned here.
"Chunk dirtying"
The on-disk "dirty bitmap" may be re-used as "write-intent" bitmap as well.
To reduce the frequency of bitmap updates for write-intent log purposes,
one could dirty "chunks" (of some size) at a time of the (fine grained)
on-disk bitmap, while keeping the in-memory "dirty" bitmap as clean as
possible, flushing it to disk again when a previously "hot" (and on-disk
dirtied as full chunk) area "cools down" again (no IO in flight anymore,
and none expected in the near future either).
"Explicit (coarse) write intent bitmap"
Another implementation could choose a (probably coarse) explicit bitmap
for write-intent log purposes, in addition to the fine-grained dirty bitmap.
"Activity log"
Yet another implementation may keep track of the hot regions, by starting
with an empty set, and writing down a journal of region numbers that have
become "hot", or have "cooled down" again.
To be able to use a ring buffer for this journal of changes to the active
set, we not only record the actual changes to that set, but also record the
not changing members of the set in a round robin fashion. To do so, we use a
fixed (but configurable) number of slots which we can identify by index, and
associate region numbers (labels) with these indices.
For each transaction recording a change to the active set, we record the
change itself (index: -old_label, +new_label), and which index is associated
with which label (index: current_label) within a certain sliding window that
is moved further over the available indices with each such transaction.
Thus, for crash recovery, if the ringbuffer is sufficiently large, we can
accurately reconstruct the active set.
"Sufficiently large" here depends only on the maximum number of active objects, and the
size of the sliding window recording "index: current_label" associations within
each transaction.
This is what we call the "activity log".
Currently we need one activity log transaction per single label change, which
does not give much benefit over the "dirty chunks of bitmap" approach, other
than potentially less seeks.
We plan to change the transaction format to support multiple changes per
transaction, which then would reduce several (disjoint, "random") updates to
the bitmap into one transaction to the activity log ring buffer.
*/
/* this defines an element in a tracked set
* .colision is for hash table lookup.
* When we process a new IO request, we know its sector, thus can deduce the
* region number (label) easily. To do the label -> object lookup without a
* full list walk, we use a simple hash table.
*
* .list is on one of three lists:
* in_use: currently in use (refcnt > 0, lc_number != LC_FREE)
* lru: unused but ready to be reused or recycled
* (refcnt == 0, lc_number != LC_FREE),
* free: unused but ready to be recycled
* (refcnt == 0, lc_number == LC_FREE),
*
* an element is said to be "in the active set",
* if either on "in_use" or "lru", i.e. lc_number != LC_FREE.
*
* DRBD currently (May 2009) only uses 61 elements on the resync lru_cache
* (total memory usage 2 pages), and up to 3833 elements on the act_log
* lru_cache, totalling ~215 kB for a 64bit architecture, ~53 pages.
*
* We usually do not actually free these objects again, but only "recycle"
* them, as the change "index: -old_label, +LC_FREE" would need a transaction
* as well. Which also means that using a kmem_cache to allocate the objects
* from wastes some resources.
* But it avoids high order page allocations in kmalloc.
*/
struct lc_element {
struct hlist_node colision;
struct list_head list; /* LRU list or free list */
unsigned refcnt;
/* back "pointer" into ts_cache->element[index],
* for paranoia, and for "ts_element_to_index" */
unsigned lc_index;
/* if we want to track a larger set of objects,
* it needs to become an arch independent u64 */
unsigned lc_number;
/* special label when on free list */
#define LC_FREE (~0U)
};
struct lru_cache {
/* the least recently used item is kept at lru->prev */
struct list_head lru;
struct list_head free;
struct list_head in_use;
/* the pre-created kmem cache to allocate the objects from */
struct kmem_cache *lc_cache;
/* size of tracked objects, used to memset(,0,) them in lc_reset */
size_t element_size;
/* offset of struct lc_element member in the tracked object */
size_t element_off;
/* number of elements (indices) */
unsigned int nr_elements;
/* Arbitrary limit on maximum tracked objects. Practical limit is much
* lower due to allocation failures, probably. For typical use cases,
* nr_elements should be a few thousand at most.
* This also limits the maximum value of lc_element.lc_index, allowing the
* 8 high bits of .lc_index to be overloaded with flags in the future. */
#define LC_MAX_ACTIVE (1<<24)
/* statistics */
unsigned used; /* number of elements currently on the in_use list */
unsigned long hits, misses, starving, dirty, changed;
/* see below: flag-bits for lru_cache */
unsigned long flags;
/* when changing the label of an index element */
unsigned int new_number;
/* for paranoia when changing the label of an index element */
struct lc_element *changing_element;
void *lc_private;
const char *name;
/* nr_elements there */
struct hlist_head *lc_slot;
struct lc_element **lc_element;
};
/* flag-bits for lru_cache */
enum {
/* debugging aid, to catch concurrent access early.
* user needs to guarantee exclusive access by proper locking! */
__LC_PARANOIA,
/* if we need to change the set, but currently there is a changing
* transaction pending, we are "dirty", and must defer further
* changing requests */
__LC_DIRTY,
/* if we need to change the set, but currently there is no free nor
* unused element available, we are "starving", and must not give out
* further references, to guarantee that eventually some refcnt will
* drop to zero and we will be able to make progress again, changing
* the set, writing the transaction.
* if the statistics say we are frequently starving,
* nr_elements is too small. */
__LC_STARVING,
};
#define LC_PARANOIA (1<<__LC_PARANOIA)
#define LC_DIRTY (1<<__LC_DIRTY)
#define LC_STARVING (1<<__LC_STARVING)
extern struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
unsigned e_count, size_t e_size, size_t e_off);
extern void lc_reset(struct lru_cache *lc);
extern void lc_destroy(struct lru_cache *lc);
extern void lc_set(struct lru_cache *lc, unsigned int enr, int index);
extern void lc_del(struct lru_cache *lc, struct lc_element *element);
extern struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr);
extern struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr);
extern struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr);
extern unsigned int lc_put(struct lru_cache *lc, struct lc_element *e);
extern void lc_changed(struct lru_cache *lc, struct lc_element *e);
struct seq_file;
extern size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc);
extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext,
void (*detail) (struct seq_file *, struct lc_element *));
/**
* lc_try_lock - can be used to stop lc_get() from changing the tracked set
* @lc: the lru cache to operate on
*
* Note that the reference counts and order on the active and lru lists may
* still change. Returns true if we acquired the lock.
*/
static inline int lc_try_lock(struct lru_cache *lc)
{
return !test_and_set_bit(__LC_DIRTY, &lc->flags);
}
/**
* lc_unlock - unlock @lc, allow lc_get() to change the set again
* @lc: the lru cache to operate on
*/
static inline void lc_unlock(struct lru_cache *lc)
{
clear_bit(__LC_DIRTY, &lc->flags);
smp_mb__after_clear_bit();
}
static inline int lc_is_used(struct lru_cache *lc, unsigned int enr)
{
struct lc_element *e = lc_find(lc, enr);
return e && e->refcnt;
}
#define lc_entry(ptr, type, member) \
container_of(ptr, type, member)
extern struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i);
extern unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e);
#endif
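Taken together, the API above is meant to be used in a get/commit/put cycle; a minimal sketch of that cycle follows (names, sizes and the commit step are illustrative, not DRBD's actual code):

/* Usage sketch only - the caller must provide its own exclusive locking
 * around lc_get()/lc_put(), e.g. via lc_try_lock()/lc_unlock(). */
static int example_use_lru_cache(struct kmem_cache *cache)
{
	struct lru_cache *lc;
	struct lc_element *e;

	/* 61 slots of plain lc_elements, embedded at offset 0 */
	lc = lc_create("example", cache, 61, sizeof(struct lc_element), 0);
	if (!lc)
		return -ENOMEM;

	e = lc_get(lc, 42);	/* pull extent #42 into the active set */
	if (e) {
		if (e->lc_number != 42) {
			/* label change pending: write the transaction to
			 * stable storage, then commit it in memory */
			lc_changed(lc, e);
		}
		/* ... perform the IO covered by this extent ... */
		lc_put(lc, e);	/* drop the reference again */
	}

	lc_destroy(lc);
	return 0;
}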

View File

@@ -26,14 +26,15 @@
/* Auxiliary data to use in generating the audit record. */
struct common_audit_data {
char type;
#define LSM_AUDIT_DATA_FS 1
#define LSM_AUDIT_DATA_NET 2
#define LSM_AUDIT_DATA_CAP 3
#define LSM_AUDIT_DATA_IPC 4
#define LSM_AUDIT_DATA_TASK 5
#define LSM_AUDIT_DATA_KEY 6
#define LSM_AUDIT_NO_AUDIT 7
char type;
#define LSM_AUDIT_DATA_FS 1
#define LSM_AUDIT_DATA_NET 2
#define LSM_AUDIT_DATA_CAP 3
#define LSM_AUDIT_DATA_IPC 4
#define LSM_AUDIT_DATA_TASK 5
#define LSM_AUDIT_DATA_KEY 6
#define LSM_AUDIT_NO_AUDIT 7
#define LSM_AUDIT_DATA_KMOD 8
struct task_struct *tsk;
union {
struct {
@@ -66,6 +67,7 @@ struct common_audit_data {
char *key_desc;
} key_struct;
#endif
char *kmod_name;
} u;
/* this union contains LSM specific data */
union {

View File

@@ -96,6 +96,10 @@ struct da9034_touch_pdata {
int y_inverted;
};
struct da9034_backlight_pdata {
int output_current; /* output current of WLED, from 0-31 (in mA) */
};
/* DA9030 battery charger data */
struct power_supply_info;

69
include/linux/mfd/mcp.h Normal file
View File

@@ -0,0 +1,69 @@
/*
* linux/drivers/mfd/mcp.h
*
* Copyright (C) 2001 Russell King, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*/
#ifndef MCP_H
#define MCP_H
#include <mach/dma.h>
struct mcp_ops;
struct mcp {
struct module *owner;
struct mcp_ops *ops;
spinlock_t lock;
int use_count;
unsigned int sclk_rate;
unsigned int rw_timeout;
dma_device_t dma_audio_rd;
dma_device_t dma_audio_wr;
dma_device_t dma_telco_rd;
dma_device_t dma_telco_wr;
struct device attached_device;
int gpio_base;
};
struct mcp_ops {
void (*set_telecom_divisor)(struct mcp *, unsigned int);
void (*set_audio_divisor)(struct mcp *, unsigned int);
void (*reg_write)(struct mcp *, unsigned int, unsigned int);
unsigned int (*reg_read)(struct mcp *, unsigned int);
void (*enable)(struct mcp *);
void (*disable)(struct mcp *);
};
void mcp_set_telecom_divisor(struct mcp *, unsigned int);
void mcp_set_audio_divisor(struct mcp *, unsigned int);
void mcp_reg_write(struct mcp *, unsigned int, unsigned int);
unsigned int mcp_reg_read(struct mcp *, unsigned int);
void mcp_enable(struct mcp *);
void mcp_disable(struct mcp *);
#define mcp_get_sclk_rate(mcp) ((mcp)->sclk_rate)
struct mcp *mcp_host_alloc(struct device *, size_t);
int mcp_host_register(struct mcp *);
void mcp_host_unregister(struct mcp *);
struct mcp_driver {
struct device_driver drv;
int (*probe)(struct mcp *);
void (*remove)(struct mcp *);
int (*suspend)(struct mcp *, pm_message_t);
int (*resume)(struct mcp *);
};
int mcp_driver_register(struct mcp_driver *);
void mcp_driver_unregister(struct mcp_driver *);
#define mcp_get_drvdata(mcp) dev_get_drvdata(&(mcp)->attached_device)
#define mcp_set_drvdata(mcp,d) dev_set_drvdata(&(mcp)->attached_device, d)
#define mcp_priv(mcp) ((void *)((mcp)+1))
#endif
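Clients of this bus register an mcp_driver whose probe() receives the struct mcp; a bare-bones skeleton (driver name and behaviour are placeholders, not an in-tree driver) might look like:

/* Illustrative MCP client driver skeleton. */
static int example_probe(struct mcp *mcp)
{
	mcp_enable(mcp);		/* gate the SIB clock on */
	mcp_set_drvdata(mcp, NULL);	/* stash per-device state here */
	return 0;
}

static void example_remove(struct mcp *mcp)
{
	mcp_disable(mcp);
}

static struct mcp_driver example_mcp_driver = {
	.drv	= { .name = "example-mcp" },
	.probe	= example_probe,
	.remove	= example_remove,
};

/* module init/exit would call mcp_driver_register(&example_mcp_driver)
 * and mcp_driver_unregister(&example_mcp_driver) */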

View File

@@ -0,0 +1,272 @@
/*
* MFD driver for twl4030 codec submodule
*
* Author: Peter Ujfalusi <peter.ujfalusi@nokia.com>
*
* Copyright: (C) 2009 Nokia Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#ifndef __TWL4030_CODEC_H__
#define __TWL4030_CODEC_H__
/* Codec registers */
#define TWL4030_REG_CODEC_MODE 0x01
#define TWL4030_REG_OPTION 0x02
#define TWL4030_REG_UNKNOWN 0x03
#define TWL4030_REG_MICBIAS_CTL 0x04
#define TWL4030_REG_ANAMICL 0x05
#define TWL4030_REG_ANAMICR 0x06
#define TWL4030_REG_AVADC_CTL 0x07
#define TWL4030_REG_ADCMICSEL 0x08
#define TWL4030_REG_DIGMIXING 0x09
#define TWL4030_REG_ATXL1PGA 0x0A
#define TWL4030_REG_ATXR1PGA 0x0B
#define TWL4030_REG_AVTXL2PGA 0x0C
#define TWL4030_REG_AVTXR2PGA 0x0D
#define TWL4030_REG_AUDIO_IF 0x0E
#define TWL4030_REG_VOICE_IF 0x0F
#define TWL4030_REG_ARXR1PGA 0x10
#define TWL4030_REG_ARXL1PGA 0x11
#define TWL4030_REG_ARXR2PGA 0x12
#define TWL4030_REG_ARXL2PGA 0x13
#define TWL4030_REG_VRXPGA 0x14
#define TWL4030_REG_VSTPGA 0x15
#define TWL4030_REG_VRX2ARXPGA 0x16
#define TWL4030_REG_AVDAC_CTL 0x17
#define TWL4030_REG_ARX2VTXPGA 0x18
#define TWL4030_REG_ARXL1_APGA_CTL 0x19
#define TWL4030_REG_ARXR1_APGA_CTL 0x1A
#define TWL4030_REG_ARXL2_APGA_CTL 0x1B
#define TWL4030_REG_ARXR2_APGA_CTL 0x1C
#define TWL4030_REG_ATX2ARXPGA 0x1D
#define TWL4030_REG_BT_IF 0x1E
#define TWL4030_REG_BTPGA 0x1F
#define TWL4030_REG_BTSTPGA 0x20
#define TWL4030_REG_EAR_CTL 0x21
#define TWL4030_REG_HS_SEL 0x22
#define TWL4030_REG_HS_GAIN_SET 0x23
#define TWL4030_REG_HS_POPN_SET 0x24
#define TWL4030_REG_PREDL_CTL 0x25
#define TWL4030_REG_PREDR_CTL 0x26
#define TWL4030_REG_PRECKL_CTL 0x27
#define TWL4030_REG_PRECKR_CTL 0x28
#define TWL4030_REG_HFL_CTL 0x29
#define TWL4030_REG_HFR_CTL 0x2A
#define TWL4030_REG_ALC_CTL 0x2B
#define TWL4030_REG_ALC_SET1 0x2C
#define TWL4030_REG_ALC_SET2 0x2D
#define TWL4030_REG_BOOST_CTL 0x2E
#define TWL4030_REG_SOFTVOL_CTL 0x2F
#define TWL4030_REG_DTMF_FREQSEL 0x30
#define TWL4030_REG_DTMF_TONEXT1H 0x31
#define TWL4030_REG_DTMF_TONEXT1L 0x32
#define TWL4030_REG_DTMF_TONEXT2H 0x33
#define TWL4030_REG_DTMF_TONEXT2L 0x34
#define TWL4030_REG_DTMF_TONOFF 0x35
#define TWL4030_REG_DTMF_WANONOFF 0x36
#define TWL4030_REG_I2S_RX_SCRAMBLE_H 0x37
#define TWL4030_REG_I2S_RX_SCRAMBLE_M 0x38
#define TWL4030_REG_I2S_RX_SCRAMBLE_L 0x39
#define TWL4030_REG_APLL_CTL 0x3A
#define TWL4030_REG_DTMF_CTL 0x3B
#define TWL4030_REG_DTMF_PGA_CTL2 0x3C
#define TWL4030_REG_DTMF_PGA_CTL1 0x3D
#define TWL4030_REG_MISC_SET_1 0x3E
#define TWL4030_REG_PCMBTMUX 0x3F
#define TWL4030_REG_RX_PATH_SEL 0x43
#define TWL4030_REG_VDL_APGA_CTL 0x44
#define TWL4030_REG_VIBRA_CTL 0x45
#define TWL4030_REG_VIBRA_SET 0x46
#define TWL4030_REG_VIBRA_PWM_SET 0x47
#define TWL4030_REG_ANAMIC_GAIN 0x48
#define TWL4030_REG_MISC_SET_2 0x49
/* Bitfield Definitions */
/* TWL4030_CODEC_MODE (0x01) Fields */
#define TWL4030_APLL_RATE 0xF0
#define TWL4030_APLL_RATE_8000 0x00
#define TWL4030_APLL_RATE_11025 0x10
#define TWL4030_APLL_RATE_12000 0x20
#define TWL4030_APLL_RATE_16000 0x40
#define TWL4030_APLL_RATE_22050 0x50
#define TWL4030_APLL_RATE_24000 0x60
#define TWL4030_APLL_RATE_32000 0x80
#define TWL4030_APLL_RATE_44100 0x90
#define TWL4030_APLL_RATE_48000 0xA0
#define TWL4030_APLL_RATE_96000 0xE0
#define TWL4030_SEL_16K 0x08
#define TWL4030_CODECPDZ 0x02
#define TWL4030_OPT_MODE 0x01
#define TWL4030_OPTION_1 (1 << 0)
#define TWL4030_OPTION_2 (0 << 0)
/* TWL4030_OPTION (0x02) Fields */
#define TWL4030_ATXL1_EN (1 << 0)
#define TWL4030_ATXR1_EN (1 << 1)
#define TWL4030_ATXL2_VTXL_EN (1 << 2)
#define TWL4030_ATXR2_VTXR_EN (1 << 3)
#define TWL4030_ARXL1_VRX_EN (1 << 4)
#define TWL4030_ARXR1_EN (1 << 5)
#define TWL4030_ARXL2_EN (1 << 6)
#define TWL4030_ARXR2_EN (1 << 7)
/* TWL4030_REG_MICBIAS_CTL (0x04) Fields */
#define TWL4030_MICBIAS2_CTL 0x40
#define TWL4030_MICBIAS1_CTL 0x20
#define TWL4030_HSMICBIAS_EN 0x04
#define TWL4030_MICBIAS2_EN 0x02
#define TWL4030_MICBIAS1_EN 0x01
/* ANAMICL (0x05) Fields */
#define TWL4030_CNCL_OFFSET_START 0x80
#define TWL4030_OFFSET_CNCL_SEL 0x60
#define TWL4030_OFFSET_CNCL_SEL_ARX1 0x00
#define TWL4030_OFFSET_CNCL_SEL_ARX2 0x20
#define TWL4030_OFFSET_CNCL_SEL_VRX 0x40
#define TWL4030_OFFSET_CNCL_SEL_ALL 0x60
#define TWL4030_MICAMPL_EN 0x10
#define TWL4030_CKMIC_EN 0x08
#define TWL4030_AUXL_EN 0x04
#define TWL4030_HSMIC_EN 0x02
#define TWL4030_MAINMIC_EN 0x01
/* ANAMICR (0x06) Fields */
#define TWL4030_MICAMPR_EN 0x10
#define TWL4030_AUXR_EN 0x04
#define TWL4030_SUBMIC_EN 0x01
/* AVADC_CTL (0x07) Fields */
#define TWL4030_ADCL_EN 0x08
#define TWL4030_AVADC_CLK_PRIORITY 0x04
#define TWL4030_ADCR_EN 0x02
/* TWL4030_REG_ADCMICSEL (0x08) Fields */
#define TWL4030_DIGMIC1_EN 0x08
#define TWL4030_TX2IN_SEL 0x04
#define TWL4030_DIGMIC0_EN 0x02
#define TWL4030_TX1IN_SEL 0x01
/* AUDIO_IF (0x0E) Fields */
#define TWL4030_AIF_SLAVE_EN 0x80
#define TWL4030_DATA_WIDTH 0x60
#define TWL4030_DATA_WIDTH_16S_16W 0x00
#define TWL4030_DATA_WIDTH_32S_16W 0x40
#define TWL4030_DATA_WIDTH_32S_24W 0x60
#define TWL4030_AIF_FORMAT 0x18
#define TWL4030_AIF_FORMAT_CODEC 0x00
#define TWL4030_AIF_FORMAT_LEFT 0x08
#define TWL4030_AIF_FORMAT_RIGHT 0x10
#define TWL4030_AIF_FORMAT_TDM 0x18
#define TWL4030_AIF_TRI_EN 0x04
#define TWL4030_CLK256FS_EN 0x02
#define TWL4030_AIF_EN 0x01
/* VOICE_IF (0x0F) Fields */
#define TWL4030_VIF_SLAVE_EN 0x80
#define TWL4030_VIF_DIN_EN 0x40
#define TWL4030_VIF_DOUT_EN 0x20
#define TWL4030_VIF_SWAP 0x10
#define TWL4030_VIF_FORMAT 0x08
#define TWL4030_VIF_TRI_EN 0x04
#define TWL4030_VIF_SUB_EN 0x02
#define TWL4030_VIF_EN 0x01
/* EAR_CTL (0x21) */
#define TWL4030_EAR_GAIN 0x30
/* HS_GAIN_SET (0x23) Fields */
#define TWL4030_HSR_GAIN 0x0C
#define TWL4030_HSR_GAIN_PWR_DOWN 0x00
#define TWL4030_HSR_GAIN_PLUS_6DB 0x04
#define TWL4030_HSR_GAIN_0DB 0x08
#define TWL4030_HSR_GAIN_MINUS_6DB 0x0C
#define TWL4030_HSL_GAIN 0x03
#define TWL4030_HSL_GAIN_PWR_DOWN 0x00
#define TWL4030_HSL_GAIN_PLUS_6DB 0x01
#define TWL4030_HSL_GAIN_0DB 0x02
#define TWL4030_HSL_GAIN_MINUS_6DB 0x03
/* HS_POPN_SET (0x24) Fields */
#define TWL4030_VMID_EN 0x40
#define TWL4030_EXTMUTE 0x20
#define TWL4030_RAMP_DELAY 0x1C
#define TWL4030_RAMP_DELAY_20MS 0x00
#define TWL4030_RAMP_DELAY_40MS 0x04
#define TWL4030_RAMP_DELAY_81MS 0x08
#define TWL4030_RAMP_DELAY_161MS 0x0C
#define TWL4030_RAMP_DELAY_323MS 0x10
#define TWL4030_RAMP_DELAY_645MS 0x14
#define TWL4030_RAMP_DELAY_1291MS 0x18
#define TWL4030_RAMP_DELAY_2581MS 0x1C
#define TWL4030_RAMP_EN 0x02
/* PREDL_CTL (0x25) */
#define TWL4030_PREDL_GAIN 0x30
/* PREDR_CTL (0x26) */
#define TWL4030_PREDR_GAIN 0x30
/* PRECKL_CTL (0x27) */
#define TWL4030_PRECKL_GAIN 0x30
/* PRECKR_CTL (0x28) */
#define TWL4030_PRECKR_GAIN 0x30
/* HFL_CTL (0x29, 0x2A) Fields */
#define TWL4030_HF_CTL_HB_EN 0x04
#define TWL4030_HF_CTL_LOOP_EN 0x08
#define TWL4030_HF_CTL_RAMP_EN 0x10
#define TWL4030_HF_CTL_REF_EN 0x20
/* APLL_CTL (0x3A) Fields */
#define TWL4030_APLL_EN 0x10
#define TWL4030_APLL_INFREQ 0x0F
#define TWL4030_APLL_INFREQ_19200KHZ 0x05
#define TWL4030_APLL_INFREQ_26000KHZ 0x06
#define TWL4030_APLL_INFREQ_38400KHZ 0x0F
/* REG_MISC_SET_1 (0x3E) Fields */
#define TWL4030_CLK64_EN 0x80
#define TWL4030_SCRAMBLE_EN 0x40
#define TWL4030_FMLOOP_EN 0x20
#define TWL4030_SMOOTH_ANAVOL_EN 0x02
#define TWL4030_DIGMIC_LR_SWAP_EN 0x01
/* VIBRA_CTL (0x45) */
#define TWL4030_VIBRA_EN 0x01
#define TWL4030_VIBRA_DIR 0x02
#define TWL4030_VIBRA_AUDIO_SEL_L1 (0x00 << 2)
#define TWL4030_VIBRA_AUDIO_SEL_R1 (0x01 << 2)
#define TWL4030_VIBRA_AUDIO_SEL_L2 (0x02 << 2)
#define TWL4030_VIBRA_AUDIO_SEL_R2 (0x03 << 2)
#define TWL4030_VIBRA_SEL 0x10
#define TWL4030_VIBRA_DIR_SEL 0x20
/* TWL4030 codec resource IDs */
enum twl4030_codec_res {
TWL4030_CODEC_RES_POWER = 0,
TWL4030_CODEC_RES_APLL,
TWL4030_CODEC_RES_MAX,
};
int twl4030_codec_disable_resource(enum twl4030_codec_res id);
int twl4030_codec_enable_resource(enum twl4030_codec_res id);
unsigned int twl4030_codec_get_mclk(void);
#endif /* End of __TWL4030_CODEC_H__ */

258
include/linux/mfd/ucb1x00.h Normal file
View File

@@ -0,0 +1,258 @@
/*
* linux/include/mfd/ucb1x00.h
*
* Copyright (C) 2001 Russell King, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*/
#ifndef UCB1200_H
#define UCB1200_H
#include <linux/mfd/mcp.h>
#include <linux/gpio.h>
#define UCB_IO_DATA 0x00
#define UCB_IO_DIR 0x01
#define UCB_IO_0 (1 << 0)
#define UCB_IO_1 (1 << 1)
#define UCB_IO_2 (1 << 2)
#define UCB_IO_3 (1 << 3)
#define UCB_IO_4 (1 << 4)
#define UCB_IO_5 (1 << 5)
#define UCB_IO_6 (1 << 6)
#define UCB_IO_7 (1 << 7)
#define UCB_IO_8 (1 << 8)
#define UCB_IO_9 (1 << 9)
#define UCB_IE_RIS 0x02
#define UCB_IE_FAL 0x03
#define UCB_IE_STATUS 0x04
#define UCB_IE_CLEAR 0x04
#define UCB_IE_ADC (1 << 11)
#define UCB_IE_TSPX (1 << 12)
#define UCB_IE_TSMX (1 << 13)
#define UCB_IE_TCLIP (1 << 14)
#define UCB_IE_ACLIP (1 << 15)
#define UCB_IRQ_TSPX 12
#define UCB_TC_A 0x05
#define UCB_TC_A_LOOP (1 << 7) /* UCB1200 */
#define UCB_TC_A_AMPL (1 << 7) /* UCB1300 */
#define UCB_TC_B 0x06
#define UCB_TC_B_VOICE_ENA (1 << 3)
#define UCB_TC_B_CLIP (1 << 4)
#define UCB_TC_B_ATT (1 << 6)
#define UCB_TC_B_SIDE_ENA (1 << 11)
#define UCB_TC_B_MUTE (1 << 13)
#define UCB_TC_B_IN_ENA (1 << 14)
#define UCB_TC_B_OUT_ENA (1 << 15)
#define UCB_AC_A 0x07
#define UCB_AC_B 0x08
#define UCB_AC_B_LOOP (1 << 8)
#define UCB_AC_B_MUTE (1 << 13)
#define UCB_AC_B_IN_ENA (1 << 14)
#define UCB_AC_B_OUT_ENA (1 << 15)
#define UCB_TS_CR 0x09
#define UCB_TS_CR_TSMX_POW (1 << 0)
#define UCB_TS_CR_TSPX_POW (1 << 1)
#define UCB_TS_CR_TSMY_POW (1 << 2)
#define UCB_TS_CR_TSPY_POW (1 << 3)
#define UCB_TS_CR_TSMX_GND (1 << 4)
#define UCB_TS_CR_TSPX_GND (1 << 5)
#define UCB_TS_CR_TSMY_GND (1 << 6)
#define UCB_TS_CR_TSPY_GND (1 << 7)
#define UCB_TS_CR_MODE_INT (0 << 8)
#define UCB_TS_CR_MODE_PRES (1 << 8)
#define UCB_TS_CR_MODE_POS (2 << 8)
#define UCB_TS_CR_BIAS_ENA (1 << 11)
#define UCB_TS_CR_TSPX_LOW (1 << 12)
#define UCB_TS_CR_TSMX_LOW (1 << 13)
#define UCB_ADC_CR 0x0a
#define UCB_ADC_SYNC_ENA (1 << 0)
#define UCB_ADC_VREFBYP_CON (1 << 1)
#define UCB_ADC_INP_TSPX (0 << 2)
#define UCB_ADC_INP_TSMX (1 << 2)
#define UCB_ADC_INP_TSPY (2 << 2)
#define UCB_ADC_INP_TSMY (3 << 2)
#define UCB_ADC_INP_AD0 (4 << 2)
#define UCB_ADC_INP_AD1 (5 << 2)
#define UCB_ADC_INP_AD2 (6 << 2)
#define UCB_ADC_INP_AD3 (7 << 2)
#define UCB_ADC_EXT_REF (1 << 5)
#define UCB_ADC_START (1 << 7)
#define UCB_ADC_ENA (1 << 15)
#define UCB_ADC_DATA 0x0b
#define UCB_ADC_DAT_VAL (1 << 15)
#define UCB_ADC_DAT(x) (((x) & 0x7fe0) >> 5)
#define UCB_ID 0x0c
#define UCB_ID_1200 0x1004
#define UCB_ID_1300 0x1005
#define UCB_ID_TC35143 0x9712
#define UCB_MODE 0x0d
#define UCB_MODE_DYN_VFLAG_ENA (1 << 12)
#define UCB_MODE_AUD_OFF_CAN (1 << 13)
struct ucb1x00_irq {
void *devid;
void (*fn)(int, void *);
};
struct ucb1x00 {
spinlock_t lock;
struct mcp *mcp;
unsigned int irq;
struct semaphore adc_sem;
spinlock_t io_lock;
u16 id;
u16 io_dir;
u16 io_out;
u16 adc_cr;
u16 irq_fal_enbl;
u16 irq_ris_enbl;
struct ucb1x00_irq irq_handler[16];
struct device dev;
struct list_head node;
struct list_head devs;
struct gpio_chip gpio;
};
struct ucb1x00_driver;
struct ucb1x00_dev {
struct list_head dev_node;
struct list_head drv_node;
struct ucb1x00 *ucb;
struct ucb1x00_driver *drv;
void *priv;
};
struct ucb1x00_driver {
struct list_head node;
struct list_head devs;
int (*add)(struct ucb1x00_dev *dev);
void (*remove)(struct ucb1x00_dev *dev);
int (*suspend)(struct ucb1x00_dev *dev, pm_message_t state);
int (*resume)(struct ucb1x00_dev *dev);
};
#define classdev_to_ucb1x00(cd) container_of(cd, struct ucb1x00, dev)
int ucb1x00_register_driver(struct ucb1x00_driver *);
void ucb1x00_unregister_driver(struct ucb1x00_driver *);
/**
* ucb1x00_clkrate - return the UCB1x00 SIB clock rate
* @ucb: UCB1x00 structure describing chip
*
* Return the SIB clock rate in Hz.
*/
static inline unsigned int ucb1x00_clkrate(struct ucb1x00 *ucb)
{
return mcp_get_sclk_rate(ucb->mcp);
}
/**
* ucb1x00_enable - enable the UCB1x00 SIB clock
* @ucb: UCB1x00 structure describing chip
*
* Enable the SIB clock. This can be called multiple times.
*/
static inline void ucb1x00_enable(struct ucb1x00 *ucb)
{
mcp_enable(ucb->mcp);
}
/**
* ucb1x00_disable - disable the UCB1x00 SIB clock
* @ucb: UCB1x00 structure describing chip
*
* Disable the SIB clock. The SIB clock will only be disabled
* when the number of ucb1x00_enable calls match the number of
* ucb1x00_disable calls.
*/
static inline void ucb1x00_disable(struct ucb1x00 *ucb)
{
mcp_disable(ucb->mcp);
}
/**
* ucb1x00_reg_write - write a UCB1x00 register
* @ucb: UCB1x00 structure describing chip
* @reg: UCB1x00 4-bit register index to write
* @val: UCB1x00 16-bit value to write
*
* Write the UCB1x00 register @reg with value @val. The SIB
* clock must be running for this function to return.
*/
static inline void ucb1x00_reg_write(struct ucb1x00 *ucb, unsigned int reg, unsigned int val)
{
mcp_reg_write(ucb->mcp, reg, val);
}
/**
* ucb1x00_reg_read - read a UCB1x00 register
* @ucb: UCB1x00 structure describing chip
* @reg: UCB1x00 4-bit register index to read
*
* Read the UCB1x00 register @reg and return its value. The SIB
* clock must be running for this function to return.
*/
static inline unsigned int ucb1x00_reg_read(struct ucb1x00 *ucb, unsigned int reg)
{
return mcp_reg_read(ucb->mcp, reg);
}
/**
* ucb1x00_set_audio_divisor - set the SIB audio divisor
* @ucb: UCB1x00 structure describing chip
* @div: SIB clock divisor
*/
static inline void ucb1x00_set_audio_divisor(struct ucb1x00 *ucb, unsigned int div)
{
mcp_set_audio_divisor(ucb->mcp, div);
}
/**
* ucb1x00_set_telecom_divisor - set the SIB telecom divisor
* @ucb: UCB1x00 structure describing chip
* @div: SIB clock divisor
*/
static inline void ucb1x00_set_telecom_divisor(struct ucb1x00 *ucb, unsigned int div)
{
mcp_set_telecom_divisor(ucb->mcp, div);
}
void ucb1x00_io_set_dir(struct ucb1x00 *ucb, unsigned int, unsigned int);
void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int, unsigned int);
unsigned int ucb1x00_io_read(struct ucb1x00 *ucb);
#define UCB_NOSYNC (0)
#define UCB_SYNC (1)
unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync);
void ucb1x00_adc_enable(struct ucb1x00 *ucb);
void ucb1x00_adc_disable(struct ucb1x00 *ucb);
/*
* Which edges of the IRQ do you want to control today?
*/
#define UCB_RISING (1 << 0)
#define UCB_FALLING (1 << 1)
int ucb1x00_hook_irq(struct ucb1x00 *ucb, unsigned int idx, void (*fn)(int, void *), void *devid);
void ucb1x00_enable_irq(struct ucb1x00 *ucb, unsigned int idx, int edges);
void ucb1x00_disable_irq(struct ucb1x00 *ucb, unsigned int idx, int edges);
int ucb1x00_free_irq(struct ucb1x00 *ucb, unsigned int idx, void *devid);
#endif
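Reading one of the auxiliary ADC inputs follows an enable/read/disable pattern; a short sketch (the channel choice and function name are arbitrary, not taken from an in-tree user):

/* Illustrative ADC read of the AD0 input. */
static unsigned int example_read_ad0(struct ucb1x00 *ucb)
{
	unsigned int val;

	ucb1x00_enable(ucb);		/* the SIB clock must be running */
	ucb1x00_adc_enable(ucb);
	val = ucb1x00_adc_read(ucb, UCB_ADC_INP_AD0, UCB_NOSYNC);
	ucb1x00_adc_disable(ucb);
	ucb1x00_disable(ucb);

	return val;
}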

View File

@@ -1212,7 +1212,7 @@
#define WM831X_LDO1_OK_SHIFT 0 /* LDO1_OK */
#define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */
#define WM831X_ISINK_MAX_ISEL 56
extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL];
#define WM831X_ISINK_MAX_ISEL 55
extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1];
#endif

View File

@@ -28,6 +28,7 @@
#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP 0x1404
#define SDIO_DEVICE_ID_INTEL_IWMC3200GPS 0x1405
#define SDIO_DEVICE_ID_INTEL_IWMC3200BT 0x1406
#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5 0x1407
#define SDIO_VENDOR_ID_MARVELL 0x02df
#define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103

View File

@@ -59,20 +59,24 @@ struct vifctl {
unsigned char vifc_flags; /* VIFF_ flags */
unsigned char vifc_threshold; /* ttl limit */
unsigned int vifc_rate_limit; /* Rate limiter values (NI) */
struct in_addr vifc_lcl_addr; /* Our address */
union {
struct in_addr vifc_lcl_addr; /* Local interface address */
int vifc_lcl_ifindex; /* Local interface index */
};
struct in_addr vifc_rmt_addr; /* IPIP tunnel addr */
};
#define VIFF_TUNNEL 0x1 /* IPIP tunnel */
#define VIFF_SRCRT 0x2 /* NI */
#define VIFF_REGISTER 0x4 /* register vif */
#define VIFF_TUNNEL 0x1 /* IPIP tunnel */
#define VIFF_SRCRT 0x2 /* NI */
#define VIFF_REGISTER 0x4 /* register vif */
#define VIFF_USE_IFINDEX 0x8 /* use vifc_lcl_ifindex instead of
vifc_lcl_addr to find an interface */
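With VIFF_USE_IFINDEX set, userspace names the local interface by index in vifc_lcl_ifindex instead of by address. A hedged userspace sketch (it assumes vifc_vifi and MRT_ADD_VIF from the full header, plus an already-open IGMP routing socket, none of which appear in this hunk):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute.h>

/* Sketch only: add a multicast VIF identified by interface index. */
static int add_vif_by_ifindex(int mrouter_fd, vifi_t vifi, int ifindex)
{
	struct vifctl vc;

	memset(&vc, 0, sizeof(vc));
	vc.vifc_vifi        = vifi;
	vc.vifc_flags       = VIFF_USE_IFINDEX;
	vc.vifc_threshold   = 1;
	vc.vifc_lcl_ifindex = ifindex;

	return setsockopt(mrouter_fd, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
}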
/*
* Cache manipulation structures for mrouted and PIMd
*/
struct mfcctl
{
struct mfcctl {
struct in_addr mfcc_origin; /* Origin of mcast */
struct in_addr mfcc_mcastgrp; /* Group in question */
vifi_t mfcc_parent; /* Where it arrived */
@@ -87,8 +91,7 @@ struct mfcctl
* Group count retrieval for mrouted
*/
struct sioc_sg_req
{
struct sioc_sg_req {
struct in_addr src;
struct in_addr grp;
unsigned long pktcnt;
@@ -100,8 +103,7 @@ struct sioc_sg_req
* To get vif packet counts
*/
struct sioc_vif_req
{
struct sioc_vif_req {
vifi_t vifi; /* Which iface */
unsigned long icount; /* In packets */
unsigned long ocount; /* Out packets */
@@ -114,8 +116,7 @@ struct sioc_vif_req
* data. Magically happens to be like an IP packet as per the original
*/
struct igmpmsg
{
struct igmpmsg {
__u32 unused1,unused2;
unsigned char im_msgtype; /* What is this */
unsigned char im_mbz; /* Must be zero */
@@ -176,8 +177,7 @@ static inline int ip_mr_init(void)
}
#endif
struct vif_device
{
struct vif_device {
struct net_device *dev; /* Device we are using */
unsigned long bytes_in,bytes_out;
unsigned long pkt_in,pkt_out; /* Statistics */
@@ -190,8 +190,7 @@ struct vif_device
#define VIFF_STATIC 0x8000
struct mfc_cache
{
struct mfc_cache {
struct mfc_cache *next; /* Next entry on cache line */
#ifdef CONFIG_NET_NS
struct net *mfc_net;

View File

@@ -75,8 +75,7 @@ struct mif6ctl {
* Cache manipulation structures for mrouted and PIMd
*/
struct mf6cctl
{
struct mf6cctl {
struct sockaddr_in6 mf6cc_origin; /* Origin of mcast */
struct sockaddr_in6 mf6cc_mcastgrp; /* Group in question */
mifi_t mf6cc_parent; /* Where it arrived */
@@ -87,8 +86,7 @@ struct mf6cctl
* Group count retrieval for pim6sd
*/
struct sioc_sg_req6
{
struct sioc_sg_req6 {
struct sockaddr_in6 src;
struct sockaddr_in6 grp;
unsigned long pktcnt;
@@ -100,8 +98,7 @@ struct sioc_sg_req6
* To get vif packet counts
*/
struct sioc_mif_req6
{
struct sioc_mif_req6 {
mifi_t mifi; /* Which iface */
unsigned long icount; /* In packets */
unsigned long ocount; /* Out packets */
@@ -172,8 +169,7 @@ static inline void ip6_mr_cleanup(void)
}
#endif
struct mif_device
{
struct mif_device {
struct net_device *dev; /* Device we are using */
unsigned long bytes_in,bytes_out;
unsigned long pkt_in,pkt_out; /* Statistics */
@@ -185,8 +181,7 @@ struct mif_device
#define VIFF_STATIC 0x8000
struct mfc6_cache
{
struct mfc6_cache {
struct mfc6_cache *next; /* Next entry on cache line */
#ifdef CONFIG_NET_NS
struct net *mfc6_net;

View File

@@ -4,8 +4,7 @@
#include <linux/types.h>
#include <linux/netlink.h>
struct ndmsg
{
struct ndmsg {
__u8 ndm_family;
__u8 ndm_pad1;
__u16 ndm_pad2;
@@ -15,8 +14,7 @@ struct ndmsg
__u8 ndm_type;
};
enum
{
enum {
NDA_UNSPEC,
NDA_DST,
NDA_LLADDR,
@@ -56,8 +54,7 @@ enum
NUD_PERMANENT is also cannot be deleted by garbage collectors.
*/
struct nda_cacheinfo
{
struct nda_cacheinfo {
__u32 ndm_confirmed;
__u32 ndm_used;
__u32 ndm_updated;
@@ -89,8 +86,7 @@ struct nda_cacheinfo
* device.
****/
struct ndt_stats
{
struct ndt_stats {
__u64 ndts_allocs;
__u64 ndts_destroys;
__u64 ndts_hash_grows;
@@ -124,15 +120,13 @@ enum {
};
#define NDTPA_MAX (__NDTPA_MAX - 1)
struct ndtmsg
{
struct ndtmsg {
__u8 ndtm_family;
__u8 ndtm_pad1;
__u16 ndtm_pad2;
};
struct ndt_config
{
struct ndt_config {
__u16 ndtc_key_len;
__u16 ndtc_entry_size;
__u32 ndtc_entries;

View File

@@ -41,6 +41,7 @@
#define SYS_SENDMSG 16 /* sys_sendmsg(2) */
#define SYS_RECVMSG 17 /* sys_recvmsg(2) */
#define SYS_ACCEPT4 18 /* sys_accept4(2) */
#define SYS_RECVMMSG 19 /* sys_recvmmsg(2) */
typedef enum {
SS_FREE = 0, /* not allocated */
@@ -198,9 +199,13 @@ struct proto_ops {
struct pipe_inode_info *pipe, size_t len, unsigned int flags);
};
#define DECLARE_SOCKADDR(type, dst, src) \
type dst = ({ __sockaddr_check_size(sizeof(*dst)); (type) src; })
struct net_proto_family {
int family;
int (*create)(struct net *net, struct socket *sock, int protocol);
int (*create)(struct net *net, struct socket *sock,
int protocol, int kern);
struct module *owner;
};
@@ -263,89 +268,6 @@ extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
extern int kernel_sock_shutdown(struct socket *sock,
enum sock_shutdown_cmd how);
#ifndef CONFIG_SMP
#define SOCKOPS_WRAPPED(name) name
#define SOCKOPS_WRAP(name, fam)
#else
#define SOCKOPS_WRAPPED(name) __unlocked_##name
#define SOCKCALL_WRAP(name, call, parms, args) \
static int __lock_##name##_##call parms \
{ \
int ret; \
lock_kernel(); \
ret = __unlocked_##name##_ops.call args ;\
unlock_kernel(); \
return ret; \
}
#define SOCKCALL_UWRAP(name, call, parms, args) \
static unsigned int __lock_##name##_##call parms \
{ \
int ret; \
lock_kernel(); \
ret = __unlocked_##name##_ops.call args ;\
unlock_kernel(); \
return ret; \
}
#define SOCKOPS_WRAP(name, fam) \
SOCKCALL_WRAP(name, release, (struct socket *sock), (sock)) \
SOCKCALL_WRAP(name, bind, (struct socket *sock, struct sockaddr *uaddr, int addr_len), \
(sock, uaddr, addr_len)) \
SOCKCALL_WRAP(name, connect, (struct socket *sock, struct sockaddr * uaddr, \
int addr_len, int flags), \
(sock, uaddr, addr_len, flags)) \
SOCKCALL_WRAP(name, socketpair, (struct socket *sock1, struct socket *sock2), \
(sock1, sock2)) \
SOCKCALL_WRAP(name, accept, (struct socket *sock, struct socket *newsock, \
int flags), (sock, newsock, flags)) \
SOCKCALL_WRAP(name, getname, (struct socket *sock, struct sockaddr *uaddr, \
int *addr_len, int peer), (sock, uaddr, addr_len, peer)) \
SOCKCALL_UWRAP(name, poll, (struct file *file, struct socket *sock, struct poll_table_struct *wait), \
(file, sock, wait)) \
SOCKCALL_WRAP(name, ioctl, (struct socket *sock, unsigned int cmd, \
unsigned long arg), (sock, cmd, arg)) \
SOCKCALL_WRAP(name, compat_ioctl, (struct socket *sock, unsigned int cmd, \
unsigned long arg), (sock, cmd, arg)) \
SOCKCALL_WRAP(name, listen, (struct socket *sock, int len), (sock, len)) \
SOCKCALL_WRAP(name, shutdown, (struct socket *sock, int flags), (sock, flags)) \
SOCKCALL_WRAP(name, setsockopt, (struct socket *sock, int level, int optname, \
char __user *optval, unsigned int optlen), (sock, level, optname, optval, optlen)) \
SOCKCALL_WRAP(name, getsockopt, (struct socket *sock, int level, int optname, \
char __user *optval, int __user *optlen), (sock, level, optname, optval, optlen)) \
SOCKCALL_WRAP(name, sendmsg, (struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t len), \
(iocb, sock, m, len)) \
SOCKCALL_WRAP(name, recvmsg, (struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t len, int flags), \
(iocb, sock, m, len, flags)) \
SOCKCALL_WRAP(name, mmap, (struct file *file, struct socket *sock, struct vm_area_struct *vma), \
(file, sock, vma)) \
\
static const struct proto_ops name##_ops = { \
.family = fam, \
.owner = THIS_MODULE, \
.release = __lock_##name##_release, \
.bind = __lock_##name##_bind, \
.connect = __lock_##name##_connect, \
.socketpair = __lock_##name##_socketpair, \
.accept = __lock_##name##_accept, \
.getname = __lock_##name##_getname, \
.poll = __lock_##name##_poll, \
.ioctl = __lock_##name##_ioctl, \
.compat_ioctl = __lock_##name##_compat_ioctl, \
.listen = __lock_##name##_listen, \
.shutdown = __lock_##name##_shutdown, \
.setsockopt = __lock_##name##_setsockopt, \
.getsockopt = __lock_##name##_getsockopt, \
.sendmsg = __lock_##name##_sendmsg, \
.recvmsg = __lock_##name##_recvmsg, \
.mmap = __lock_##name##_mmap, \
};
#endif
#define MODULE_ALIAS_NETPROTO(proto) \
MODULE_ALIAS("net-pf-" __stringify(proto))
@@ -358,6 +280,7 @@ static const struct proto_ops name##_ops = { \
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#include <linux/ratelimit.h>
extern struct ratelimit_state net_ratelimit_state;
#endif

View File

@@ -63,30 +63,69 @@ struct wireless_dev;
#define HAVE_FREE_NETDEV /* free_netdev() */
#define HAVE_NETDEV_PRIV /* netdev_priv() */
#define NET_XMIT_SUCCESS 0
#define NET_XMIT_DROP 1 /* skb dropped */
#define NET_XMIT_CN 2 /* congestion notification */
#define NET_XMIT_POLICED 3 /* skb is shot by police */
#define NET_XMIT_MASK 0xFFFF /* qdisc flags in net/sch_generic.h */
/* Backlog congestion levels */
#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
#define NET_RX_DROP 1 /* packet dropped */
#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
#define NET_RX_DROP 1 /* packet dropped */
/*
* Transmit return codes: transmit return codes originate from three different
* namespaces:
*
* - qdisc return codes
* - driver transmit return codes
* - errno values
*
* Drivers are allowed to return any one of those in their hard_start_xmit()
* function. Real network devices commonly used with qdiscs should only return
* the driver transmit return codes though - when qdiscs are used, the actual
* transmission happens asynchronously, so the value is not propagated to
* higher layers. Virtual network devices transmit synchronously, in this case
* the driver transmit return codes are consumed by dev_queue_xmit(), all
* others are propagated to higher layers.
*/
/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS 0x00
#define NET_XMIT_DROP 0x01 /* skb dropped */
#define NET_XMIT_CN 0x02 /* congestion notification */
#define NET_XMIT_POLICED 0x03 /* skb is shot by police */
#define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
* indicates that the device will soon be dropping packets, or already drops
* some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e) ((e) == NET_XMIT_CN? 0 : (e))
#define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
/* Driver transmit return codes */
#define NETDEV_TX_MASK 0xf0
enum netdev_tx {
NETDEV_TX_OK = 0, /* driver took care of packet */
NETDEV_TX_BUSY, /* driver tx path was busy*/
NETDEV_TX_LOCKED = -1, /* driver tx lock was already taken */
__NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
NETDEV_TX_OK = 0x00, /* driver took care of packet */
NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;
/*
* Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
* hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
*/
static inline bool dev_xmit_complete(int rc)
{
/*
* Positive cases with an skb consumed by a driver:
* - successful transmission (rc == NETDEV_TX_OK)
* - error while transmitting (rc < 0)
* - error while queueing to a different device (rc & NET_XMIT_MASK)
*/
if (likely(rc < NET_XMIT_MASK))
return true;
return false;
}
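A transmit path can use dev_xmit_complete() to decide whether the skb is still its responsibility after calling the driver; roughly (a simplified illustration, not the actual dev_hard_start_xmit() body):

/* Simplified consumer of the return-code namespaces above. */
static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int rc = dev->netdev_ops->ndo_start_xmit(skb, dev);

	if (dev_xmit_complete(rc))
		return rc;	/* skb consumed: sent, dropped, or queued elsewhere */

	/* NETDEV_TX_BUSY / NETDEV_TX_LOCKED: the skb is still ours,
	 * requeue or retry it */
	return rc;
}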
#endif
#define MAX_ADDR_LEN 32 /* Largest hardware address length */
@@ -125,8 +164,7 @@ typedef enum netdev_tx netdev_tx_t;
* with byte counters.
*/
struct net_device_stats
{
struct net_device_stats {
unsigned long rx_packets; /* total packets received */
unsigned long tx_packets; /* total packets transmitted */
unsigned long rx_bytes; /* total bytes received */
@@ -179,8 +217,7 @@ struct neighbour;
struct neigh_parms;
struct sk_buff;
struct netif_rx_stats
{
struct netif_rx_stats {
unsigned total;
unsigned dropped;
unsigned time_squeeze;
@@ -189,8 +226,7 @@ struct netif_rx_stats
DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
struct dev_addr_list
{
struct dev_addr_list {
struct dev_addr_list *next;
u8 da_addr[MAX_ADDR_LEN];
u8 da_addrlen;
@@ -227,8 +263,7 @@ struct netdev_hw_addr_list {
int count;
};
struct hh_cache
{
struct hh_cache {
struct hh_cache *hh_next; /* Next entry */
atomic_t hh_refcnt; /* number of users */
/*
@@ -291,8 +326,7 @@ struct header_ops {
* code.
*/
enum netdev_state_t
{
enum netdev_state_t {
__LINK_STATE_START,
__LINK_STATE_PRESENT,
__LINK_STATE_NOCARRIER,
@@ -341,20 +375,20 @@ struct napi_struct {
struct sk_buff *skb;
};
enum
{
enum {
NAPI_STATE_SCHED, /* Poll is scheduled */
NAPI_STATE_DISABLE, /* Disable pending */
NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
};
enum {
enum gro_result {
GRO_MERGED,
GRO_MERGED_FREE,
GRO_HELD,
GRO_NORMAL,
GRO_DROP,
};
typedef enum gro_result gro_result_t;
extern void __napi_schedule(struct napi_struct *n);
@@ -457,8 +491,7 @@ static inline void napi_synchronize(const struct napi_struct *n)
# define napi_synchronize(n) barrier()
#endif
enum netdev_queue_state_t
{
enum netdev_queue_state_t {
__QUEUE_STATE_XOFF,
__QUEUE_STATE_FROZEN,
};
@@ -635,6 +668,10 @@ struct net_device_ops {
unsigned int sgc);
int (*ndo_fcoe_ddp_done)(struct net_device *dev,
u16 xid);
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
int (*ndo_fcoe_get_wwn)(struct net_device *dev,
u64 *wwn, int type);
#endif
};
@@ -648,8 +685,7 @@ struct net_device_ops {
* moves out.
*/
struct net_device
{
struct net_device {
/*
* This is the first field of the "visible" part of this structure
@@ -683,6 +719,7 @@ struct net_device
struct list_head dev_list;
struct list_head napi_list;
struct list_head unreg_list;
/* Net device features */
unsigned long features;
@@ -859,7 +896,7 @@ struct net_device
/* device index hash chain */
struct hlist_node index_hlist;
struct net_device *link_watch_next;
struct list_head link_watch_list;
/* register/unregister state machine */
enum { NETREG_UNINITIALIZED=0,
@@ -894,8 +931,8 @@ struct net_device
/* class/net/name entry */
struct device dev;
/* space for optional statistics and wireless sysfs groups */
const struct attribute_group *sysfs_groups[3];
/* space for optional device, statistics, and wireless sysfs groups */
const struct attribute_group *sysfs_groups[4];
/* rtnetlink link ops */
const struct rtnl_link_ops *rtnl_link_ops;
@@ -909,7 +946,7 @@ struct net_device
#ifdef CONFIG_DCB
/* Data Center Bridging netlink ops */
struct dcbnl_rtnl_ops *dcbnl_ops;
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
@@ -1075,10 +1112,16 @@ extern rwlock_t dev_base_lock; /* Device list lock */
#define for_each_netdev(net, d) \
list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d) \
list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d) \
list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n) \
list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d) \
list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d) \
list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
static inline struct net_device *next_net_device(struct net_device *dev)
@@ -1091,6 +1134,16 @@ static inline struct net_device *next_net_device(struct net_device *dev)
return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}
static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
struct list_head *lh;
struct net *net;
net = dev_net(dev);
lh = rcu_dereference(dev->dev_list.next);
return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}
static inline struct net_device *first_net_device(struct net *net)
{
return list_empty(&net->dev_base_head) ? NULL :
@@ -1109,6 +1162,7 @@ extern void __dev_remove_pack(struct packet_type *pt);
extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags,
unsigned short mask);
extern struct net_device *dev_get_by_name(struct net *net, const char *name);
extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
extern int dev_alloc_name(struct net_device *dev, const char *name);
extern int dev_open(struct net_device *dev);
@@ -1116,7 +1170,14 @@ extern int dev_close(struct net_device *dev);
extern void dev_disable_lro(struct net_device *dev);
extern int dev_queue_xmit(struct sk_buff *skb);
extern int register_netdevice(struct net_device *dev);
extern void unregister_netdevice(struct net_device *dev);
extern void unregister_netdevice_queue(struct net_device *dev,
struct list_head *head);
extern void unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
unregister_netdevice_queue(dev, NULL);
}
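The _queue/_many split exists so callers can batch teardown: collect devices on a list under RTNL, then commit the whole batch with a single notifier/sync pass. A rough sketch (the helper is invented for illustration; rtnl_lock() is assumed to be held):

/* Illustrative batch unregistration. */
static void example_unregister_group(struct net_device *devs[], int n)
{
	LIST_HEAD(kill_list);
	int i;

	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);

	unregister_netdevice_many(&kill_list);	/* one pass for the batch */
}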
extern void free_netdev(struct net_device *dev);
extern void synchronize_net(void);
extern int register_netdevice_notifier(struct notifier_block *nb);
@@ -1127,6 +1188,7 @@ extern void netdev_resync_ops(struct net_device *dev);
extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
extern int dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int netpoll_trap(void);
@@ -1212,8 +1274,7 @@ static inline int unregister_gifconf(unsigned int family)
* Incoming packets are placed on per-cpu queues so that
* no locking is needed.
*/
struct softnet_data
{
struct softnet_data {
struct Qdisc *output_queue;
struct sk_buff_head input_pkt_queue;
struct list_head poll_list;
@@ -1467,18 +1528,19 @@ extern int netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int netif_receive_skb(struct sk_buff *skb);
extern void napi_gro_flush(struct napi_struct *napi);
extern int dev_gro_receive(struct napi_struct *napi,
extern gro_result_t dev_gro_receive(struct napi_struct *napi,
struct sk_buff *skb);
extern int napi_skb_finish(int ret, struct sk_buff *skb);
extern int napi_gro_receive(struct napi_struct *napi,
extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
extern gro_result_t napi_gro_receive(struct napi_struct *napi,
struct sk_buff *skb);
extern void napi_reuse_skb(struct napi_struct *napi,
struct sk_buff *skb);
extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
extern int napi_frags_finish(struct napi_struct *napi,
struct sk_buff *skb, int ret);
extern gro_result_t napi_frags_finish(struct napi_struct *napi,
struct sk_buff *skb,
gro_result_t ret);
extern struct sk_buff * napi_frags_skb(struct napi_struct *napi);
extern int napi_gro_frags(struct napi_struct *napi);
extern gro_result_t napi_gro_frags(struct napi_struct *napi);
static inline void napi_free_frags(struct napi_struct *napi)
{
@@ -1502,6 +1564,8 @@ extern int dev_set_mac_address(struct net_device *,
extern int dev_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev,
struct netdev_queue *txq);
extern int dev_forward_skb(struct net_device *dev,
struct sk_buff *skb);
extern int netdev_budget;
@@ -1540,6 +1604,7 @@ static inline void dev_hold(struct net_device *dev)
*/
extern void linkwatch_fire_event(struct net_device *dev);
extern void linkwatch_forget_dev(struct net_device *dev);
/**
* netif_carrier_ok - test if carrier present
@@ -1609,7 +1674,8 @@ static inline int netif_dormant(const struct net_device *dev)
*
* Check if carrier is operational
*/
static inline int netif_oper_up(const struct net_device *dev) {
static inline int netif_oper_up(const struct net_device *dev)
{
return (dev->operstate == IF_OPER_UP ||
dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}
@@ -1880,6 +1946,7 @@ extern void netdev_features_change(struct net_device *dev);
extern void dev_load(struct net *net, const char *name);
extern void dev_mcast_init(void);
extern const struct net_device_stats *dev_get_stats(struct net_device *dev);
extern void dev_txq_stats_fold(const struct net_device *dev, struct net_device_stats *stats);
extern int netdev_max_backlog;
extern int weight_p;
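The new dev_txq_stats_fold() helper collects per-tx-queue counters into a net_device_stats block. A hypothetical ->ndo_get_stats sketch for a multiqueue driver, assuming the driver keeps no separate TX accounting of its own:

/* Hypothetical ndo_get_stats: fold per-queue TX counters into dev->stats. */
static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	dev_txq_stats_fold(dev, &dev->stats);
	return &dev->stats;
}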
@@ -1914,6 +1981,9 @@ unsigned long netdev_increment_features(unsigned long all, unsigned long one,
unsigned long mask);
unsigned long netdev_fix_features(unsigned long features, const char *name);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);
static inline int net_gso_ok(int features, int gso_type)
{
int feature = gso_type << NETIF_F_GSO_SHIFT;

View File

@@ -93,8 +93,7 @@ typedef unsigned int nf_hookfn(unsigned int hooknum,
const struct net_device *out,
int (*okfn)(struct sk_buff *));
struct nf_hook_ops
{
struct nf_hook_ops {
struct list_head list;
/* User fills in from here down. */
@@ -106,8 +105,7 @@ struct nf_hook_ops
int priority;
};
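A hypothetical hook registration against the nf_hook_ops layout above; the hook function follows the nf_hookfn typedef shown in this header, while the identifiers and chosen hook point are assumptions for illustration:

/* Hypothetical hook: accept everything at IPv4 PRE_ROUTING. */
static unsigned int example_hook(unsigned int hooknum, struct sk_buff *skb,
				 const struct net_device *in,
				 const struct net_device *out,
				 int (*okfn)(struct sk_buff *))
{
	return NF_ACCEPT;
}

static struct nf_hook_ops example_ops = {
	.hook		= example_hook,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_PRE_ROUTING,
	.priority	= NF_IP_PRI_FIRST,
};

/* Registered with nf_register_hook(&example_ops) and removed with
 * nf_unregister_hook(&example_ops), typically from module init/exit. */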
struct nf_sockopt_ops
{
struct nf_sockopt_ops {
struct list_head list;
u_int8_t pf;

View File

@@ -3,8 +3,7 @@
/* Connection state tracking for netfilter. This is separated from,
but required by, the NAT layer; it can also be used by an iptables
extension. */
enum ip_conntrack_info
{
enum ip_conntrack_info {
/* Part of an established connection (either direction). */
IP_CT_ESTABLISHED,
@@ -76,8 +75,7 @@ enum ip_conntrack_status {
};
#ifdef __KERNEL__
struct ip_conntrack_stat
{
struct ip_conntrack_stat {
unsigned int searched;
unsigned int found;
unsigned int new;

View File

@@ -3,8 +3,7 @@
/* FTP tracking. */
/* This enum is exposed to userspace */
enum nf_ct_ftp_type
{
enum nf_ct_ftp_type {
/* PORT command from client */
NF_CT_FTP_PORT,
/* PASV response from server */

View File

@@ -16,8 +16,7 @@ enum sctp_conntrack {
SCTP_CONNTRACK_MAX
};
struct ip_ct_sctp
{
struct ip_ct_sctp {
enum sctp_conntrack state;
__be32 vtag[IP_CT_DIR_MAX];

View File

@@ -55,8 +55,7 @@ struct ip_ct_tcp_state {
u_int8_t flags; /* per direction options */
};
struct ip_ct_tcp
{
struct ip_ct_tcp {
struct ip_ct_tcp_state seen[2]; /* connection parameters per direction */
u_int8_t state; /* state of the connection (enum tcp_conntrack) */
/* For detecting stale connections */
@@ -67,6 +66,9 @@ struct ip_ct_tcp
u_int32_t last_ack; /* Last sequence number seen in opposite dir */
u_int32_t last_end; /* Last seq + len */
u_int16_t last_win; /* Last window advertisement seen in dir */
/* For SYN packets while we may be out-of-sync */
u_int8_t last_wscale; /* Last window scaling factor seen */
u_int8_t last_flags; /* Last flags set */
};
#endif /* __KERNEL__ */

View File

@@ -55,8 +55,7 @@ struct nfgenmsg {
#include <linux/capability.h>
#include <net/netlink.h>
struct nfnl_callback
{
struct nfnl_callback {
int (*call)(struct sock *nl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[]);
@@ -64,8 +63,7 @@ struct nfnl_callback
const u_int16_t attr_count; /* number of nlattr's */
};
struct nfnetlink_subsystem
{
struct nfnetlink_subsystem {
const char *name;
__u8 subsys_id; /* nfnetlink subsystem ID */
__u8 cb_count; /* number of callbacks */

View File

@@ -21,8 +21,7 @@
* ! nfnetlink use the same attributes methods. - J. Schulist.
*/
struct nfattr
{
struct nfattr {
__u16 nfa_len;
__u16 nfa_type; /* we use 15 bits for the type, and the highest
* bit to indicate whether the payload is nested */

View File

@@ -6,8 +6,7 @@
#define XT_FUNCTION_MAXNAMELEN 30
#define XT_TABLE_MAXNAMELEN 32
struct xt_entry_match
{
struct xt_entry_match {
union {
struct {
__u16 match_size;
@@ -31,8 +30,7 @@ struct xt_entry_match
unsigned char data[0];
};
struct xt_entry_target
{
struct xt_entry_target {
union {
struct {
__u16 target_size;
@@ -64,16 +62,14 @@ struct xt_entry_target
}, \
}
struct xt_standard_target
{
struct xt_standard_target {
struct xt_entry_target target;
int verdict;
};
/* The argument to IPT_SO_GET_REVISION_*. Returns highest revision
* kernel supports, if >= revision. */
struct xt_get_revision
{
struct xt_get_revision {
char name[XT_FUNCTION_MAXNAMELEN-1];
__u8 revision;
@@ -90,8 +86,7 @@ struct xt_get_revision
* ip6t_entry and arpt_entry. This sucks, and it is a hack. It will be my
* personal pleasure to remove it -HW
*/
struct _xt_align
{
struct _xt_align {
__u8 u8;
__u16 u16;
__u32 u32;
@@ -109,14 +104,12 @@ struct _xt_align
#define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)
struct xt_counters
{
struct xt_counters {
__u64 pcnt, bcnt; /* Packet and byte counters */
};
/* The argument to IPT_SO_ADD_COUNTERS. */
struct xt_counters_info
{
struct xt_counters_info {
/* Which table. */
char name[XT_TABLE_MAXNAMELEN];
@@ -269,8 +262,7 @@ struct xt_tgdtor_param {
u_int8_t family;
};
struct xt_match
{
struct xt_match {
struct list_head list;
const char name[XT_FUNCTION_MAXNAMELEN-1];
@@ -310,8 +302,7 @@ struct xt_match
};
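A hypothetical xt_match definition against the layout above; in trees of this vintage the match callback takes a struct xt_match_param, and xt_register_match()/xt_unregister_match() handle registration. All names here are assumptions, not part of this patch:

/* Hypothetical "match everything" extension. */
static bool example_mt(const struct sk_buff *skb,
		       const struct xt_match_param *par)
{
	return true;
}

static struct xt_match example_mt_reg __read_mostly = {
	.name		= "example",
	.revision	= 0,
	.family		= NFPROTO_UNSPEC,
	.match		= example_mt,
	.me		= THIS_MODULE,
};

/* xt_register_match(&example_mt_reg) from module init,
 * xt_unregister_match(&example_mt_reg) from module exit. */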
/* Registration hooks for targets. */
struct xt_target
{
struct xt_target {
struct list_head list;
const char name[XT_FUNCTION_MAXNAMELEN-1];
@@ -349,8 +340,7 @@ struct xt_target
};
/* Furniture shopping... */
struct xt_table
{
struct xt_table {
struct list_head list;
/* What hooks you will enter on */
@@ -371,8 +361,7 @@ struct xt_table
#include <linux/netfilter_ipv4.h>
/* The table itself */
struct xt_table_info
{
struct xt_table_info {
/* Size per table */
unsigned int size;
/* Number of entries: FIXME. --RR */
@@ -528,8 +517,7 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
#ifdef CONFIG_COMPAT
#include <net/compat.h>
struct compat_xt_entry_match
{
struct compat_xt_entry_match {
union {
struct {
u_int16_t match_size;
@@ -545,8 +533,7 @@ struct compat_xt_entry_match
unsigned char data[0];
};
struct compat_xt_entry_target
{
struct compat_xt_entry_target {
union {
struct {
u_int16_t target_size;
@@ -566,8 +553,7 @@ struct compat_xt_entry_target
* need to change whole approach in order to calculate align as function of
* current task alignment */
struct compat_xt_counters
{
struct compat_xt_counters {
#if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
u_int32_t cnt[4];
#else
@@ -575,8 +561,7 @@ struct compat_xt_counters
#endif
};
struct compat_xt_counters_info
{
struct compat_xt_counters_info {
char name[XT_TABLE_MAXNAMELEN];
compat_uint_t num_counters;
struct compat_xt_counters counters[0];

View File

@@ -15,8 +15,7 @@ enum xt_connbytes_direction {
XT_CONNBYTES_DIR_BOTH,
};
struct xt_connbytes_info
{
struct xt_connbytes_info {
struct {
aligned_u64 from; /* count to be matched */
aligned_u64 to; /* count to be matched */

Some files were not shown because too many files have changed in this diff.