Merge branch 'linus' into irq/threaded

Conflicts:
	include/linux/irq.h
	kernel/irq/handle.c
Ingo Molnar committed 2009-04-06 01:41:22 +02:00
7424 changed files with 841787 additions and 291611 deletions

View File

@@ -67,6 +67,7 @@ header-y += falloc.h
header-y += fd.h
header-y += fdreg.h
header-y += fib_rules.h
header-y += fiemap.h
header-y += firewire-cdev.h
header-y += firewire-constants.h
header-y += fuse.h
@@ -115,6 +116,7 @@ header-y += mqueue.h
header-y += mtio.h
header-y += ncp_no.h
header-y += neighbour.h
header-y += net_dropmon.h
header-y += netfilter_arp.h
header-y += netrom.h
header-y += nfs2.h
@@ -157,8 +159,6 @@ header-y += ultrasound.h
header-y += un.h
header-y += utime.h
header-y += veth.h
header-y += video_decoder.h
header-y += video_encoder.h
header-y += videotext.h
header-y += x25.h

View File

@@ -97,6 +97,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
/* the following four functions are architecture-dependent */
void acpi_numa_slit_init (struct acpi_table_slit *slit);
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
void acpi_numa_arch_fixup(void);
@@ -257,6 +258,40 @@ void __init acpi_no_s4_hw_signature(void);
void __init acpi_old_suspend_ordering(void);
void __init acpi_s4_no_nvs(void);
#endif /* CONFIG_PM_SLEEP */
#define OSC_QUERY_TYPE 0
#define OSC_SUPPORT_TYPE 1
#define OSC_CONTROL_TYPE 2
#define OSC_SUPPORT_MASKS 0x1f
/* _OSC DW0 Definition */
#define OSC_QUERY_ENABLE 1
#define OSC_REQUEST_ERROR 2
#define OSC_INVALID_UUID_ERROR 4
#define OSC_INVALID_REVISION_ERROR 8
#define OSC_CAPABILITIES_MASK_ERROR 16
/* _OSC DW1 Definition (OS Support Fields) */
#define OSC_EXT_PCI_CONFIG_SUPPORT 1
#define OSC_ACTIVE_STATE_PWR_SUPPORT 2
#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4
#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8
#define OSC_MSI_SUPPORT 16
/* _OSC DW1 Definition (OS Control Fields) */
#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1
#define OSC_SHPC_NATIVE_HP_CONTROL 2
#define OSC_PCI_EXPRESS_PME_CONTROL 4
#define OSC_PCI_EXPRESS_AER_CONTROL 8
#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16
#define OSC_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
OSC_SHPC_NATIVE_HP_CONTROL | \
OSC_PCI_EXPRESS_PME_CONTROL | \
OSC_PCI_EXPRESS_AER_CONTROL | \
OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags);
#else /* CONFIG_ACPI */
static inline int early_acpi_boot_init(void)

View File

@@ -77,20 +77,20 @@ typedef struct _agp_setup {
* The "prot" down below needs still a "sleep" flag somehow ...
*/
typedef struct _agp_segment {
off_t pg_start; /* starting page to populate */
size_t pg_count; /* number of pages */
int prot; /* prot flags for mmap */
__kernel_off_t pg_start; /* starting page to populate */
__kernel_size_t pg_count; /* number of pages */
int prot; /* prot flags for mmap */
} agp_segment;
typedef struct _agp_region {
pid_t pid; /* pid of process */
size_t seg_count; /* number of segments */
__kernel_pid_t pid; /* pid of process */
__kernel_size_t seg_count; /* number of segments */
struct _agp_segment *seg_list;
} agp_region;
typedef struct _agp_allocate {
int key; /* tag of allocation */
size_t pg_count; /* number of pages */
__kernel_size_t pg_count;/* number of pages */
__u32 type; /* 0 == normal, other devspec */
__u32 physical; /* device specific (some devices
* need a phys address of the
@@ -100,7 +100,7 @@ typedef struct _agp_allocate {
typedef struct _agp_bind {
int key; /* tag of allocation */
off_t pg_start; /* starting page to populate */
__kernel_off_t pg_start;/* starting page to populate */
} agp_bind;
typedef struct _agp_unbind {

View File

@@ -235,8 +235,6 @@ struct Outgoing {
struct arcnet_local {
struct net_device_stats stats;
uint8_t config, /* current value of CONFIG register */
timeout, /* Extended timeout for COM20020 */
backplane, /* Backplane flag for COM20020 */
@@ -335,7 +333,12 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc);
void arcnet_unregister_proto(struct ArcProto *proto);
irqreturn_t arcnet_interrupt(int irq, void *dev_id);
struct net_device *alloc_arcdev(char *name);
struct net_device *alloc_arcdev(const char *name);
int arcnet_open(struct net_device *dev);
int arcnet_close(struct net_device *dev);
int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev);
void arcnet_timeout(struct net_device *dev);
#endif /* __KERNEL__ */
#endif /* _LINUX_ARCDEVICE_H */

View File

@@ -21,6 +21,15 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
/* on architectures without dma-mapping capabilities we need to ensure
* that the asynchronous path compiles away
*/
#ifdef CONFIG_HAS_DMA
#define __async_inline
#else
#define __async_inline __always_inline
#endif
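
A minimal sketch of the compile-away behaviour the comment above describes (the helper is hypothetical, not part of this patch): with CONFIG_HAS_DMA unset, __async_inline forces inlining, so the compiler can prove any DMA-only branch dead and emit no asynchronous code at all.

static __async_inline void do_bulk_copy(void *dst, const void *src, size_t len)
{
	/* On !CONFIG_HAS_DMA this body is inlined into every caller and
	 * DMA-only branches inside it fold away at compile time. */
	memcpy(dst, src, len);
}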
/**
* dma_chan_ref - object used to manage dma channels received from the
* dmaengine core.

View File

@@ -108,6 +108,8 @@ enum {
ATA_PIO5 = ATA_PIO4 | (1 << 5),
ATA_PIO6 = ATA_PIO5 | (1 << 6),
ATA_PIO4_ONLY = (1 << 4),
ATA_SWDMA0 = (1 << 0),
ATA_SWDMA1 = ATA_SWDMA0 | (1 << 1),
ATA_SWDMA2 = ATA_SWDMA1 | (1 << 2),
@@ -117,6 +119,8 @@ enum {
ATA_MWDMA0 = (1 << 0),
ATA_MWDMA1 = ATA_MWDMA0 | (1 << 1),
ATA_MWDMA2 = ATA_MWDMA1 | (1 << 2),
ATA_MWDMA3 = ATA_MWDMA2 | (1 << 3),
ATA_MWDMA4 = ATA_MWDMA3 | (1 << 4),
ATA_MWDMA12_ONLY = (1 << 1) | (1 << 2),
ATA_MWDMA2_ONLY = (1 << 2),
@@ -131,6 +135,8 @@ enum {
ATA_UDMA7 = ATA_UDMA6 | (1 << 7),
/* ATA_UDMA7 is just for completeness... doesn't exist (yet?). */
ATA_UDMA24_ONLY = (1 << 2) | (1 << 4),
ATA_UDMA_MASK_40C = ATA_UDMA2, /* udma0-2 */
/* DMA-related */
@@ -244,8 +250,6 @@ enum {
ATA_CMD_MEDIA_UNLOCK = 0xDF,
/* marked obsolete in the ATA/ATAPI-7 spec */
ATA_CMD_RESTORE = 0x10,
/* EXABYTE specific */
ATA_EXABYTE_ENABLE_NEST = 0xF0,
/* READ_LOG_EXT pages */
ATA_LOG_SATA_NCQ = 0x10,

View File

@@ -0,0 +1,28 @@
/*
* Copyright (c) 2008 Atheros Communications Inc.
* Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
* Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _LINUX_ATH9K_PLATFORM_H
#define _LINUX_ATH9K_PLATFORM_H
#define ATH9K_PLAT_EEP_MAX_WORDS 2048
struct ath9k_platform_data {
u16 eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS];
};
#endif /* _LINUX_ATH9K_PLATFORM_H */

View File

@@ -11,6 +11,7 @@
#include <linux/atmioc.h>
#include <linux/atm.h>
#include <linux/if_ether.h>
#include <linux/types.h>
/* ATM lec daemon control socket */
#define ATMLEC_CTRL _IO('a', ATMIOC_LANE)
@@ -78,8 +79,8 @@ struct atmlec_msg {
} normal;
struct atmlec_config_msg config;
struct {
uint16_t lec_id; /* requestor lec_id */
uint32_t tran_id; /* transaction id */
__u16 lec_id; /* requestor lec_id */
__u32 tran_id; /* transaction id */
unsigned char mac_addr[ETH_ALEN]; /* dst mac addr */
unsigned char atm_addr[ATM_ESA_LEN]; /* requestor ATM addr */
} proxy; /*

View File

@@ -4,6 +4,7 @@
#include <linux/atmapi.h>
#include <linux/atmioc.h>
#include <linux/atm.h>
#include <linux/types.h>
#define ATMMPC_CTRL _IO('a', ATMIOC_MPOA)
#define ATMMPC_DATA _IO('a', ATMIOC_MPOA+1)
@@ -18,39 +19,39 @@ struct atmmpc_ioc {
};
typedef struct in_ctrl_info {
uint8_t Last_NHRP_CIE_code;
uint8_t Last_Q2931_cause_value;
uint8_t eg_MPC_ATM_addr[ATM_ESA_LEN];
__u8 Last_NHRP_CIE_code;
__u8 Last_Q2931_cause_value;
__u8 eg_MPC_ATM_addr[ATM_ESA_LEN];
__be32 tag;
__be32 in_dst_ip; /* IP address this ingress MPC sends packets to */
uint16_t holding_time;
uint32_t request_id;
__u16 holding_time;
__u32 request_id;
} in_ctrl_info;
typedef struct eg_ctrl_info {
uint8_t DLL_header[256];
uint8_t DH_length;
__u8 DLL_header[256];
__u8 DH_length;
__be32 cache_id;
__be32 tag;
__be32 mps_ip;
__be32 eg_dst_ip; /* IP address to which ingress MPC sends packets */
uint8_t in_MPC_data_ATM_addr[ATM_ESA_LEN];
uint16_t holding_time;
__u8 in_MPC_data_ATM_addr[ATM_ESA_LEN];
__u16 holding_time;
} eg_ctrl_info;
struct mpc_parameters {
uint16_t mpc_p1; /* Shortcut-Setup Frame Count */
uint16_t mpc_p2; /* Shortcut-Setup Frame Time */
uint8_t mpc_p3[8]; /* Flow-detection Protocols */
uint16_t mpc_p4; /* MPC Initial Retry Time */
uint16_t mpc_p5; /* MPC Retry Time Maximum */
uint16_t mpc_p6; /* Hold Down Time */
__u16 mpc_p1; /* Shortcut-Setup Frame Count */
__u16 mpc_p2; /* Shortcut-Setup Frame Time */
__u8 mpc_p3[8]; /* Flow-detection Protocols */
__u16 mpc_p4; /* MPC Initial Retry Time */
__u16 mpc_p5; /* MPC Retry Time Maximum */
__u16 mpc_p6; /* Hold Down Time */
} ;
struct k_message {
uint16_t type;
__u16 type;
__be32 ip_mask;
uint8_t MPS_ctrl[ATM_ESA_LEN];
__u8 MPS_ctrl[ATM_ESA_LEN];
union {
in_ctrl_info in_info;
eg_ctrl_info eg_info;
@@ -61,11 +62,11 @@ struct k_message {
struct llc_snap_hdr {
/* RFC 1483 LLC/SNAP encapsulation for routed IP PDUs */
uint8_t dsap; /* Destination Service Access Point (0xAA) */
uint8_t ssap; /* Source Service Access Point (0xAA) */
uint8_t ui; /* Unnumbered Information (0x03) */
uint8_t org[3]; /* Organizational identification (0x000000) */
uint8_t type[2]; /* Ether type (for IP) (0x0800) */
__u8 dsap; /* Destination Service Access Point (0xAA) */
__u8 ssap; /* Source Service Access Point (0xAA) */
__u8 ui; /* Unnumbered Information (0x03) */
__u8 org[3]; /* Organizational identification (0x000000) */
__u8 type[2]; /* Ether type (for IP) (0x0800) */
};
/* TLVs this MPC recognizes */

View File

@@ -36,7 +36,8 @@
* 1500 - 1599 kernel LSPP events
* 1600 - 1699 kernel crypto events
* 1700 - 1799 kernel anomaly records
* 1800 - 1999 future kernel use (maybe integrity labels and related events)
* 1800 - 1899 kernel integrity events
* 1900 - 1999 future kernel use
* 2000 is for otherwise unclassified kernel audit messages (legacy)
* 2001 - 2099 unused (kernel)
* 2100 - 2199 user space anomaly records
@@ -125,6 +126,12 @@
#define AUDIT_LAST_KERN_ANOM_MSG 1799
#define AUDIT_ANOM_PROMISCUOUS 1700 /* Device changed promiscuous mode */
#define AUDIT_ANOM_ABEND 1701 /* Process ended abnormally */
#define AUDIT_INTEGRITY_DATA 1800 /* Data integrity verification */
#define AUDIT_INTEGRITY_METADATA 1801 /* Metadata integrity verification */
#define AUDIT_INTEGRITY_STATUS 1802 /* Integrity enable status */
#define AUDIT_INTEGRITY_HASH 1803 /* Integrity HASH type */
#define AUDIT_INTEGRITY_PCR 1804 /* PCR invalidation msgs */
#define AUDIT_INTEGRITY_RULE 1805 /* policy rule */
#define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */

View File

@@ -10,8 +10,13 @@
#ifndef _LINUX_AUTO_DEV_IOCTL_H
#define _LINUX_AUTO_DEV_IOCTL_H
#include <linux/auto_fs.h>
#ifdef __KERNEL__
#include <linux/string.h>
#include <linux/types.h>
#else
#include <string.h>
#endif /* __KERNEL__ */
#define AUTOFS_DEVICE_NAME "autofs"

View File

@@ -17,10 +17,12 @@
#ifdef __KERNEL__
#include <linux/fs.h>
#include <linux/limits.h>
#include <asm/types.h>
#endif /* __KERNEL__ */
#include <linux/types.h>
#include <linux/ioctl.h>
#else
#include <asm/types.h>
#include <sys/ioctl.h>
#endif /* __KERNEL__ */
/* This file describes autofs v3 */
#define AUTOFS_PROTO_VERSION 3

View File

@@ -35,8 +35,7 @@ struct linux_binprm{
#endif
struct mm_struct *mm;
unsigned long p; /* current top of mem */
unsigned int sh_bang:1,
misc_bang:1,
unsigned int
cred_prepared:1,/* true if creds already prepared (multiple
* preps happen for interpreters) */
cap_effective:1;/* true if has elevated effective capabilities,

View File

@@ -426,9 +426,6 @@ struct bio_set {
unsigned int front_pad;
mempool_t *bio_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
mempool_t *bio_integrity_pool;
#endif
mempool_t *bvec_pool;
};
@@ -519,9 +516,8 @@ static inline int bio_has_data(struct bio *bio)
#define bio_integrity(bio) (bio->bi_integrity != NULL)
extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *, struct bio_set *);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
@@ -531,27 +527,21 @@ extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t, struct bio_set *);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init_slab(void);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
#else /* CONFIG_BLK_DEV_INTEGRITY */
#define bio_integrity(a) (0)
#define bioset_integrity_create(a, b) (0)
#define bio_integrity_prep(a) (0)
#define bio_integrity_enabled(a) (0)
#define bio_integrity_clone(a, b, c,d ) (0)
#define bioset_integrity_free(a) do { } while (0)
#define bio_integrity_free(a, b) do { } while (0)
#define bio_integrity_clone(a, b, c) (0)
#define bio_integrity_free(a) do { } while (0)
#define bio_integrity_endio(a, b) do { } while (0)
#define bio_integrity_advance(a, b) do { } while (0)
#define bio_integrity_trim(a, b, c) do { } while (0)
#define bio_integrity_split(a, b, c) do { } while (0)
#define bio_integrity_set_tag(a, b, c) do { } while (0)
#define bio_integrity_get_tag(a, b, c) do { } while (0)
#define bio_integrity_init_slab(a) do { } while (0)
#endif /* CONFIG_BLK_DEV_INTEGRITY */

View File

@@ -144,6 +144,9 @@ struct blk_user_trace_setup {
#ifdef __KERNEL__
#if defined(CONFIG_BLK_DEV_IO_TRACE)
#include <linux/sysfs.h>
struct blk_trace {
int trace_state;
struct rchan *rchan;
@@ -194,6 +197,8 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
extern int blk_trace_startstop(struct request_queue *q, int start);
extern int blk_trace_remove(struct request_queue *q);
extern struct attribute_group blk_trace_attr_group;
#else /* !CONFIG_BLK_DEV_IO_TRACE */
#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
#define blk_trace_shutdown(q) do { } while (0)

View File

@@ -146,10 +146,10 @@ extern void *alloc_large_system_hash(const char *tablename,
#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
/* Only NUMA needs hash distribution.
* IA64 and x86_64 have sufficient vmalloc space.
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
* sufficient vmalloc space.
*/
#if defined(CONFIG_NUMA) && (defined(CONFIG_IA64) || defined(CONFIG_X86_64))
#if defined(CONFIG_NUMA) && defined(CONFIG_64BIT)
#define HASHDIST_DEFAULT 1
#else
#define HASHDIST_DEFAULT 0

View File

@@ -1,12 +1,22 @@
#ifndef BSG_H
#define BSG_H
#include <linux/types.h>
#define BSG_PROTOCOL_SCSI 0
#define BSG_SUB_PROTOCOL_SCSI_CMD 0
#define BSG_SUB_PROTOCOL_SCSI_TMF 1
#define BSG_SUB_PROTOCOL_SCSI_TRANSPORT 2
/*
* For flags member below
* sg.h sg_io_hdr also has bits defined for its flags member. However,
* none of these bits are implemented/used by bsg. The bits below are
* allocated so as not to conflict with the sg.h ones anyway.
*/
#define BSG_FLAG_Q_AT_TAIL 0x10 /* default, == 0 at this bit, is Q_AT_HEAD */
struct sg_io_v4 {
__s32 guard; /* [i] 'Q' to differentiate from v3 */
__u32 protocol; /* [i] 0 -> SCSI , .... */
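
A hedged usage sketch for the flag defined above (member names beyond guard/protocol come from the full sg_io_v4 definition, not this excerpt):

struct sg_io_v4 hdr = {
	.guard       = 'Q',			/* marks a v4 request */
	.protocol    = BSG_PROTOCOL_SCSI,
	.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD,
};
hdr.flags |= BSG_FLAG_Q_AT_TAIL;	/* bit clear (the default) queues at head */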

View File

@@ -165,15 +165,8 @@ int sync_mapping_buffers(struct address_space *mapping);
void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
void mark_buffer_async_write(struct buffer_head *bh);
void invalidate_bdev(struct block_device *);
int sync_blockdev(struct block_device *bdev);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
int fsync_bdev(struct block_device *);
struct super_block *freeze_bdev(struct block_device *);
int thaw_bdev(struct block_device *, struct super_block *);
int fsync_super(struct super_block *);
int fsync_no_super(struct block_device *);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
unsigned size);
struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
@@ -223,7 +216,7 @@ int cont_write_begin(struct file *, struct address_space *, loff_t,
get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block);
void block_sync_page(struct page *);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
@@ -339,22 +332,10 @@ extern int __set_page_dirty_buffers(struct page *page);
static inline void buffer_init(void) {}
static inline int try_to_free_buffers(struct page *page) { return 1; }
static inline int sync_blockdev(struct block_device *bdev) { return 0; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bdev(struct block_device *bdev) {}
static inline struct super_block *freeze_bdev(struct block_device *sb)
{
return NULL;
}
static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
return 0;
}
#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */

View File

@@ -15,6 +15,7 @@
#include <linux/cgroupstats.h>
#include <linux/prio_heap.h>
#include <linux/rwsem.h>
#include <linux/idr.h>
#ifdef CONFIG_CGROUPS
@@ -22,6 +23,7 @@ struct cgroupfs_root;
struct cgroup_subsys;
struct inode;
struct cgroup;
struct css_id;
extern int cgroup_init_early(void);
extern int cgroup_init(void);
@@ -47,18 +49,24 @@ enum cgroup_subsys_id {
/* Per-subsystem/per-cgroup state maintained by the system. */
struct cgroup_subsys_state {
/* The cgroup that this subsystem is attached to. Useful
/*
* The cgroup that this subsystem is attached to. Useful
* for subsystems that want to know about the cgroup
* hierarchy structure */
* hierarchy structure
*/
struct cgroup *cgroup;
/* State maintained by the cgroup system to allow subsystems
/*
* State maintained by the cgroup system to allow subsystems
* to be "busy". Should be accessed via css_get(),
* css_tryget() and css_put(). */
* css_tryget() and css_put().
*/
atomic_t refcnt;
unsigned long flags;
/* ID for this css, if possible */
struct css_id *id;
};
/* bits in struct cgroup_subsys_state flags field */
@@ -120,19 +128,26 @@ static inline void css_put(struct cgroup_subsys_state *css)
enum {
/* Control Group is dead */
CGRP_REMOVED,
/* Control Group has previously had a child cgroup or a task,
* but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */
/*
* Control Group has previously had a child cgroup or a task,
* but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
*/
CGRP_RELEASABLE,
/* Control Group requires release notifications to userspace */
CGRP_NOTIFY_ON_RELEASE,
/*
* A thread in rmdir() is waiting for this cgroup.
*/
CGRP_WAIT_ON_RMDIR,
};
struct cgroup {
unsigned long flags; /* "unsigned long" so bitops work */
/* count users of this cgroup. >0 means busy, but doesn't
* necessarily indicate the number of tasks in the
* cgroup */
/*
* count users of this cgroup. >0 means busy, but doesn't
* necessarily indicate the number of tasks in the cgroup
*/
atomic_t count;
/*
@@ -142,7 +157,7 @@ struct cgroup {
struct list_head sibling; /* my parent's children */
struct list_head children; /* my children */
struct cgroup *parent; /* my parent */
struct cgroup *parent; /* my parent */
struct dentry *dentry; /* cgroup fs entry, RCU protected */
/* Private pointers for each registered subsystem */
@@ -177,11 +192,12 @@ struct cgroup {
struct rcu_head rcu_head;
};
/* A css_set is a structure holding pointers to a set of
/*
* A css_set is a structure holding pointers to a set of
* cgroup_subsys_state objects. This saves space in the task struct
* object and speeds up fork()/exit(), since a single inc/dec and a
* list_add()/del() can bump the reference count on the entire
* cgroup set for a task.
* list_add()/del() can bump the reference count on the entire cgroup
* set for a task.
*/
struct css_set {
@@ -226,13 +242,8 @@ struct cgroup_map_cb {
void *state;
};
/* struct cftype:
*
* The files in the cgroup filesystem mostly have a very simple read/write
* handling, some common function will take care of it. Nevertheless some cases
* (read tasks) are special and therefore I define this structure for every
* kind of file.
*
/*
* struct cftype: handler definitions for cgroup control files
*
* When reading/writing to a file:
* - the cgroup to use is file->f_dentry->d_parent->d_fsdata
@@ -241,10 +252,17 @@ struct cgroup_map_cb {
#define MAX_CFTYPE_NAME 64
struct cftype {
/* By convention, the name should begin with the name of the
* subsystem, followed by a period */
/*
* By convention, the name should begin with the name of the
* subsystem, followed by a period
*/
char name[MAX_CFTYPE_NAME];
int private;
/*
* If not 0, file mode is set to this value, otherwise it will
* be figured out automatically
*/
mode_t mode;
/*
* If non-zero, defines the maximum length of string that can
@@ -319,15 +337,20 @@ struct cgroup_scanner {
void (*process_task)(struct task_struct *p,
struct cgroup_scanner *scan);
struct ptr_heap *heap;
void *data;
};
/* Add a new file to the given cgroup directory. Should only be
* called by subsystems from within a populate() method */
/*
* Add a new file to the given cgroup directory. Should only be
* called by subsystems from within a populate() method
*/
int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
const struct cftype *cft);
/* Add a set of new files to the given cgroup directory. Should
* only be called by subsystems from within a populate() method */
/*
* Add a set of new files to the given cgroup directory. Should
* only be called by subsystems from within a populate() method
*/
int cgroup_add_files(struct cgroup *cgrp,
struct cgroup_subsys *subsys,
const struct cftype cft[],
@@ -339,15 +362,18 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
int cgroup_task_count(const struct cgroup *cgrp);
/* Return true if the cgroup is a descendant of the current cgroup */
int cgroup_is_descendant(const struct cgroup *cgrp);
/* Return true if cgrp is a descendant of the task's cgroup */
int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task);
/* Control Group subsystem type. See Documentation/cgroups.txt for details */
/*
* Control Group subsystem type.
* See Documentation/cgroups/cgroups.txt for details
*/
struct cgroup_subsys {
struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss,
struct cgroup *cgrp);
void (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
int (*can_attach)(struct cgroup_subsys *ss,
struct cgroup *cgrp, struct task_struct *tsk);
@@ -364,6 +390,11 @@ struct cgroup_subsys {
int active;
int disabled;
int early_init;
/*
* True if this subsys uses ID. ID is not available before cgroup_init()
* (not available at early_init time).
*/
bool use_id;
#define MAX_CGROUP_TYPE_NAMELEN 32
const char *name;
@@ -386,6 +417,9 @@ struct cgroup_subsys {
*/
struct cgroupfs_root *root;
struct list_head sibling;
/* used when use_id == true */
struct idr idr;
spinlock_t id_lock;
};
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
@@ -419,7 +453,8 @@ struct cgroup_iter {
struct list_head *task;
};
/* To iterate across the tasks in a cgroup:
/*
* To iterate across the tasks in a cgroup:
*
* 1) call cgroup_iter_start to initialize an iterator
*
@@ -428,9 +463,10 @@ struct cgroup_iter {
*
* 3) call cgroup_iter_end() to destroy the iterator.
*
* Or, call cgroup_scan_tasks() to iterate through every task in a cpuset.
* - cgroup_scan_tasks() holds the css_set_lock when calling the test_task()
* callback, but not while calling the process_task() callback.
* Or, call cgroup_scan_tasks() to iterate through every task in a
* cgroup - cgroup_scan_tasks() holds the css_set_lock when calling
* the test_task() callback, but not while calling the process_task()
* callback.
*/
void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
@@ -439,6 +475,44 @@ void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
int cgroup_attach_task(struct cgroup *, struct task_struct *);
/*
* CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
* if cgroup_subsys.use_id == true. It can be used for looking up and scanning.
* CSS ID is assigned at cgroup allocation (create) automatically
* and removed when the subsystem calls the free_css_id() function. This is
* because the lifetime of cgroup_subsys_state is the subsystem's responsibility.
*
* Looking up and scanning function should be called under rcu_read_lock().
* Taking cgroup_mutex()/hierarchy_mutex() is not necessary for the following calls.
* But the css returned by this routine can be "not populated yet" or "being
* destroyed". The caller should check css and cgroup's status.
*/
/*
* Typically called at ->destroy(), or wherever the subsys frees
* cgroup_subsys_state.
*/
void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);
/* Find a cgroup_subsys_state which has given ID */
struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);
/*
* Get a cgroup whose id is greater than or equal to id under tree of root.
* Returning a cgroup_subsys_state or NULL.
*/
struct cgroup_subsys_state *css_get_next(struct cgroup_subsys *ss, int id,
struct cgroup_subsys_state *root, int *foundid);
/* Returns true if root is ancestor of cg */
bool css_is_ancestor(struct cgroup_subsys_state *cg,
const struct cgroup_subsys_state *root);
/* Get id and depth of css */
unsigned short css_id(struct cgroup_subsys_state *css);
unsigned short css_depth(struct cgroup_subsys_state *css);
#else /* !CONFIG_CGROUPS */
static inline int cgroup_init_early(void) { return 0; }
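
For the CONFIG_CGROUPS side, a hedged sketch of the scanning pattern the CSS ID comment describes (ss and root are assumed to be supplied by the calling subsystem; rechecking the returned state is the caller's job, as noted above):

struct cgroup_subsys_state *css;
int id = 1, found;

rcu_read_lock();
while ((css = css_get_next(ss, id, root, &found)) != NULL) {
	/* css may be "not populated yet" or "being destroyed";
	 * check its status before dereferencing further. */
	id = found + 1;		/* resume the scan past this ID */
}
rcu_read_unlock();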

View File

@@ -125,4 +125,21 @@ int clk_set_parent(struct clk *clk, struct clk *parent);
*/
struct clk *clk_get_parent(struct clk *clk);
/**
* clk_get_sys - get a clock based upon the device name
* @dev_id: device name
* @con_id: connection ID
*
* Returns a struct clk corresponding to the clock producer, or
* valid IS_ERR() condition containing errno. The implementation
* uses @dev_id and @con_id to determine the clock consumer, and
* thereby the clock producer. In contrast to clk_get() this function
* takes the device name instead of the device itself for identification.
*
* Drivers must assume that the clock source is not enabled.
*
* clk_get_sys should not be called from within interrupt context.
*/
struct clk *clk_get_sys(const char *dev_id, const char *con_id);
#endif
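
A brief usage sketch under the contract documented above ("uart0" and "baud" are made-up identifiers):

struct clk *clk = clk_get_sys("uart0", "baud");
if (IS_ERR(clk))
	return PTR_ERR(clk);
clk_enable(clk);	/* must be enabled explicitly; see the note above */
/* ... use the clock ... */
clk_disable(clk);
clk_put(clk);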

View File

@@ -21,9 +21,110 @@
typedef u64 cycle_t;
struct clocksource;
/**
* struct cyclecounter - hardware abstraction for a free running counter
* Provides completely state-free accessors to the underlying hardware.
* Depending on which hardware it reads, the cycle counter may wrap
* around quickly. Locking rules (if necessary) have to be defined
* by the implementor and user of specific instances of this API.
*
* @read: returns the current cycle value
* @mask: bitmask for two's complement
* subtraction of non 64 bit counters,
* see CLOCKSOURCE_MASK() helper macro
* @mult: cycle to nanosecond multiplier
* @shift: cycle to nanosecond divisor (power of two)
*/
struct cyclecounter {
cycle_t (*read)(const struct cyclecounter *cc);
cycle_t mask;
u32 mult;
u32 shift;
};
/**
* struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
* Contains the state needed by timecounter_read() to detect
* cycle counter wrap around. Initialize with
* timecounter_init(). Also used to convert cycle counts into the
* corresponding nanosecond counts with timecounter_cyc2time(). Users
* of this code are responsible for initializing the underlying
* cycle counter hardware, locking issues and reading the time
* more often than the cycle counter wraps around. The nanosecond
* counter will only wrap around after ~585 years.
*
* @cc: the cycle counter used by this instance
* @cycle_last: most recent cycle counter value seen by
* timecounter_read()
* @nsec: continuously increasing count
*/
struct timecounter {
const struct cyclecounter *cc;
cycle_t cycle_last;
u64 nsec;
};
/**
* cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
* @tc: Pointer to cycle counter.
* @cycles: Cycles
*
* XXX - This could use some mult_lxl_ll() asm optimization. Same code
* as in cyc2ns, but with unsigned result.
*/
static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
cycle_t cycles)
{
u64 ret = (u64)cycles;
ret = (ret * cc->mult) >> cc->shift;
return ret;
}
/**
* timecounter_init - initialize a time counter
* @tc: Pointer to time counter which is to be initialized/reset
* @cc: A cycle counter, ready to be used.
* @start_tstamp: Arbitrary initial time stamp.
*
* After this call the current cycle register (roughly) corresponds to
* the initial time stamp. Every call to timecounter_read() increments
* the time stamp counter by the number of elapsed nanoseconds.
*/
extern void timecounter_init(struct timecounter *tc,
const struct cyclecounter *cc,
u64 start_tstamp);
/**
* timecounter_read - return nanoseconds elapsed since timecounter_init()
* plus the initial time stamp
* @tc: Pointer to time counter.
*
* In other words, keeps track of time since the same epoch as
* the function which generated the initial time stamp.
*/
extern u64 timecounter_read(struct timecounter *tc);
/**
* timecounter_cyc2time - convert a cycle counter to same
* time base as values returned by
* timecounter_read()
* @tc: Pointer to time counter.
* @cycle: a value returned by tc->cc->read()
*
* Cycle counts are converted correctly as long as they
* fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
* with "max cycle count" == cs->mask+1.
*
* This allows conversion of cycle counter values which were generated
* in the past.
*/
extern u64 timecounter_cyc2time(struct timecounter *tc,
cycle_t cycle_tstamp);
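
A hedged sketch of the flow these declarations imply (the register accessor and the mult/shift values are assumptions; a real driver derives them from its counter's frequency):

static cycle_t my_hw_read(const struct cyclecounter *cc)
{
	return (cycle_t)readl(my_counter_reg);	/* hypothetical 32-bit counter */
}

static struct cyclecounter my_cc = {
	.read  = my_hw_read,
	.mask  = CLOCKSOURCE_MASK(32),
	.mult  = 10,	/* e.g. a 100 MHz counter: 10 ns per cycle */
	.shift = 0,
};

static struct timecounter my_tc;

/* at init: */
timecounter_init(&my_tc, &my_cc, 0);
/* later, and more often than the 32-bit counter wraps: */
u64 ns = timecounter_read(&my_tc);	/* ns since the initial stamp */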
/**
* struct clocksource - hardware abstraction for a free running counter
* Provides mostly state-free accessors to the underlying hardware.
* This is the structure used for system time.
*
* @name: ptr to clocksource name
* @list: list head for registration

View File

@@ -1,6 +1,8 @@
#ifndef _CM4000_H_
#define _CM4000_H_
#include <linux/types.h>
#define MAX_ATR 33
#define CM4000_MAX_DEV 4
@@ -10,9 +12,9 @@
* not to break compilation of userspace apps. -HW */
typedef struct atreq {
int32_t atr_len;
__s32 atr_len;
unsigned char atr[64];
int32_t power_act;
__s32 power_act;
unsigned char bIFSD;
unsigned char bIFSC;
} atreq_t;
@@ -22,13 +24,13 @@ typedef struct atreq {
* member sizes. This leads to CONFIG_COMPAT breakage, since 32bit userspace
* will lay out the structure members differently than the 64bit kernel.
*
* I've changed "ptsreq.protocol" from "unsigned long" to "u_int32_t".
* I've changed "ptsreq.protocol" from "unsigned long" to "__u32".
* On 32bit this will make no difference. With 64bit kernels, it will make
* 32bit apps work, too.
*/
typedef struct ptsreq {
u_int32_t protocol; /*T=0: 2^0, T=1: 2^1*/
__u32 protocol; /*T=0: 2^0, T=1: 2^1*/
unsigned char flags;
unsigned char pts1;
unsigned char pts2;
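
The comment above is the rationale behind this commit's uint32_t-to-__u32 churn; a hedged illustration of the breakage fixed-width types prevent (assuming an LP64 kernel and ILP32 userspace):

struct pts_old { unsigned long protocol; };	/* 8 bytes in the 64-bit kernel, 4 in a 32-bit app */
struct pts_new { __u32 protocol; };		/* 4 bytes on both sides */
/* An ioctl handler decoding pts_old from a 32-bit process would read a
 * different layout than the one the process wrote; __u32 keeps them identical. */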

View File

@@ -65,20 +65,20 @@ struct proc_event {
} ack;
struct fork_proc_event {
pid_t parent_pid;
pid_t parent_tgid;
pid_t child_pid;
pid_t child_tgid;
__kernel_pid_t parent_pid;
__kernel_pid_t parent_tgid;
__kernel_pid_t child_pid;
__kernel_pid_t child_tgid;
} fork;
struct exec_proc_event {
pid_t process_pid;
pid_t process_tgid;
__kernel_pid_t process_pid;
__kernel_pid_t process_tgid;
} exec;
struct id_proc_event {
pid_t process_pid;
pid_t process_tgid;
__kernel_pid_t process_pid;
__kernel_pid_t process_tgid;
union {
__u32 ruid; /* task uid */
__u32 rgid; /* task gid */
@@ -90,8 +90,8 @@ struct proc_event {
} id;
struct exit_proc_event {
pid_t process_pid;
pid_t process_tgid;
__kernel_pid_t process_pid;
__kernel_pid_t process_tgid;
__u32 exit_code, exit_signal;
} exit;
} event_data;

View File

@@ -29,6 +29,7 @@
int com20020_check(struct net_device *dev);
int com20020_found(struct net_device *dev, int shared);
extern const struct net_device_ops com20020_netdev_ops;
/* The number of low I/O ports used by the card. */
#define ARCNET_TOTAL_SIZE 8

View File

@@ -125,6 +125,13 @@ struct compat_dirent {
char d_name[256];
};
struct compat_ustat {
compat_daddr_t f_tfree;
compat_ino_t f_tinode;
char f_fname[6];
char f_fpack[6];
};
typedef union compat_sigval {
compat_int_t sival_int;
compat_uptr_t sival_ptr;
@@ -178,11 +185,18 @@ long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
unsigned nsems, const struct compat_timespec __user *timeout);
asmlinkage long compat_sys_keyctl(u32 option,
u32 arg2, u32 arg3, u32 arg4, u32 arg5);
asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
asmlinkage ssize_t compat_sys_readv(unsigned long fd,
const struct compat_iovec __user *vec, unsigned long vlen);
asmlinkage ssize_t compat_sys_writev(unsigned long fd,
const struct compat_iovec __user *vec, unsigned long vlen);
asmlinkage ssize_t compat_sys_preadv(unsigned long fd,
const struct compat_iovec __user *vec,
unsigned long vlen, u32 pos_low, u32 pos_high);
asmlinkage ssize_t compat_sys_pwritev(unsigned long fd,
const struct compat_iovec __user *vec,
unsigned long vlen, u32 pos_low, u32 pos_high);
int compat_do_execve(char * filename, compat_uptr_t __user *argv,
compat_uptr_t __user *envp, struct pt_regs * regs);

View File

@@ -3,8 +3,10 @@
#endif
/* GCC 4.1.[01] miscompiles __weak */
#if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1
# error Your version of gcc miscompiles the __weak directive
#ifdef __KERNEL__
# if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1
# error Your version of gcc miscompiles the __weak directive
# endif
#endif
#define __used __attribute__((__used__))

View File

@@ -68,6 +68,7 @@ struct ftrace_branch_data {
unsigned long miss;
unsigned long hit;
};
unsigned long miss_hit[2];
};
};
@@ -125,10 +126,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
.line = __LINE__, \
}; \
______r = !!(cond); \
if (______r) \
______f.hit++; \
else \
______f.miss++; \
______f.miss_hit[______r]++; \
______r; \
}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
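
The hunk above swaps the hit/miss branch for an indexed increment; a short sketch of why the two are equivalent (this relies on the anonymous union added earlier, which pairs miss_hit[0] with miss and miss_hit[1] with hit):

______r = !!(cond);		/* normalise the outcome to 0 or 1 */
______f.miss_hit[______r]++;	/* 0 bumps the miss slot, 1 the hit slot */
/* Same counts as the old if/else, without a branch in the instrumentation. */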

View File

@@ -39,8 +39,10 @@
#define CN_IDX_V86D 0x4
#define CN_VAL_V86D_UVESAFB 0x1
#define CN_IDX_BB 0x5 /* BlackBoard, from the TSP GPL sampling framework */
#define CN_DST_IDX 0x6
#define CN_DST_VAL 0x1
#define CN_NETLINK_USERS 6
#define CN_NETLINK_USERS 7
/*
* Maximum connector's message size.
@@ -109,6 +111,12 @@ struct cn_queue_dev {
unsigned char name[CN_CBQ_NAMELEN];
struct workqueue_struct *cn_queue;
/* Sent to kevent to create cn_queue only when needed */
struct work_struct wq_creation;
/* Tell if the wq_creation job is pending/completed */
atomic_t wq_requested;
/* Wait for cn_queue to be created */
wait_queue_head_t wq_created;
struct list_head queue_list;
spinlock_t queue_lock;
@@ -164,6 +172,8 @@ int cn_netlink_send(struct cn_msg *, u32, gfp_t);
int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *));
void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work);
struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *);
void cn_queue_free_dev(struct cn_queue_dev *dev);

View File

@@ -137,8 +137,8 @@ extern void resume_console(void);
int mda_console_init(void);
void prom_con_init(void);
void vcs_make_sysfs(struct tty_struct *tty);
void vcs_remove_sysfs(struct tty_struct *tty);
void vcs_make_sysfs(int index);
void vcs_remove_sysfs(int index);
/* Some debug stub to catch some of the obvious races in the VT code */
#if 1

View File

@@ -23,7 +23,6 @@
#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
struct cpu {
int node_id; /* The node which contains the CPU */
@@ -103,16 +102,6 @@ extern struct sysdev_class cpu_sysdev_class;
#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */
static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
{
mutex_lock(cpu_hp_mutex);
}
static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
{
mutex_unlock(cpu_hp_mutex);
}
extern void get_online_cpus(void);
extern void put_online_cpus(void);
#define hotcpu_notifier(fn, pri) { \
@@ -126,11 +115,6 @@ int cpu_down(unsigned int cpu);
#else /* CONFIG_HOTPLUG_CPU */
static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
{ }
static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
{ }
#define get_online_cpus() do { } while (0)
#define put_online_cpus() do { } while (0)
#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)

View File

@@ -12,6 +12,7 @@
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#ifdef CONFIG_CPUSETS
@@ -29,19 +30,29 @@ void cpuset_init_current_mems_allowed(void);
void cpuset_update_task_memory_state(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);
extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
static int inline cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
return number_of_cpusets <= 1 ||
__cpuset_zone_allowed_softwall(z, gfp_mask);
__cpuset_node_allowed_softwall(node, gfp_mask);
}
static int inline cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
return number_of_cpusets <= 1 ||
__cpuset_zone_allowed_hardwall(z, gfp_mask);
__cpuset_node_allowed_hardwall(node, gfp_mask);
}
static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}
static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
@@ -90,12 +101,12 @@ static inline void cpuset_init_smp(void) {}
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{
*mask = cpu_possible_map;
cpumask_copy(mask, cpu_possible_mask);
}
static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
struct cpumask *mask)
{
*mask = cpu_possible_map;
cpumask_copy(mask, cpu_possible_mask);
}
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -112,6 +123,16 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
return 1;
}
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
return 1;
}
static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
return 1;
}
static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
return 1;

View File

@@ -40,6 +40,7 @@
#define CRYPTO_ALG_TYPE_SHASH 0x00000009
#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
#define CRYPTO_ALG_TYPE_PCOMPRESS 0x0000000f
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c
@@ -548,9 +549,6 @@ struct crypto_attr_u32 {
* Transform user interface.
*/
struct crypto_tfm *crypto_alloc_tfm(const char *alg_name,
const struct crypto_type *frontend,
u32 type, u32 mask);
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);

View File

@@ -82,9 +82,9 @@ struct cyclades_monitor {
* open)
*/
struct cyclades_idle_stats {
time_t in_use; /* Time device has been in use (secs) */
time_t recv_idle; /* Time since last char received (secs) */
time_t xmit_idle; /* Time since last char transmitted (secs) */
__kernel_time_t in_use; /* Time device has been in use (secs) */
__kernel_time_t recv_idle; /* Time since last char received (secs) */
__kernel_time_t xmit_idle; /* Time since last char transmitted (secs) */
unsigned long recv_bytes; /* Bytes received */
unsigned long xmit_bytes; /* Bytes transmitted */
unsigned long overruns; /* Input overruns */

View File

@@ -1,3 +1,23 @@
/*
* Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
#ifndef DCA_H
#define DCA_H
/* DCA Provider API */

View File

@@ -112,7 +112,7 @@ struct dentry {
struct list_head d_subdirs; /* our children */
struct list_head d_alias; /* inode alias list */
unsigned long d_time; /* used by d_revalidate */
struct dentry_operations *d_op;
const struct dentry_operations *d_op;
struct super_block *d_sb; /* The root of the dentry tree */
void *d_fsdata; /* fs-specific data */

View File

@@ -355,46 +355,6 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
return __dccp_hdr_len(dccp_hdr(skb));
}
/* initial values for each feature */
#define DCCPF_INITIAL_SEQUENCE_WINDOW 100
#define DCCPF_INITIAL_ACK_RATIO 2
#define DCCPF_INITIAL_CCID DCCPC_CCID2
/* FIXME: for now we default to 1 but it should really be 0 */
#define DCCPF_INITIAL_SEND_NDP_COUNT 1
/**
* struct dccp_minisock - Minimal DCCP connection representation
*
* Will be used to pass the state from dccp_request_sock to dccp_sock.
*
* @dccpms_sequence_window - Sequence Window Feature (section 7.5.2)
* @dccpms_pending - List of features being negotiated
* @dccpms_conf -
*/
struct dccp_minisock {
__u64 dccpms_sequence_window;
struct list_head dccpms_pending;
struct list_head dccpms_conf;
};
struct dccp_opt_conf {
__u8 *dccpoc_val;
__u8 dccpoc_len;
};
struct dccp_opt_pend {
struct list_head dccpop_node;
__u8 dccpop_type;
__u8 dccpop_feat;
__u8 *dccpop_val;
__u8 dccpop_len;
int dccpop_conf;
struct dccp_opt_conf *dccpop_sc;
};
extern void dccp_minisock_init(struct dccp_minisock *dmsk);
/**
* struct dccp_request_sock - represent DCCP-specific connection request
* @dreq_inet_rsk: structure inherited from
@@ -483,13 +443,14 @@ struct dccp_ackvec;
* @dccps_timestamp_time - time of receiving latest @dccps_timestamp_echo
* @dccps_l_ack_ratio - feature-local Ack Ratio
* @dccps_r_ack_ratio - feature-remote Ack Ratio
* @dccps_l_seq_win - local Sequence Window (influences ack number validity)
* @dccps_r_seq_win - remote Sequence Window (influences seq number validity)
* @dccps_pcslen - sender partial checksum coverage (via sockopt)
* @dccps_pcrlen - receiver partial checksum coverage (via sockopt)
* @dccps_send_ndp_count - local Send NDP Count feature (7.7.2)
* @dccps_ndp_count - number of Non Data Packets since last data packet
* @dccps_mss_cache - current value of MSS (path MTU minus header sizes)
* @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4)
* @dccps_minisock - associated minisock (accessed via dccp_msk)
* @dccps_featneg - tracks feature-negotiation state (mostly during handshake)
* @dccps_hc_rx_ackvec - rx half connection ack vector
* @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection)
@@ -523,12 +484,13 @@ struct dccp_sock {
__u32 dccps_timestamp_time;
__u16 dccps_l_ack_ratio;
__u16 dccps_r_ack_ratio;
__u64 dccps_l_seq_win:48;
__u64 dccps_r_seq_win:48;
__u8 dccps_pcslen:4;
__u8 dccps_pcrlen:4;
__u8 dccps_send_ndp_count:1;
__u64 dccps_ndp_count:48;
unsigned long dccps_rate_last;
struct dccp_minisock dccps_minisock;
struct list_head dccps_featneg;
struct dccp_ackvec *dccps_hc_rx_ackvec;
struct ccid *dccps_hc_rx_ccid;
@@ -546,11 +508,6 @@ static inline struct dccp_sock *dccp_sk(const struct sock *sk)
return (struct dccp_sock *)sk;
}
static inline struct dccp_minisock *dccp_msk(const struct sock *sk)
{
return (struct dccp_minisock *)&dccp_sk(sk)->dccps_minisock;
}
static inline const char *dccp_role(const struct sock *sk)
{
switch (dccp_sk(sk)->dccps_role) {

View File

@@ -71,6 +71,9 @@ struct dentry *debugfs_create_bool(const char *name, mode_t mode,
struct dentry *debugfs_create_blob(const char *name, mode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob);
bool debugfs_initialized(void);
#else
#include <linux/err.h>
@@ -183,6 +186,11 @@ static inline struct dentry *debugfs_create_blob(const char *name, mode_t mode,
return ERR_PTR(-ENODEV);
}
static inline bool debugfs_initialized(void)
{
return false;
}
#endif
#endif

View File

@@ -139,6 +139,9 @@ struct target_type {
dm_ioctl_fn ioctl;
dm_merge_fn merge;
dm_busy_fn busy;
/* For internal device-mapper use. */
struct list_head list;
};
struct io_restrictions {

View File

@@ -28,6 +28,7 @@
#define BUS_ID_SIZE 20
struct device;
struct device_private;
struct device_driver;
struct driver_private;
struct class;
@@ -147,7 +148,7 @@ extern void put_driver(struct device_driver *drv);
extern struct device_driver *driver_find(const char *name,
struct bus_type *bus);
extern int driver_probe_done(void);
extern int wait_for_device_probe(void);
extern void wait_for_device_probe(void);
/* sysfs interface for exporting driver attributes */
@@ -367,15 +368,11 @@ struct device_dma_parameters {
};
struct device {
struct klist klist_children;
struct klist_node knode_parent; /* node in sibling list */
struct klist_node knode_driver;
struct klist_node knode_bus;
struct device *parent;
struct device_private *p;
struct kobject kobj;
char bus_id[BUS_ID_SIZE]; /* position on parent bus */
unsigned uevent_suppress:1;
const char *init_name; /* initial name of the device */
struct device_type *type;
@@ -387,8 +384,13 @@ struct device {
struct device_driver *driver; /* which driver has allocated this
device */
void *driver_data; /* data private to the driver */
void *platform_data; /* Platform specific data, device
core doesn't touch it */
void *platform_data; /* We will remove platform_data
field if all platform devices
pass its platform specific data
from platform_device->platform_data,
other kind of devices should not
use platform_data. */
struct dev_pm_info power;
#ifdef CONFIG_NUMA
@@ -427,8 +429,7 @@ struct device {
static inline const char *dev_name(const struct device *dev)
{
/* will be changed into kobject_name(&dev->kobj) in the near future */
return dev->bus_id;
return kobject_name(&dev->kobj);
}
extern int dev_set_name(struct device *dev, const char *name, ...)
@@ -463,6 +464,16 @@ static inline void dev_set_drvdata(struct device *dev, void *data)
dev->driver_data = data;
}
static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
{
return dev->kobj.uevent_suppress;
}
static inline void dev_set_uevent_suppress(struct device *dev, int val)
{
dev->kobj.uevent_suppress = val;
}
static inline int device_is_registered(struct device *dev)
{
return dev->kobj.state_in_sysfs;
@@ -483,7 +494,8 @@ extern int device_for_each_child(struct device *dev, void *data,
extern struct device *device_find_child(struct device *dev, void *data,
int (*match)(struct device *dev, void *data));
extern int device_rename(struct device *dev, char *new_name);
extern int device_move(struct device *dev, struct device *new_parent);
extern int device_move(struct device *dev, struct device *new_parent,
enum dpm_order dpm_order);
/*
* Root device objects for grouping under /sys/devices
@@ -570,7 +582,7 @@ extern const char *dev_driver_string(const struct device *dev);
#if defined(DEBUG)
#define dev_dbg(dev, format, arg...) \
dev_printk(KERN_DEBUG , dev , format , ## arg)
#elif defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
#elif defined(CONFIG_DYNAMIC_DEBUG)
#define dev_dbg(dev, format, ...) do { \
dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
} while (0)

View File

@@ -9,6 +9,8 @@
#ifndef _DLM_NETLINK_H
#define _DLM_NETLINK_H
#include <linux/types.h>
enum {
DLM_STATUS_WAITING = 1,
DLM_STATUS_GRANTED = 2,
@@ -18,16 +20,16 @@ enum {
#define DLM_LOCK_DATA_VERSION 1
struct dlm_lock_data {
uint16_t version;
uint32_t lockspace_id;
__u16 version;
__u32 lockspace_id;
int nodeid;
int ownpid;
uint32_t id;
uint32_t remid;
uint64_t xid;
int8_t status;
int8_t grmode;
int8_t rqmode;
__u32 id;
__u32 remid;
__u64 xid;
__s8 status;
__s8 grmode;
__s8 rqmode;
unsigned long timestamp;
int resource_namelen;
char resource_name[DLM_RESNAME_MAXLEN];

View File

@@ -28,6 +28,9 @@ struct dm_dirty_log_type {
const char *name;
struct module *module;
/* For internal device-mapper use */
struct list_head list;
int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
unsigned argc, char **argv);
void (*dtr)(struct dm_dirty_log *log);
@@ -113,6 +116,16 @@ struct dm_dirty_log_type {
*/
int (*status)(struct dm_dirty_log *log, status_type_t status_type,
char *result, unsigned maxlen);
/*
* is_remote_recovering is necessary for cluster mirroring. It provides
* a way to detect recovery on another node, so we aren't writing
* concurrently. This function is likely to block (when a cluster log
* is used).
*
* Returns: 0, 1
*/
int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region);
};
int dm_dirty_log_type_register(struct dm_dirty_log_type *type);

View File

@@ -113,20 +113,20 @@ struct dm_ioctl {
* return -ENOTTY) fill out this field, even if the
* command failed.
*/
uint32_t version[3]; /* in/out */
uint32_t data_size; /* total size of data passed in
__u32 version[3]; /* in/out */
__u32 data_size; /* total size of data passed in
* including this struct */
uint32_t data_start; /* offset to start of data
__u32 data_start; /* offset to start of data
* relative to start of this struct */
uint32_t target_count; /* in/out */
int32_t open_count; /* out */
uint32_t flags; /* in/out */
uint32_t event_nr; /* in/out */
uint32_t padding;
__u32 target_count; /* in/out */
__s32 open_count; /* out */
__u32 flags; /* in/out */
__u32 event_nr; /* in/out */
__u32 padding;
uint64_t dev; /* in/out */
__u64 dev; /* in/out */
char name[DM_NAME_LEN]; /* device name */
char uuid[DM_UUID_LEN]; /* unique identifier for
@@ -139,9 +139,9 @@ struct dm_ioctl {
* dm_ioctl.
*/
struct dm_target_spec {
uint64_t sector_start;
uint64_t length;
int32_t status; /* used when reading from kernel only */
__u64 sector_start;
__u64 length;
__s32 status; /* used when reading from kernel only */
/*
* Location of the next dm_target_spec.
@@ -153,7 +153,7 @@ struct dm_target_spec {
* (that follows the dm_ioctl struct) to the start of the "next"
* dm_target_spec.
*/
uint32_t next;
__u32 next;
char target_type[DM_MAX_TYPE_NAME];
@@ -168,17 +168,17 @@ struct dm_target_spec {
* Used to retrieve the target dependencies.
*/
struct dm_target_deps {
uint32_t count; /* Array size */
uint32_t padding; /* unused */
uint64_t dev[0]; /* out */
__u32 count; /* Array size */
__u32 padding; /* unused */
__u64 dev[0]; /* out */
};
/*
* Used to get a list of all dm devices.
*/
struct dm_name_list {
uint64_t dev;
uint32_t next; /* offset to the next record from
__u64 dev;
__u32 next; /* offset to the next record from
the _start_ of this */
char name[0];
};
@@ -187,8 +187,8 @@ struct dm_name_list {
* Used to retrieve the target versions
*/
struct dm_target_versions {
uint32_t next;
uint32_t version[3];
__u32 next;
__u32 version[3];
char name[0];
};
@@ -197,7 +197,7 @@ struct dm_target_versions {
* Used to pass message to a target
*/
struct dm_target_msg {
uint64_t sector; /* Device sector */
__u64 sector; /* Device sector */
char message[0];
};

include/linux/dma-debug.h (new file, 174 lines)
View File

@@ -0,0 +1,174 @@
/*
* Copyright (C) 2008 Advanced Micro Devices, Inc.
*
* Author: Joerg Roedel <joerg.roedel@amd.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __DMA_DEBUG_H
#define __DMA_DEBUG_H
#include <linux/types.h>
struct device;
struct scatterlist;
struct bus_type;
#ifdef CONFIG_DMA_API_DEBUG
extern void dma_debug_add_bus(struct bus_type *bus);
extern void dma_debug_init(u32 num_entries);
extern void debug_dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
int direction, dma_addr_t dma_addr,
bool map_single);
extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
size_t size, int direction, bool map_single);
extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, int mapped_ents, int direction);
extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, int dir);
extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t dma_addr, void *virt);
extern void debug_dma_free_coherent(struct device *dev, size_t size,
void *virt, dma_addr_t addr);
extern void debug_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
int direction);
extern void debug_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle,
size_t size, int direction);
extern void debug_dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
int direction);
extern void debug_dma_sync_single_range_for_device(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size, int direction);
extern void debug_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg,
int nelems, int direction);
extern void debug_dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sg,
int nelems, int direction);
extern void debug_dma_dump_mappings(struct device *dev);
#else /* CONFIG_DMA_API_DEBUG */
static inline void dma_debug_add_bus(struct bus_type *bus)
{
}
static inline void dma_debug_init(u32 num_entries)
{
}
static inline void debug_dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
int direction, dma_addr_t dma_addr,
bool map_single)
{
}
static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
size_t size, int direction,
bool map_single)
{
}
static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, int mapped_ents, int direction)
{
}
static inline void debug_dma_unmap_sg(struct device *dev,
struct scatterlist *sglist,
int nelems, int dir)
{
}
static inline void debug_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t dma_addr, void *virt)
{
}
static inline void debug_dma_free_coherent(struct device *dev, size_t size,
void *virt, dma_addr_t addr)
{
}
static inline void debug_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle,
size_t size, int direction)
{
}
static inline void debug_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle,
size_t size, int direction)
{
}
static inline void debug_dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
int direction)
{
}
static inline void debug_dma_sync_single_range_for_device(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
int direction)
{
}
static inline void debug_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg,
int nelems, int direction)
{
}
static inline void debug_dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sg,
int nelems, int direction)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
{
}
#endif /* CONFIG_DMA_API_DEBUG */
#endif /* __DMA_DEBUG_H */
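As a rough sketch of how these hooks are meant to be wired up (assuming CONFIG_DMA_API_DEBUG; my_arch_map_page() is a hypothetical stand-in for the real architecture mapping operation), an arch's dma_map_page() path would record every mapping right after creating it:

static inline dma_addr_t my_dma_map_page(struct device *dev, struct page *page,
					 size_t offset, size_t size,
					 int direction)
{
	/* hypothetical arch-level mapping op, not a real kernel symbol */
	dma_addr_t addr = my_arch_map_page(dev, page, offset, size, direction);

	/* record the mapping so dma-debug can flag leaks and bad syncs */
	debug_dma_map_page(dev, page, offset, size, direction, addr, false);
	return addr;
}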

View File

@@ -3,6 +3,8 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-attrs.h>
#include <linux/scatterlist.h>
/* These definitions mirror those in pci.h, so they can be used
* interchangeably with their PCI_ counterparts */
@@ -13,6 +15,52 @@ enum dma_data_direction {
DMA_NONE = 3,
};
struct dma_map_ops {
void* (*alloc_coherent)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
void (*free_coherent)(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
dma_addr_t (*map_page)(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs);
void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs);
int (*map_sg)(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs);
void (*unmap_sg)(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir,
struct dma_attrs *attrs);
void (*sync_single_for_cpu)(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir);
void (*sync_single_for_device)(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir);
void (*sync_single_range_for_cpu)(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
enum dma_data_direction dir);
void (*sync_single_range_for_device)(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
enum dma_data_direction dir);
void (*sync_sg_for_cpu)(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir);
void (*sync_sg_for_device)(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir);
int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
int (*dma_supported)(struct device *dev, u64 mask);
int is_phys;
};
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
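A typical probe-time use of DMA_BIT_MASK() is to ask for 64-bit DMA and fall back to 32-bit (a sketch; "pdev" stands for a PCI device already bound to the driver):

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		dev_warn(&pdev->dev, "no suitable DMA mask available\n");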
/*

View File

@@ -11,6 +11,7 @@
#define DMA_PTE_READ (1)
#define DMA_PTE_WRITE (2)
#define DMA_PTE_SNP (1 << 11)
struct intel_iommu;
struct dmar_domain;

View File

@@ -23,9 +23,6 @@
#include <linux/device.h>
#include <linux/uio.h>
#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/rcupdate.h>
#include <linux/dma-mapping.h>
/**
@@ -205,6 +202,7 @@ struct dma_async_tx_descriptor {
/**
* struct dma_device - info on the entity supplying DMA services
* @chancnt: how many DMA channels are supported
* @privatecnt: how many DMA channels are requested by dma_request_channel
* @channels: the list of struct dma_chan
* @global_node: list_head for global dma_device_list
* @cap_mask: one or more dma_capability flags
@@ -227,6 +225,7 @@ struct dma_async_tx_descriptor {
struct dma_device {
unsigned int chancnt;
unsigned int privatecnt;
struct list_head channels;
struct list_head global_node;
dma_cap_mask_t cap_mask;
@@ -291,6 +290,24 @@ static inline void net_dmaengine_put(void)
}
#endif
#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get() dmaengine_get()
#define async_dmaengine_put() dmaengine_put()
#define async_dma_find_channel(type) dma_find_channel(type)
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
return NULL;
}
#endif
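Putting the pieces together, a client might grab a private memcpy channel with the dma_cap_* helpers (a sketch; error handling trimmed):

static struct dma_chan *grab_memcpy_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	/* NULL filter: any channel advertising the capability will do */
	return dma_request_channel(mask, NULL, NULL);
}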
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
@@ -337,6 +354,13 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
set_bit(tx_type, dstp->bits);
}
#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
clear_bit(tx_type, dstp->bits);
}
#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{

View File

@@ -24,10 +24,10 @@
#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/msi.h>
#include <linux/irqreturn.h>
#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
struct intel_iommu;
#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
struct dmar_drhd_unit {
struct list_head list; /* list of drhd units */
struct acpi_dmar_header *hdr; /* ACPI header */
@@ -49,7 +49,7 @@ extern int dmar_dev_scope_init(void);
/* Intel IOMMU detection */
extern void detect_intel_iommu(void);
extern int enable_drhd_fault_handling(void);
extern int parse_ioapics_under_ir(void);
extern int alloc_iommu(struct dmar_drhd_unit *);
@@ -63,12 +63,12 @@ static inline int dmar_table_init(void)
{
return -ENODEV;
}
static inline int enable_drhd_fault_handling(void)
{
return -1;
}
#endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */
#ifdef CONFIG_INTR_REMAP
extern int intr_remapping_enabled;
extern int enable_intr_remapping(int);
struct irte {
union {
struct {
@@ -97,6 +97,10 @@ struct irte {
__u64 high;
};
};
#ifdef CONFIG_INTR_REMAP
extern int intr_remapping_enabled;
extern int enable_intr_remapping(int);
extern int get_irte(int irq, struct irte *entry);
extern int modify_irte(int irq, struct irte *irte_modified);
extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count);
@@ -111,14 +115,40 @@ extern int irq_remapped(int irq);
extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev);
extern struct intel_iommu *map_ioapic_to_ir(int apic);
#else
static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
return -1;
}
static inline int modify_irte(int irq, struct irte *irte_modified)
{
return -1;
}
static inline int free_irte(int irq)
{
return -1;
}
static inline int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
return -1;
}
static inline int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index,
u16 sub_handle)
{
return -1;
}
static inline struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
return NULL;
}
static inline struct intel_iommu *map_ioapic_to_ir(int apic)
{
return NULL;
}
#define irq_remapped(irq) (0)
#define enable_intr_remapping(mode) (-1)
#define intr_remapping_enabled (0)
#endif
#ifdef CONFIG_DMAR
extern const char *dmar_get_fault_reason(u8 fault_reason);
/* Can't use the common MSI interrupt functions
* since DMAR is not a pci device
*/
@@ -127,8 +157,10 @@ extern void dmar_msi_mask(unsigned int irq);
extern void dmar_msi_read(int irq, struct msi_msg *msg);
extern void dmar_msi_write(int irq, struct msi_msg *msg);
extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern irqreturn_t dmar_fault(int irq, void *dev_id);
extern int arch_setup_dmar_msi(unsigned int irq);
#ifdef CONFIG_DMAR
extern int iommu_detected, no_iommu;
extern struct list_head dmar_rmrr_units;
struct dmar_rmrr_unit {

View File

@@ -47,7 +47,8 @@ extern int dmi_get_year(int field);
extern int dmi_name_in_vendors(const char *str);
extern int dmi_name_in_serial(const char *str);
extern int dmi_available;
extern int dmi_walk(void (*decode)(const struct dmi_header *));
extern int dmi_walk(void (*decode)(const struct dmi_header *, void *),
void *private_data);
extern bool dmi_match(enum dmi_field f, const char *str);
#else
@@ -61,8 +62,8 @@ static inline int dmi_get_year(int year) { return 0; }
static inline int dmi_name_in_vendors(const char *s) { return 0; }
static inline int dmi_name_in_serial(const char *s) { return 0; }
#define dmi_available 0
static inline int dmi_walk(void (*decode)(const struct dmi_header *))
{ return -1; }
static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *),
void *private_data) { return -1; }
static inline bool dmi_match(enum dmi_field f, const char *str)
{ return false; }
static inline const struct dmi_system_id *
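For illustration, a decode callback written against the new two-argument signature might look like this (a sketch; DMI structure type 17, "Memory Device", is just an example value):

static void count_mem_devices(const struct dmi_header *dh, void *priv)
{
	int *count = priv;

	if (dh->type == 17)	/* SMBIOS "Memory Device" entry */
		(*count)++;
}

/* ... int n = 0; dmi_walk(count_mem_devices, &n); ... */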

View File

@@ -1,12 +0,0 @@
/* platform data for the DS1WM driver */
struct ds1wm_platform_data {
int bus_shift; /* number of shifts needed to calculate the
* offset between DS1WM registers;
* e.g. on h5xxx and h2200 this is 2
* (registers aligned to 4-byte boundaries),
* while on hx4700 this is 1 */
int active_high;
void (*enable)(struct platform_device *pdev);
void (*disable)(struct platform_device *pdev);
};

587
include/linux/dst.h Normal file
View File

@@ -0,0 +1,587 @@
/*
* 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __DST_H
#define __DST_H
#include <linux/types.h>
#include <linux/connector.h>
#define DST_NAMELEN 32
#define DST_NAME "dst"
enum {
/* Remove node with given id from storage */
DST_DEL_NODE = 0,
/* Add remote node with given id to the storage */
DST_ADD_REMOTE,
/* Add local node with given id to the storage to be exported and used by remote peers */
DST_ADD_EXPORT,
/* Crypto initialization command (hash/cipher used to protect the connection) */
DST_CRYPTO,
/* Security attributes for given connection (permissions for example) */
DST_SECURITY,
/* Register given node in the block layer subsystem */
DST_START,
DST_CMD_MAX
};
struct dst_ctl
{
/* Storage name */
char name[DST_NAMELEN];
/* Command flags */
__u32 flags;
/* Command itself (see above) */
__u32 cmd;
/* Maximum number of pages per single request in this device */
__u32 max_pages;
/* Stale/error transaction scanning timeout in milliseconds */
__u32 trans_scan_timeout;
/* Maximum number of retry sends before completing transaction as broken */
__u32 trans_max_retries;
/* Storage size */
__u64 size;
};
/* Reply command carries completion status */
struct dst_ctl_ack
{
struct cn_msg msg;
int error;
int unused[3];
};
/*
* Unfortunately the socket address structure is not exported to userspace
* and is redefined here.
*/
#define SADDR_MAX_DATA 128
struct saddr {
/* address family, AF_xxx */
unsigned short sa_family;
/* up to SADDR_MAX_DATA bytes of protocol address */
char sa_data[SADDR_MAX_DATA];
/* Number of bytes used in sa_data */
unsigned short sa_data_len;
};
/* Address structure */
struct dst_network_ctl
{
/* Socket type: datagram, stream...*/
unsigned int type;
/* Let me guess, is it a Jupiter diameter? */
unsigned int proto;
/* Peer's address */
struct saddr addr;
};
struct dst_crypto_ctl
{
/* Cipher and hash names */
char cipher_algo[DST_NAMELEN];
char hash_algo[DST_NAMELEN];
/* Key sizes. Can be zero for digest for example */
unsigned int cipher_keysize, hash_keysize;
/* Alignment. Calculated by the DST itself. */
unsigned int crypto_attached_size;
/* Number of threads to perform crypto operations */
int thread_num;
};
/* Export security attributes have these bits checked when a client connects */
#define DST_PERM_READ (1<<0)
#define DST_PERM_WRITE (1<<1)
/*
* Right now this is a simple model, where each remote address
* is assigned a set of permissions it is allowed to perform.
* In the real world a block device does not know anything but
* reading and writing, so this should be more than enough.
*/
struct dst_secure_user
{
unsigned int permissions;
struct saddr addr;
};
/*
* Export control command: the device to export and the network address
* on which to accept clients working with the given device
*/
struct dst_export_ctl
{
char device[DST_NAMELEN];
struct dst_network_ctl ctl;
};
enum {
DST_CFG = 1, /* Request remote configuration */
DST_IO, /* IO command */
DST_IO_RESPONSE, /* IO response */
DST_PING, /* Keepalive message */
DST_NCMD_MAX,
};
struct dst_cmd
{
/* Network command itself, see above */
__u32 cmd;
/*
* Size of the attached data
* (in most cases; for a READ command it means how many bytes were requested)
*/
__u32 size;
/* Crypto size: number of attached bytes with digest/hmac */
__u32 csize;
/* Here we can carry secret data */
__u32 reserved;
/* Read/write bits, see how they are encoded in bio structure */
__u64 rw;
/* BIO flags */
__u64 flags;
/* Unique command id (like transaction ID) */
__u64 id;
/* Sector to start IO from */
__u64 sector;
/* Hash data is placed after this header */
__u8 hash[0];
};
/*
* Convert command to/from network byte order.
* We do not use hton*() functions, since there is
* no 64-bit implementation.
*/
static inline void dst_convert_cmd(struct dst_cmd *c)
{
c->cmd = __cpu_to_be32(c->cmd);
c->csize = __cpu_to_be32(c->csize);
c->size = __cpu_to_be32(c->size);
c->sector = __cpu_to_be64(c->sector);
c->id = __cpu_to_be64(c->id);
c->flags = __cpu_to_be64(c->flags);
c->rw = __cpu_to_be64(c->rw);
}
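For example, a keepalive could be converted in place just before it hits the wire (a sketch; "st" is an already-connected struct dst_state, and dst_data_send_header() is declared in the kernel-only part of this header):

	struct dst_cmd ping = { .cmd = DST_PING };

	dst_convert_cmd(&ping);
	dst_data_send_header(st->socket, &ping, sizeof(ping), 0);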
/* Transaction id */
typedef __u64 dst_gen_t;
#ifdef __KERNEL__
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/device.h>
#include <linux/mempool.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/rbtree.h>
#ifdef CONFIG_DST_DEBUG
#define dprintk(f, a...) printk(KERN_NOTICE f, ##a)
#else
static inline void __attribute__ ((format (printf, 1, 2)))
dprintk(const char *fmt, ...) {}
#endif
struct dst_node;
struct dst_trans
{
/* DST node we are working with */
struct dst_node *n;
/* Entry inside transaction tree */
struct rb_node trans_entry;
/* Merlin kills this transaction when this memory cell equals zero */
atomic_t refcnt;
/* How this transaction should be processed by crypto engine */
short enc;
/* How many times this transaction was resent */
short retries;
/* Completion status */
int error;
/* When did we send it to the remote peer */
long send_time;
/* My name is...
* Well, computers do not speak, they have unique ids instead */
dst_gen_t gen;
/* Block IO we are working with */
struct bio *bio;
/* Network command for above block IO request */
struct dst_cmd cmd;
};
struct dst_crypto_engine
{
/* What should we do with all block requests */
struct crypto_hash *hash;
struct crypto_ablkcipher *cipher;
/* Pool of pages used to encrypt data into before sending */
int page_num;
struct page **pages;
/* What to do with current request */
int enc;
/* Who we are and where do we go */
struct scatterlist *src, *dst;
/* Maximum timeout waiting for encryption to be completed */
long timeout;
/* IV is a 64-bit sequential counter */
u64 iv;
/* Secret data */
void *private;
/* Cached temporary data lives here */
int size;
void *data;
};
struct dst_state
{
/* The main state protection */
struct mutex state_lock;
/* Polling machinery for sockets */
wait_queue_t wait;
wait_queue_head_t *whead;
/* Most of events are being waited here */
wait_queue_head_t thread_wait;
/* Who owns this? */
struct dst_node *node;
/* Network address for this state */
struct dst_network_ctl ctl;
/* Permissions to work with: read-only or rw connection */
u32 permissions;
/* Called when we need to clean private data */
void (* cleanup)(struct dst_state *st);
/* Used by the server: BIO completion queues BIOs here */
struct list_head request_list;
spinlock_t request_lock;
/* Guess what? No, it is not the number of planets */
atomic_t refcnt;
/* This flag is set when the connection should be dropped */
int need_exit;
/*
* Socket to work with. The second pointer is used for a
* lockless check of whether the socket was changed before performing
* the next action (like working with a cached polling result)
*/
struct socket *socket, *read_socket;
/* Cached preallocated data */
void *data;
unsigned int size;
/* Currently processed command */
struct dst_cmd cmd;
};
struct dst_info
{
/* Device size */
u64 size;
/* Local device name for export devices */
char local[DST_NAMELEN];
/* Network setup */
struct dst_network_ctl net;
/* Sysfs bits use this */
struct device device;
};
struct dst_node
{
struct list_head node_entry;
/* Hi, my name is stored here */
char name[DST_NAMELEN];
/* My cache name is stored here */
char cache_name[DST_NAMELEN];
/* Block device attached to given node.
* Only valid for exporting nodes */
struct block_device *bdev;
/* Network state machine for given peer */
struct dst_state *state;
/* Block IO machinery */
struct request_queue *queue;
struct gendisk *disk;
/* Number of threads in processing pool */
int thread_num;
/* Maximum number of pages in single IO */
int max_pages;
/* I'm that big in bytes */
loff_t size;
/* Exported to userspace node information */
struct dst_info *info;
/*
* Security attribute list.
* Used only by exporting node currently.
*/
struct list_head security_list;
struct mutex security_lock;
/*
* When this underflows, the universe collapses.
* But this will not happen, since the node will be freed
* when the reference counter reaches zero.
*/
atomic_t refcnt;
/* How precisely should I be started? */
int (*start)(struct dst_node *);
/* Crypto capabilities */
struct dst_crypto_ctl crypto;
u8 *hash_key;
u8 *cipher_key;
/* Pool of processing thread */
struct thread_pool *pool;
/* Transaction IDs live here */
atomic_long_t gen;
/*
* How frequently and how many times the transaction
* tree should be scanned to drop stale objects.
*/
long trans_scan_timeout;
int trans_max_retries;
/* Small gnomes live here */
struct rb_root trans_root;
struct mutex trans_lock;
/*
* Transaction cache/memory pool.
* It is big enough to contain not only the transaction
* itself, but also additional crypto data (digest/hmac).
*/
struct kmem_cache *trans_cache;
mempool_t *trans_pool;
/* This entity scans transaction tree */
struct delayed_work trans_work;
wait_queue_head_t wait;
};
/* Kernel representation of the security attribute */
struct dst_secure
{
struct list_head sec_entry;
struct dst_secure_user sec;
};
int dst_process_bio(struct dst_node *n, struct bio *bio);
int dst_node_init_connected(struct dst_node *n, struct dst_network_ctl *r);
int dst_node_init_listened(struct dst_node *n, struct dst_export_ctl *le);
static inline struct dst_state *dst_state_get(struct dst_state *st)
{
BUG_ON(atomic_read(&st->refcnt) == 0);
atomic_inc(&st->refcnt);
return st;
}
void dst_state_put(struct dst_state *st);
struct dst_state *dst_state_alloc(struct dst_node *n);
int dst_state_socket_create(struct dst_state *st);
void dst_state_socket_release(struct dst_state *st);
void dst_state_exit_connected(struct dst_state *st);
int dst_state_schedule_receiver(struct dst_state *st);
void dst_dump_addr(struct socket *sk, struct sockaddr *sa, char *str);
static inline void dst_state_lock(struct dst_state *st)
{
mutex_lock(&st->state_lock);
}
static inline void dst_state_unlock(struct dst_state *st)
{
mutex_unlock(&st->state_lock);
}
void dst_poll_exit(struct dst_state *st);
int dst_poll_init(struct dst_state *st);
static inline unsigned int dst_state_poll(struct dst_state *st)
{
unsigned int revents = POLLHUP | POLLERR;
dst_state_lock(st);
if (st->socket)
revents = st->socket->ops->poll(NULL, st->socket, NULL);
dst_state_unlock(st);
return revents;
}
static inline int dst_thread_setup(void *private, void *data)
{
return 0;
}
void dst_node_put(struct dst_node *n);
static inline struct dst_node *dst_node_get(struct dst_node *n)
{
atomic_inc(&n->refcnt);
return n;
}
int dst_data_recv(struct dst_state *st, void *data, unsigned int size);
int dst_recv_cdata(struct dst_state *st, void *cdata);
int dst_data_send_header(struct socket *sock,
void *data, unsigned int size, int more);
int dst_send_bio(struct dst_state *st, struct dst_cmd *cmd, struct bio *bio);
int dst_process_io(struct dst_state *st);
int dst_export_crypto(struct dst_node *n, struct bio *bio);
int dst_export_send_bio(struct bio *bio);
int dst_start_export(struct dst_node *n);
int __init dst_export_init(void);
void dst_export_exit(void);
/* Private structure for export block IO requests */
struct dst_export_priv
{
struct list_head request_entry;
struct dst_state *state;
struct bio *bio;
struct dst_cmd cmd;
};
static inline void dst_trans_get(struct dst_trans *t)
{
atomic_inc(&t->refcnt);
}
struct dst_trans *dst_trans_search(struct dst_node *node, dst_gen_t gen);
int dst_trans_remove(struct dst_trans *t);
int dst_trans_remove_nolock(struct dst_trans *t);
void dst_trans_put(struct dst_trans *t);
/*
* Convert bio into network command.
*/
static inline void dst_bio_to_cmd(struct bio *bio, struct dst_cmd *cmd,
u32 command, u64 id)
{
cmd->cmd = command;
cmd->flags = (bio->bi_flags << BIO_POOL_BITS) >> BIO_POOL_BITS;
cmd->rw = bio->bi_rw;
cmd->size = bio->bi_size;
cmd->csize = 0;
cmd->id = id;
cmd->sector = bio->bi_sector;
};
int dst_trans_send(struct dst_trans *t);
int dst_trans_crypto(struct dst_trans *t);
int dst_node_crypto_init(struct dst_node *n, struct dst_crypto_ctl *ctl);
void dst_node_crypto_exit(struct dst_node *n);
static inline int dst_need_crypto(struct dst_node *n)
{
struct dst_crypto_ctl *c = &n->crypto;
/*
* Logical OR would be appropriate here, but the bitwise one
* produces better code, so it is used instead.
*/
return (c->hash_algo[0] | c->cipher_algo[0]);
}
int dst_node_trans_init(struct dst_node *n, unsigned int size);
void dst_node_trans_exit(struct dst_node *n);
/*
* Pool of threads.
* The ready list contains threads currently free to be used, while the
* active list contains threads with work scheduled for them.
* Callers can wait on the given queue until a thread becomes ready.
*/
struct thread_pool
{
int thread_num;
struct mutex thread_lock;
struct list_head ready_list, active_list;
wait_queue_head_t wait;
};
void thread_pool_del_worker(struct thread_pool *p);
void thread_pool_del_worker_id(struct thread_pool *p, unsigned int id);
int thread_pool_add_worker(struct thread_pool *p,
char *name,
unsigned int id,
void *(* init)(void *data),
void (* cleanup)(void *data),
void *data);
void thread_pool_destroy(struct thread_pool *p);
struct thread_pool *thread_pool_create(int num, char *name,
void *(* init)(void *data),
void (* cleanup)(void *data),
void *data);
int thread_pool_schedule(struct thread_pool *p,
int (* setup)(void *stored_private, void *setup_data),
int (* action)(void *stored_private, void *setup_data),
void *setup_data, long timeout);
int thread_pool_schedule_private(struct thread_pool *p,
int (* setup)(void *private, void *data),
int (* action)(void *private, void *data),
void *data, long timeout, void *id);
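A sketch of the pool in use, with stand-in callbacks (whether thread_pool_create() reports failure via NULL or ERR_PTR() is an assumption here, not documented above):

static void *worker_init(void *data) { return data; }
static void worker_cleanup(void *data) { }
static int worker_setup(void *stored_private, void *setup_data) { return 0; }
static int worker_action(void *stored_private, void *setup_data) { return 0; }

	struct thread_pool *p = thread_pool_create(2, "dst-pool",
						   worker_init, worker_cleanup,
						   NULL);
	if (p && !IS_ERR(p))
		thread_pool_schedule(p, worker_setup, worker_action, NULL, HZ);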
#endif /* __KERNEL__ */
#endif /* __DST_H */

View File

@@ -76,7 +76,7 @@ struct audio_karaoke{ /* if Vocal1 or Vocal2 are non-zero, they get mixed */
} audio_karaoke_t; /* into left and right */
typedef uint16_t audio_attributes_t;
typedef __u16 audio_attributes_t;
/* bits: descr. */
/* 15-13 audio coding mode (0=ac3, 2=mpeg1, 3=mpeg2ext, 4=LPCM, 6=DTS, */
/* 12 multichannel extension */

View File

@@ -132,12 +132,12 @@ struct video_command {
#define VIDEO_VSYNC_FIELD_PROGRESSIVE (3)
struct video_event {
int32_t type;
__s32 type;
#define VIDEO_EVENT_SIZE_CHANGED 1
#define VIDEO_EVENT_FRAME_RATE_CHANGED 2
#define VIDEO_EVENT_DECODER_STOPPED 3
#define VIDEO_EVENT_VSYNC 4
time_t timestamp;
__kernel_time_t timestamp;
union {
video_size_t size;
unsigned int frame_rate; /* in frames per 1000sec */
@@ -157,25 +157,25 @@ struct video_status {
struct video_still_picture {
char __user *iFrame; /* pointer to a single iframe in memory */
int32_t size;
__s32 size;
};
typedef
struct video_highlight {
int active; /* 1=show highlight, 0=hide highlight */
uint8_t contrast1; /* 7- 4 Pattern pixel contrast */
__u8 contrast1; /* 7- 4 Pattern pixel contrast */
/* 3- 0 Background pixel contrast */
uint8_t contrast2; /* 7- 4 Emphasis pixel-2 contrast */
__u8 contrast2; /* 7- 4 Emphasis pixel-2 contrast */
/* 3- 0 Emphasis pixel-1 contrast */
uint8_t color1; /* 7- 4 Pattern pixel color */
__u8 color1; /* 7- 4 Pattern pixel color */
/* 3- 0 Background pixel color */
uint8_t color2; /* 7- 4 Emphasis pixel-2 color */
__u8 color2; /* 7- 4 Emphasis pixel-2 color */
/* 3- 0 Emphasis pixel-1 color */
uint32_t ypos; /* 23-22 auto action mode */
__u32 ypos; /* 23-22 auto action mode */
/* 21-12 start y */
/* 9- 0 end y */
uint32_t xpos; /* 23-22 button color number */
__u32 xpos; /* 23-22 button color number */
/* 21-12 start x */
/* 9- 0 end x */
} video_highlight_t;
@@ -189,17 +189,17 @@ typedef struct video_spu {
typedef struct video_spu_palette { /* SPU Palette information */
int length;
uint8_t __user *palette;
__u8 __user *palette;
} video_spu_palette_t;
typedef struct video_navi_pack {
int length; /* 0 ... 1024 */
uint8_t data[1024];
__u8 data[1024];
} video_navi_pack_t;
typedef uint16_t video_attributes_t;
typedef __u16 video_attributes_t;
/* bits: descr. */
/* 15-14 Video compression mode (0=MPEG-1, 1=MPEG-2) */
/* 13-12 TV system (0=525/60, 1=625/50) */

View File

@@ -74,4 +74,23 @@ struct dw_dma_slave {
#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */
/* DMA API extensions */
struct dw_cyclic_desc {
struct dw_desc **desc;
unsigned long periods;
void (*period_callback)(void *param);
void *period_callback_param;
};
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
dma_addr_t buf_addr, size_t buf_len, size_t period_len,
enum dma_data_direction direction);
void dw_dma_cyclic_free(struct dma_chan *chan);
int dw_dma_cyclic_start(struct dma_chan *chan);
void dw_dma_cyclic_stop(struct dma_chan *chan);
dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
#endif /* DW_DMAC_H */
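A sketch of the cyclic extension in use (assumes "chan", "buf_dma", "buf_len" and "period_len" were set up through the usual dmaengine and coherent-allocation paths, and that failures are reported via ERR_PTR()):

	struct dw_cyclic_desc *cdesc;

	cdesc = dw_dma_cyclic_prep(chan, buf_dma, buf_len, period_len,
				   DMA_TO_DEVICE);
	if (!IS_ERR(cdesc))
		dw_dma_cyclic_start(chan);
	/* ... dw_dma_cyclic_stop(chan); dw_dma_cyclic_free(chan); when done */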

View File

@@ -0,0 +1,88 @@
#ifndef _DYNAMIC_DEBUG_H
#define _DYNAMIC_DEBUG_H
/* dynamic_debug_enabled and dynamic_debug_enabled2 are bitmasks in which
* bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
* use independent hash functions, to reduce the chance of false positives.
*/
extern long long dynamic_debug_enabled;
extern long long dynamic_debug_enabled2;
/*
* An instance of this structure is created in a special
* ELF section at every dynamic debug callsite. At runtime,
* the special section is treated as an array of these.
*/
struct _ddebug {
/*
* These fields are used to drive the user interface
* for selecting and displaying debug callsites.
*/
const char *modname;
const char *function;
const char *filename;
const char *format;
char primary_hash;
char secondary_hash;
unsigned int lineno:24;
/*
* The flags field controls the behaviour at the callsite.
* The bits here are changed dynamically when the user
* writes commands to <debugfs>/dynamic_debug/ddebug
*/
#define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */
#define _DPRINTK_FLAGS_DEFAULT 0
unsigned int flags:8;
} __attribute__((aligned(8)));
int ddebug_add_module(struct _ddebug *tab, unsigned int n,
const char *modname);
#if defined(CONFIG_DYNAMIC_DEBUG)
extern int ddebug_remove_module(char *mod_name);
#define __dynamic_dbg_enabled(dd) ({ \
int __ret = 0; \
if (unlikely((dynamic_debug_enabled & (1LL << DEBUG_HASH)) && \
(dynamic_debug_enabled2 & (1LL << DEBUG_HASH2)))) \
if (unlikely(dd.flags)) \
__ret = 1; \
__ret; })
#define dynamic_pr_debug(fmt, ...) do { \
static struct _ddebug descriptor \
__used \
__attribute__((section("__verbose"), aligned(8))) = \
{ KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
if (__dynamic_dbg_enabled(descriptor)) \
printk(KERN_DEBUG KBUILD_MODNAME ":" pr_fmt(fmt), \
##__VA_ARGS__); \
} while (0)
#define dynamic_dev_dbg(dev, fmt, ...) do { \
static struct _ddebug descriptor \
__used \
__attribute__((section("__verbose"), aligned(8))) = \
{ KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
if (__dynamic_dbg_enabled(descriptor)) \
dev_printk(KERN_DEBUG, dev, \
KBUILD_MODNAME ": " pr_fmt(fmt),\
##__VA_ARGS__); \
} while (0)
#else
static inline int ddebug_remove_module(char *mod)
{
return 0;
}
#define dynamic_pr_debug(fmt, ...) do { } while (0)
#define dynamic_dev_dbg(dev, format, ...) do { } while (0)
#endif
#endif
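A sketch of a callsite (the DEBUG_HASH/DEBUG_HASH2 macros are supplied by kbuild at compile time; the message stays dormant until its flags are flipped through <debugfs>/dynamic_debug/ddebug as described above):

static int my_init(void)
{
	dynamic_pr_debug("loaded; enable me with +p in the control file\n");
	return 0;
}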

View File

@@ -1,93 +0,0 @@
#ifndef _DYNAMIC_PRINTK_H
#define _DYNAMIC_PRINTK_H
#define DYNAMIC_DEBUG_HASH_BITS 6
#define DEBUG_HASH_TABLE_SIZE (1 << DYNAMIC_DEBUG_HASH_BITS)
#define TYPE_BOOLEAN 1
#define DYNAMIC_ENABLED_ALL 0
#define DYNAMIC_ENABLED_NONE 1
#define DYNAMIC_ENABLED_SOME 2
extern int dynamic_enabled;
/* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which
* bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
* use independent hash functions, to reduce the chance of false positives.
*/
extern long long dynamic_printk_enabled;
extern long long dynamic_printk_enabled2;
struct mod_debug {
char *modname;
char *logical_modname;
char *flag_names;
int type;
int hash;
int hash2;
} __attribute__((aligned(8)));
int register_dynamic_debug_module(char *mod_name, int type, char *share_name,
char *flags, int hash, int hash2);
#if defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
extern int unregister_dynamic_debug_module(char *mod_name);
extern int __dynamic_dbg_enabled_helper(char *modname, int type,
int value, int hash);
#define __dynamic_dbg_enabled(module, type, value, level, hash) ({ \
int __ret = 0; \
if (unlikely((dynamic_printk_enabled & (1LL << DEBUG_HASH)) && \
(dynamic_printk_enabled2 & (1LL << DEBUG_HASH2)))) \
__ret = __dynamic_dbg_enabled_helper(module, type, \
value, hash);\
__ret; })
#define dynamic_pr_debug(fmt, ...) do { \
static char mod_name[] \
__attribute__((section("__verbose_strings"))) \
= KBUILD_MODNAME; \
static struct mod_debug descriptor \
__used \
__attribute__((section("__verbose"), aligned(8))) = \
{ mod_name, mod_name, NULL, TYPE_BOOLEAN, DEBUG_HASH, DEBUG_HASH2 };\
if (__dynamic_dbg_enabled(KBUILD_MODNAME, TYPE_BOOLEAN, \
0, 0, DEBUG_HASH)) \
printk(KERN_DEBUG KBUILD_MODNAME ":" fmt, \
##__VA_ARGS__); \
} while (0)
#define dynamic_dev_dbg(dev, format, ...) do { \
static char mod_name[] \
__attribute__((section("__verbose_strings"))) \
= KBUILD_MODNAME; \
static struct mod_debug descriptor \
__used \
__attribute__((section("__verbose"), aligned(8))) = \
{ mod_name, mod_name, NULL, TYPE_BOOLEAN, DEBUG_HASH, DEBUG_HASH2 };\
if (__dynamic_dbg_enabled(KBUILD_MODNAME, TYPE_BOOLEAN, \
0, 0, DEBUG_HASH)) \
dev_printk(KERN_DEBUG, dev, \
KBUILD_MODNAME ": " format, \
##__VA_ARGS__); \
} while (0)
#else
static inline int unregister_dynamic_debug_module(const char *mod_name)
{
return 0;
}
static inline int __dynamic_dbg_enabled_helper(char *modname, int type,
int value, int hash)
{
return 0;
}
#define __dynamic_dbg_enabled(module, type, value, level, hash) ({ 0; })
#define dynamic_pr_debug(fmt, ...) do { } while (0)
#define dynamic_dev_dbg(dev, format, ...) do { } while (0)
#endif
#endif

View File

@@ -18,6 +18,7 @@ struct sock_extended_err
#define SO_EE_ORIGIN_LOCAL 1
#define SO_EE_ORIGIN_ICMP 2
#define SO_EE_ORIGIN_ICMP6 3
#define SO_EE_ORIGIN_TIMESTAMPING 4
#define SO_EE_OFFENDER(ee) ((struct sockaddr*)((ee)+1))

View File

@@ -184,4 +184,25 @@ static inline unsigned compare_ether_addr_64bits(const u8 addr1[6+2],
}
#endif /* __KERNEL__ */
/**
* compare_ether_header - Compare two Ethernet headers
* @a: Pointer to Ethernet header
* @b: Pointer to Ethernet header
*
* Compare two Ethernet headers; returns 0 if equal.
* This assumes that the network header (i.e., IP header) is 4-byte
* aligned OR the platform can handle unaligned access. This is the
* case for all packets coming into netif_receive_skb or similar
* entry points.
*/
static inline int compare_ether_header(const void *a, const void *b)
{
u32 *a32 = (u32 *)((u8 *)a + 2);
u32 *b32 = (u32 *)((u8 *)b + 2);
return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
(a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
}
#endif /* _LINUX_ETHERDEVICE_H */

View File

@@ -7,6 +7,7 @@
* Portions Copyright 2002 Intel (eli.kupermann@intel.com,
* christopher.leech@intel.com,
* scott.feldman@intel.com)
* Portions Copyright (C) Sun Microsystems 2008
*/
#ifndef _LINUX_ETHTOOL_H
@@ -287,10 +288,75 @@ enum ethtool_flags {
ETH_FLAG_LRO = (1 << 15), /* LRO is enabled */
};
struct ethtool_rxnfc {
__u32 cmd;
/* The following structures are for supporting RX network flow
* classification configuration. Note, all multibyte fields, e.g.,
* ip4src, ip4dst, psrc, pdst, spi, etc. are expected to be in network
* byte order.
*/
struct ethtool_tcpip4_spec {
__be32 ip4src;
__be32 ip4dst;
__be16 psrc;
__be16 pdst;
__u8 tos;
};
struct ethtool_ah_espip4_spec {
__be32 ip4src;
__be32 ip4dst;
__be32 spi;
__u8 tos;
};
struct ethtool_rawip4_spec {
__be32 ip4src;
__be32 ip4dst;
__u8 hdata[64];
};
struct ethtool_ether_spec {
__be16 ether_type;
__u8 frame_size;
__u8 eframe[16];
};
#define ETH_RX_NFC_IP4 1
#define ETH_RX_NFC_IP6 2
struct ethtool_usrip4_spec {
__be32 ip4src;
__be32 ip4dst;
__be32 l4_4_bytes;
__u8 tos;
__u8 ip_ver;
__u8 proto;
};
struct ethtool_rx_flow_spec {
__u32 flow_type;
__u64 data;
union {
struct ethtool_tcpip4_spec tcp_ip4_spec;
struct ethtool_tcpip4_spec udp_ip4_spec;
struct ethtool_tcpip4_spec sctp_ip4_spec;
struct ethtool_ah_espip4_spec ah_ip4_spec;
struct ethtool_ah_espip4_spec esp_ip4_spec;
struct ethtool_rawip4_spec raw_ip4_spec;
struct ethtool_ether_spec ether_spec;
struct ethtool_usrip4_spec usr_ip4_spec;
__u8 hdata[64];
} h_u, m_u; /* entry, mask */
__u64 ring_cookie;
__u32 location;
};
struct ethtool_rxnfc {
__u32 cmd;
__u32 flow_type;
/* The rx flow hash value or the rule DB size */
__u64 data;
struct ethtool_rx_flow_spec fs;
__u32 rule_cnt;
__u32 rule_locs[0];
};
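As a userspace sketch, a rule steering TCP/IPv4 traffic with destination port 80 to RX ring 0 could be built like this (multibyte fields in network byte order per the comment above; the SIOCETHTOOL ifreq plumbing is elided):

	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_SRXCLSRLINS,
		.fs = {
			.flow_type = TCP_V4_FLOW,
			.h_u.tcp_ip4_spec.pdst = htons(80),
			.ring_cookie = 0,	/* deliver to RX ring 0 */
		},
	};

	/* ifr.ifr_data = (void *)&nfc; ioctl(sock, SIOCETHTOOL, &ifr); */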
#ifdef __KERNEL__
@@ -417,8 +483,8 @@ struct ethtool_ops {
/* the following hooks are obsolete */
int (*self_test_count)(struct net_device *);/* use get_sset_count */
int (*get_stats_count)(struct net_device *);/* use get_sset_count */
int (*get_rxhash)(struct net_device *, struct ethtool_rxnfc *);
int (*set_rxhash)(struct net_device *, struct ethtool_rxnfc *);
int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, void *);
int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
};
#endif /* __KERNEL__ */
@@ -469,6 +535,12 @@ struct ethtool_ops {
#define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */
#define ETHTOOL_GGRO 0x0000002b /* Get GRO enable (ethtool_value) */
#define ETHTOOL_SGRO 0x0000002c /* Set GRO enable (ethtool_value) */
#define ETHTOOL_GRXRINGS 0x0000002d /* Get RX rings available for LB */
#define ETHTOOL_GRXCLSRLCNT 0x0000002e /* Get RX class rule count */
#define ETHTOOL_GRXCLSRULE 0x0000002f /* Get RX classification rule */
#define ETHTOOL_GRXCLSRLALL 0x00000030 /* Get all RX classification rule */
#define ETHTOOL_SRXCLSRLDEL 0x00000031 /* Delete RX classification rule */
#define ETHTOOL_SRXCLSRLINS 0x00000032 /* Insert RX classification rule */
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
@@ -565,9 +637,13 @@ struct ethtool_ops {
#define UDP_V6_FLOW 0x06
#define SCTP_V6_FLOW 0x07
#define AH_ESP_V6_FLOW 0x08
#define AH_V4_FLOW 0x09
#define ESP_V4_FLOW 0x0a
#define AH_V6_FLOW 0x0b
#define ESP_V6_FLOW 0x0c
#define IP_USER_FLOW 0x0d
/* L3-L4 network traffic flow hash options */
#define RXH_DEV_PORT (1 << 0)
#define RXH_L2DA (1 << 1)
#define RXH_VLAN (1 << 2)
#define RXH_L3_PROTO (1 << 3)
@@ -577,5 +653,6 @@ struct ethtool_ops {
#define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */
#define RXH_DISCARD (1 << 31)
#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL
#endif /* _LINUX_ETHTOOL_H */

View File

@@ -13,10 +13,20 @@
/* For O_CLOEXEC and O_NONBLOCK */
#include <linux/fcntl.h>
/* Flags for eventfd2. */
/*
* CAREFUL: Check include/asm-generic/fcntl.h when defining
* new flags, since they might collide with O_* ones. We want
* to re-use O_* flags that couldn't possibly have a meaning
* from eventfd, in order to leave a free define-space for
* shared O_* flags.
*/
#define EFD_SEMAPHORE (1 << 0)
#define EFD_CLOEXEC O_CLOEXEC
#define EFD_NONBLOCK O_NONBLOCK
#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
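From userspace these map straight onto the new eventfd2 system call; e.g., a semaphore-style, close-on-exec, non-blocking eventfd (assuming a libc whose eventfd() wrapper passes flags through):

#include <sys/eventfd.h>

	int efd = eventfd(0, EFD_SEMAPHORE | EFD_CLOEXEC | EFD_NONBLOCK);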
struct file *eventfd_fget(int fd);
int eventfd_signal(struct file *file, int n);

View File

@@ -61,7 +61,6 @@ struct file;
static inline void eventpoll_init_file(struct file *file)
{
INIT_LIST_HEAD(&file->f_ep_links);
spin_lock_init(&file->f_ep_lock);
}

View File

@@ -208,6 +208,7 @@ static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags)
#define EXT3_STATE_JDATA 0x00000001 /* journaled data exists */
#define EXT3_STATE_NEW 0x00000002 /* inode is newly created */
#define EXT3_STATE_XATTR 0x00000004 /* has in-inode xattrs */
#define EXT3_STATE_FLUSH_ON_CLOSE 0x00000008
/* Used to pass group descriptor data when online resize is done */
struct ext3_new_group_input {
@@ -893,9 +894,8 @@ extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
/* ioctl.c */
extern int ext3_ioctl (struct inode *, struct file *, unsigned int,
unsigned long);
extern long ext3_compat_ioctl (struct file *, unsigned int, unsigned long);
extern long ext3_ioctl(struct file *, unsigned int, unsigned long);
extern long ext3_compat_ioctl(struct file *, unsigned int, unsigned long);
/* namei.c */
extern int ext3_orphan_add(handle_t *, struct inode *);

View File

@@ -123,6 +123,7 @@ struct dentry;
#define FB_ACCEL_TRIDENT_3DIMAGE 51 /* Trident 3DImage */
#define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */
#define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */
#define FB_ACCEL_CIRRUS_ALPINE 53 /* Cirrus Logic 543x/544x/5480 */
#define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */
#define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */
#define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */
@@ -960,15 +961,7 @@ extern struct fb_info *registered_fb[FB_MAX];
extern int num_registered_fb;
extern struct class *fb_class;
static inline int lock_fb_info(struct fb_info *info)
{
mutex_lock(&info->lock);
if (!info->fbops) {
mutex_unlock(&info->lock);
return 0;
}
return 1;
}
extern int lock_fb_info(struct fb_info *info);
static inline void unlock_fb_info(struct fb_info *info)
{

View File

@@ -25,10 +25,12 @@
#include <linux/types.h>
#include <linux/firewire-constants.h>
#define FW_CDEV_EVENT_BUS_RESET 0x00
#define FW_CDEV_EVENT_RESPONSE 0x01
#define FW_CDEV_EVENT_REQUEST 0x02
#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03
#define FW_CDEV_EVENT_BUS_RESET 0x00
#define FW_CDEV_EVENT_RESPONSE 0x01
#define FW_CDEV_EVENT_REQUEST 0x02
#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03
#define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04
#define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05
/**
* struct fw_cdev_event_common - Common part of all fw_cdev_event_ types
@@ -136,7 +138,24 @@ struct fw_cdev_event_request {
* This event is sent when the controller has completed an &fw_cdev_iso_packet
* with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers
* stripped from all packets up until and including the interrupt packet are
* returned in the @header field.
* returned in the @header field. The amount of header data per packet is as
* specified at iso context creation by &fw_cdev_create_iso_context.header_size.
*
* In version 1 of this ABI, header data consisted of the 1394 isochronous
* packet header, followed by quadlets from the packet payload if
* &fw_cdev_create_iso_context.header_size > 4.
*
* In version 2 of this ABI, header data consists of the 1394 isochronous
* packet header, followed by a timestamp quadlet if
* &fw_cdev_create_iso_context.header_size > 4, followed by quadlets from the
* packet payload if &fw_cdev_create_iso_context.header_size > 8.
*
* Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2.
*
* Format of 1394 iso packet header: 16 bits len, 2 bits tag, 6 bits channel,
* 4 bits tcode, 4 bits sy, in big endian byte order. Format of timestamp:
* 16 bits invalid, 3 bits cycleSeconds, 13 bits cycleCount, in big endian byte
* order.
*/
struct fw_cdev_event_iso_interrupt {
__u64 closure;
@@ -146,6 +165,35 @@ struct fw_cdev_event_iso_interrupt {
__u32 header[0];
};
/**
* struct fw_cdev_event_iso_resource - Iso resources were allocated or freed
* @closure: See &fw_cdev_event_common;
* set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl
* @type: %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or
* %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED
* @handle: Reference by which an allocated resource can be deallocated
* @channel: Isochronous channel which was (de)allocated, if any
* @bandwidth: Bandwidth allocation units which were (de)allocated, if any
*
* An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event is sent after an isochronous
* resource was allocated at the IRM. The client has to check @channel and
* @bandwidth for whether the allocation actually succeeded.
*
* An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event is sent after an isochronous
* resource was deallocated at the IRM. It is also sent when automatic
* reallocation after a bus reset failed.
*
* @channel is <0 if no channel was (de)allocated or if reallocation failed.
* @bandwidth is 0 if no bandwidth was (de)allocated or if reallocation failed.
*/
struct fw_cdev_event_iso_resource {
__u64 closure;
__u32 type;
__u32 handle;
__s32 channel;
__s32 bandwidth;
};
/**
* union fw_cdev_event - Convenience union of fw_cdev_event_ types
* @common: Valid for all types
@@ -153,6 +201,9 @@ struct fw_cdev_event_iso_interrupt {
* @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE
* @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST
* @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT
* @iso_resource: Valid if @common.type ==
* %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or
* %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED
*
* Convenience union for userspace use. Events could be read(2) into an
* appropriately aligned char buffer and then cast to this union for further
@@ -163,33 +214,47 @@ struct fw_cdev_event_iso_interrupt {
* not fit will be discarded so that the next read(2) will return a new event.
*/
union fw_cdev_event {
struct fw_cdev_event_common common;
struct fw_cdev_event_bus_reset bus_reset;
struct fw_cdev_event_response response;
struct fw_cdev_event_request request;
struct fw_cdev_event_iso_interrupt iso_interrupt;
struct fw_cdev_event_common common;
struct fw_cdev_event_bus_reset bus_reset;
struct fw_cdev_event_response response;
struct fw_cdev_event_request request;
struct fw_cdev_event_iso_interrupt iso_interrupt;
struct fw_cdev_event_iso_resource iso_resource;
};
#define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info)
#define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request)
#define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate)
#define FW_CDEV_IOC_DEALLOCATE _IOW('#', 0x03, struct fw_cdev_deallocate)
#define FW_CDEV_IOC_SEND_RESPONSE _IOW('#', 0x04, struct fw_cdev_send_response)
#define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset)
#define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor)
#define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor)
/* available since kernel version 2.6.22 */
#define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info)
#define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request)
#define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate)
#define FW_CDEV_IOC_DEALLOCATE _IOW('#', 0x03, struct fw_cdev_deallocate)
#define FW_CDEV_IOC_SEND_RESPONSE _IOW('#', 0x04, struct fw_cdev_send_response)
#define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset)
#define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor)
#define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor)
#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context)
#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso)
#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso)
#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso)
#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context)
#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso)
#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso)
#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso)
#define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer)
/* available since kernel version 2.6.24 */
#define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer)
/* FW_CDEV_VERSION History
*
* 1 Feb 18, 2007: Initial version.
/* available since kernel version 2.6.30 */
#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE _IOWR('#', 0x0d, struct fw_cdev_allocate_iso_resource)
#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE _IOW('#', 0x0e, struct fw_cdev_deallocate)
#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x0f, struct fw_cdev_allocate_iso_resource)
#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x10, struct fw_cdev_allocate_iso_resource)
#define FW_CDEV_IOC_GET_SPEED _IO('#', 0x11) /* returns speed code */
#define FW_CDEV_IOC_SEND_BROADCAST_REQUEST _IOW('#', 0x12, struct fw_cdev_send_request)
#define FW_CDEV_IOC_SEND_STREAM_PACKET _IOW('#', 0x13, struct fw_cdev_send_stream_packet)
/*
* FW_CDEV_VERSION History
* 1 (2.6.22) - initial version
* 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if
* &fw_cdev_create_iso_context.header_size is 8 or more
*/
#define FW_CDEV_VERSION 1
#define FW_CDEV_VERSION 2
/**
* struct fw_cdev_get_info - General purpose information ioctl
@@ -201,7 +266,7 @@ union fw_cdev_event {
* case, @rom_length is updated with the actual length of the
* configuration ROM.
* @rom: If non-zero, address of a buffer to be filled by a copy of the
* local node's configuration ROM
* device's configuration ROM
* @bus_reset: If non-zero, address of a buffer to be filled by a
* &struct fw_cdev_event_bus_reset with the current state
* of the bus. This does not cause a bus reset to happen.
@@ -229,7 +294,7 @@ struct fw_cdev_get_info {
* Send a request to the device. This ioctl implements all outgoing requests.
* Both quadlet and block request specify the payload as a pointer to the data
* in the @data field. Once the transaction completes, the kernel writes an
* &fw_cdev_event_request event back. The @closure field is passed back to
* &fw_cdev_event_response event back. The @closure field is passed back to
* user space in the response event.
*/
struct fw_cdev_send_request {
@@ -284,9 +349,9 @@ struct fw_cdev_allocate {
};
/**
* struct fw_cdev_deallocate - Free an address range allocation
* @handle: Handle to the address range, as returned by the kernel when the
* range was allocated
* struct fw_cdev_deallocate - Free a CSR address range or isochronous resource
* @handle: Handle to the address range or iso resource, as returned by the
* kernel when the range or resource was allocated
*/
struct fw_cdev_deallocate {
__u32 handle;
@@ -329,6 +394,9 @@ struct fw_cdev_initiate_bus_reset {
* If successful, the kernel adds the descriptor and writes back a handle to the
* kernel-side object to be used for later removal of the descriptor block and
* immediate key.
*
* This ioctl affects the configuration ROMs of all local nodes.
* The ioctl only succeeds on device files which represent a local node.
*/
struct fw_cdev_add_descriptor {
__u32 immediate;
@@ -344,7 +412,7 @@ struct fw_cdev_add_descriptor {
* descriptor was added
*
* Remove a descriptor block and accompanying immediate key from the local
* node's configuration ROM.
* nodes' configuration ROMs.
*/
struct fw_cdev_remove_descriptor {
__u32 handle;
@@ -370,6 +438,9 @@ struct fw_cdev_remove_descriptor {
*
* If a context was successfully created, the kernel writes back a handle to the
* context, which must be passed in for subsequent operations on that context.
*
* Note that the effect of a @header_size > 4 depends on
* &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt.
*/
struct fw_cdev_create_iso_context {
__u32 type;
@@ -473,10 +544,91 @@ struct fw_cdev_stop_iso {
* The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer
* and also the system clock. This allows the receive time of an
* isochronous packet to be expressed as a system time with microsecond
* accuracy.
*
* @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and
* 12 bits cycleOffset, in host byte order.
*/
struct fw_cdev_get_cycle_timer {
__u64 local_time;
__u32 cycle_timer;
};
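Splitting @cycle_timer per the layout above (a userspace sketch; "fd" is an open firewire character-device file descriptor):

	struct fw_cdev_get_cycle_timer ct;

	if (ioctl(fd, FW_CDEV_IOC_GET_CYCLE_TIMER, &ct) == 0) {
		unsigned int sec    = (ct.cycle_timer >> 25) & 0x7f;	/* cycleSeconds */
		unsigned int count  = (ct.cycle_timer >> 12) & 0x1fff;	/* cycleCount */
		unsigned int offset =  ct.cycle_timer & 0xfff;		/* cycleOffset */
	}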
/**
* struct fw_cdev_allocate_iso_resource - (De)allocate a channel or bandwidth
* @closure: Passed back to userspace in corresponding iso resource events
* @channels: Isochronous channels of which one is to be (de)allocated
* @bandwidth: Isochronous bandwidth units to be (de)allocated
* @handle: Handle to the allocation, written by the kernel (only valid in
* case of %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctls)
*
* The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctl initiates allocation of an
* isochronous channel and/or of isochronous bandwidth at the isochronous
* resource manager (IRM). Only one of the channels specified in @channels is
* allocated. An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED is sent after
* communication with the IRM, indicating success or failure in the event data.
* The kernel will automatically reallocate the resources after bus resets.
* Should a reallocation fail, an %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event
* will be sent. The kernel will also automatically deallocate the resources
* when the file descriptor is closed.
*
* The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE ioctl can be used to initiate
* deallocation of resources which were allocated as described above.
* An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
*
* The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE ioctl is a variant of allocation
* without automatic re- or deallocation.
* An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event concludes this operation,
* indicating success or failure in its data.
*
* The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE ioctl works like
* %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE except that resources are freed
* instead of allocated.
* An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
*
* To summarize, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE allocates iso resources
* for the lifetime of the fd or handle.
* In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources
* for the duration of a bus generation.
*
* @channels is a host-endian bitfield with the least significant bit
* representing channel 0 and the most significant bit representing channel 63:
* 1ULL << c for each channel c that is a candidate for (de)allocation.
*
* @bandwidth is expressed in bandwidth allocation units, i.e. the time to send
* one quadlet of data (payload or header data) at speed S1600.
*/
struct fw_cdev_allocate_iso_resource {
__u64 closure;
__u64 channels;
__u32 bandwidth;
__u32 handle;
};
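A userspace sketch: request one of channels 0-3 plus a little bandwidth; the outcome arrives later as an %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event read(2) from the same fd:

	struct fw_cdev_allocate_iso_resource res = {
		.channels  = 0xf,	/* candidates: channels 0..3 */
		.bandwidth = 8,		/* allocation units, see above */
	};

	ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE, &res);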
/**
* struct fw_cdev_send_stream_packet - send an asynchronous stream packet
* @length: Length of outgoing payload, in bytes
* @tag: Data format tag
* @channel: Isochronous channel to transmit to
* @sy: Synchronization code
* @closure: Passed back to userspace in the response event
* @data: Userspace pointer to payload
* @generation: The bus generation where packet is valid
* @speed: Speed to transmit at
*
* The %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl sends an asynchronous stream packet
* to every device which is listening to the specified channel. The kernel
* writes an &fw_cdev_event_response event which indicates success or failure of
* the transmission.
*/
struct fw_cdev_send_stream_packet {
__u32 length;
__u32 tag;
__u32 channel;
__u32 sy;
__u64 closure;
__u64 data;
__u32 generation;
__u32 speed;
};
#endif /* _LINUX_FIREWIRE_CDEV_H */

View File

@@ -141,6 +141,7 @@ struct inodes_stat_t {
#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */
#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
#define MS_I_VERSION (1<<23) /* Update inode I_version field */
#define MS_STRICTATIME (1<<24) /* Always perform atime updates */
#define MS_ACTIVE (1<<30)
#define MS_NOUSER (1<<31)
@@ -848,6 +849,7 @@ struct file {
#define f_dentry f_path.dentry
#define f_vfsmnt f_path.mnt
const struct file_operations *f_op;
spinlock_t f_lock; /* f_ep_links, f_flags, no IRQ */
atomic_long_t f_count;
unsigned int f_flags;
fmode_t f_mode;
@@ -866,7 +868,6 @@ struct file {
#ifdef CONFIG_EPOLL
/* Used by fs/eventpoll.c to link all the hooks to this file */
struct list_head f_ep_links;
spinlock_t f_ep_lock;
#endif /* #ifdef CONFIG_EPOLL */
struct address_space *f_mapping;
#ifdef CONFIG_DEBUG_WRITECOUNT
@@ -1063,34 +1064,147 @@ extern int lease_modify(struct file_lock **, int);
extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
#else /* !CONFIG_FILE_LOCKING */
#define fcntl_getlk(a, b) ({ -EINVAL; })
#define fcntl_setlk(a, b, c, d) ({ -EACCES; })
static inline int fcntl_getlk(struct file *file, struct flock __user *user)
{
return -EINVAL;
}
static inline int fcntl_setlk(unsigned int fd, struct file *file,
unsigned int cmd, struct flock __user *user)
{
return -EACCES;
}
#if BITS_PER_LONG == 32
#define fcntl_getlk64(a, b) ({ -EINVAL; })
#define fcntl_setlk64(a, b, c, d) ({ -EACCES; })
static inline int fcntl_getlk64(struct file *file, struct flock64 __user *user)
{
return -EINVAL;
}
static inline int fcntl_setlk64(unsigned int fd, struct file *file,
unsigned int cmd, struct flock64 __user *user)
{
return -EACCES;
}
#endif
#define fcntl_setlease(a, b, c) ({ 0; })
#define fcntl_getlease(a) ({ 0; })
#define locks_init_lock(a) ({ })
#define __locks_copy_lock(a, b) ({ })
#define locks_copy_lock(a, b) ({ })
#define locks_remove_posix(a, b) ({ })
#define locks_remove_flock(a) ({ })
#define posix_test_lock(a, b) ({ 0; })
#define posix_lock_file(a, b, c) ({ -ENOLCK; })
#define posix_lock_file_wait(a, b) ({ -ENOLCK; })
#define posix_unblock_lock(a, b) (-ENOENT)
#define vfs_test_lock(a, b) ({ 0; })
#define vfs_lock_file(a, b, c, d) (-ENOLCK)
#define vfs_cancel_lock(a, b) ({ 0; })
#define flock_lock_file_wait(a, b) ({ -ENOLCK; })
#define __break_lease(a, b) ({ 0; })
#define lease_get_mtime(a, b) ({ })
#define generic_setlease(a, b, c) ({ -EINVAL; })
#define vfs_setlease(a, b, c) ({ -EINVAL; })
#define lease_modify(a, b) ({ -EINVAL; })
#define lock_may_read(a, b, c) ({ 1; })
#define lock_may_write(a, b, c) ({ 1; })
static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
return 0;
}
static inline int fcntl_getlease(struct file *filp)
{
return 0;
}
static inline void locks_init_lock(struct file_lock *fl)
{
return;
}
static inline void __locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
return;
}
static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
return;
}
static inline void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
return;
}
static inline void locks_remove_flock(struct file *filp)
{
return;
}
static inline void posix_test_lock(struct file *filp, struct file_lock *fl)
{
return;
}
static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
struct file_lock *conflock)
{
return -ENOLCK;
}
static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
return -ENOLCK;
}
static inline int posix_unblock_lock(struct file *filp,
struct file_lock *waiter)
{
return -ENOENT;
}
static inline int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
return 0;
}
static inline int vfs_lock_file(struct file *filp, unsigned int cmd,
struct file_lock *fl, struct file_lock *conf)
{
return -ENOLCK;
}
static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
return 0;
}
static inline int flock_lock_file_wait(struct file *filp,
struct file_lock *request)
{
return -ENOLCK;
}
static inline int __break_lease(struct inode *inode, unsigned int mode)
{
return 0;
}
static inline void lease_get_mtime(struct inode *inode, struct timespec *time)
{
return;
}
static inline int generic_setlease(struct file *filp, long arg,
struct file_lock **flp)
{
return -EINVAL;
}
static inline int vfs_setlease(struct file *filp, long arg,
struct file_lock **lease)
{
return -EINVAL;
}
static inline int lease_modify(struct file_lock **before, int arg)
{
return -EINVAL;
}
static inline int lock_may_read(struct inode *inode, loff_t start,
unsigned long len)
{
return 1;
}
static inline int lock_may_write(struct inode *inode, loff_t start,
unsigned long len)
{
return 1;
}
#endif /* !CONFIG_FILE_LOCKING */
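With the stubs above now real static inline functions instead of statement-expression macros, callers are argument- and type-checked even when CONFIG_FILE_LOCKING is disabled. A minimal sketch of a caller, assuming a hypothetical example_try_lock() helper, that compiles identically either way:

static int example_try_lock(struct file *filp, struct file_lock *fl)
{
	/* stub version always returns 0 when file locking is compiled out */
	if (vfs_test_lock(filp, fl))
		return -EAGAIN;
	/* stub version returns -ENOLCK when file locking is compiled out */
	return posix_lock_file_wait(filp, fl);
}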
@@ -1606,7 +1720,7 @@ struct super_block *sget(struct file_system_type *type,
extern int get_sb_pseudo(struct file_system_type *, char *,
const struct super_operations *ops, unsigned long,
struct vfsmount *mnt);
extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
int __put_super_and_need_restart(struct super_block *sb);
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
@@ -1627,6 +1741,8 @@ extern void drop_collected_mounts(struct vfsmount *);
extern int vfs_statfs(struct dentry *, struct kstatfs *);
extern int current_umask(void);
/* /sys/fs */
extern struct kobject *fs_kobj;
@@ -1687,13 +1803,44 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
return 0;
}
#else /* !CONFIG_FILE_LOCKING */
#define locks_mandatory_locked(a) ({ 0; })
#define locks_mandatory_area(a, b, c, d, e) ({ 0; })
#define __mandatory_lock(a) ({ 0; })
#define mandatory_lock(a) ({ 0; })
#define locks_verify_locked(a) ({ 0; })
#define locks_verify_truncate(a, b, c) ({ 0; })
#define break_lease(a, b) ({ 0; })
static inline int locks_mandatory_locked(struct inode *inode)
{
return 0;
}
static inline int locks_mandatory_area(int rw, struct inode *inode,
struct file *filp, loff_t offset,
size_t count)
{
return 0;
}
static inline int __mandatory_lock(struct inode *inode)
{
return 0;
}
static inline int mandatory_lock(struct inode *inode)
{
return 0;
}
static inline int locks_verify_locked(struct inode *inode)
{
return 0;
}
static inline int locks_verify_truncate(struct inode *inode, struct file *filp,
size_t size)
{
return 0;
}
static inline int break_lease(struct inode *inode, unsigned int mode)
{
return 0;
}
#endif /* CONFIG_FILE_LOCKING */
/* fs/open.c */
@@ -1730,8 +1877,28 @@ extern void bd_set_size(struct block_device *, loff_t size);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
extern struct block_device *open_by_devnum(dev_t, fmode_t);
extern void invalidate_bdev(struct block_device *);
extern int sync_blockdev(struct block_device *bdev);
extern struct super_block *freeze_bdev(struct block_device *);
extern void emergency_thaw_all(void);
extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
extern int fsync_bdev(struct block_device *);
extern int fsync_super(struct super_block *);
extern int fsync_no_super(struct block_device *);
#else
static inline void bd_forget(struct inode *inode) {}
static inline int sync_blockdev(struct block_device *bdev) { return 0; }
static inline void invalidate_bdev(struct block_device *bdev) {}
static inline struct super_block *freeze_bdev(struct block_device *sb)
{
return NULL;
}
static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
return 0;
}
#endif
extern const struct file_operations def_blk_fops;
extern const struct file_operations def_chr_fops;
@@ -1881,7 +2048,6 @@ static inline void allow_write_access(struct file *file)
if (file)
atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
}
extern int do_pipe(int *);
extern int do_pipe_flags(int *, int);
extern struct file *create_read_pipe(struct file *f, int flags);
extern struct file *create_write_pipe(int flags);


@@ -4,9 +4,10 @@
#include <linux/path.h>
struct fs_struct {
atomic_t count;
int users;
rwlock_t lock;
int umask;
int in_exec;
struct path root, pwd;
};
@@ -16,6 +17,8 @@ extern void exit_fs(struct task_struct *);
extern void set_fs_root(struct fs_struct *, struct path *);
extern void set_fs_pwd(struct fs_struct *, struct path *);
extern struct fs_struct *copy_fs_struct(struct fs_struct *);
extern void put_fs_struct(struct fs_struct *);
extern void free_fs_struct(struct fs_struct *);
extern void daemonize_fs_struct(void);
extern int unshare_fs_struct(void);
#endif /* _LINUX_FS_STRUCT_H */


@@ -0,0 +1,505 @@
/* General filesystem caching backing cache interface
*
* Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* NOTE!!! See:
*
* Documentation/filesystems/caching/backend-api.txt
*
* for a description of the cache backend interface declared here.
*/
#ifndef _LINUX_FSCACHE_CACHE_H
#define _LINUX_FSCACHE_CACHE_H
#include <linux/fscache.h>
#include <linux/sched.h>
#include <linux/slow-work.h>
#define NR_MAXCACHES BITS_PER_LONG
struct fscache_cache;
struct fscache_cache_ops;
struct fscache_object;
struct fscache_operation;
/*
* cache tag definition
*/
struct fscache_cache_tag {
struct list_head link;
struct fscache_cache *cache; /* cache referred to by this tag */
unsigned long flags;
#define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */
atomic_t usage;
char name[0]; /* tag name */
};
/*
* cache definition
*/
struct fscache_cache {
const struct fscache_cache_ops *ops;
struct fscache_cache_tag *tag; /* tag representing this cache */
struct kobject *kobj; /* system representation of this cache */
struct list_head link; /* link in list of caches */
size_t max_index_size; /* maximum size of index data */
char identifier[36]; /* cache label */
/* node management */
struct work_struct op_gc; /* operation garbage collector */
struct list_head object_list; /* list of data/index objects */
struct list_head op_gc_list; /* list of ops to be deleted */
spinlock_t object_list_lock;
spinlock_t op_gc_list_lock;
atomic_t object_count; /* no. of live objects in this cache */
struct fscache_object *fsdef; /* object for the fsdef index */
unsigned long flags;
#define FSCACHE_IOERROR 0 /* cache stopped on I/O error */
#define FSCACHE_CACHE_WITHDRAWN 1 /* cache has been withdrawn */
};
extern wait_queue_head_t fscache_cache_cleared_wq;
/*
* operation to be applied to a cache object
* - retrieval initiation operations are done in the context of the process
* that issued them, and not in an async thread pool
*/
typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
struct fscache_operation {
union {
struct work_struct fast_work; /* record for fast ops */
struct slow_work slow_work; /* record for (very) slow ops */
};
struct list_head pend_link; /* link in object->pending_ops */
struct fscache_object *object; /* object to be operated upon */
unsigned long flags;
#define FSCACHE_OP_TYPE 0x000f /* operation type */
#define FSCACHE_OP_FAST 0x0001 /* - fast op, processor may not sleep for disk */
#define FSCACHE_OP_SLOW 0x0002 /* - (very) slow op, processor may sleep for disk */
#define FSCACHE_OP_MYTHREAD 0x0003 /* - processing is done by the issuing thread, not pool */
#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
#define FSCACHE_OP_DEAD 6 /* op is now dead */
atomic_t usage;
unsigned debug_id; /* debugging ID */
/* operation processor callback
* - can be NULL if FSCACHE_OP_WAITING is going to be used to perform
* the op in a non-pool thread */
fscache_operation_processor_t processor;
/* operation releaser */
fscache_operation_release_t release;
};
extern atomic_t fscache_op_debug_id;
extern const struct slow_work_ops fscache_op_slow_work_ops;
extern void fscache_enqueue_operation(struct fscache_operation *);
extern void fscache_put_operation(struct fscache_operation *);
/**
* fscache_operation_init - Do basic initialisation of an operation
* @op: The operation to initialise
* @release: The release function to assign
*
* Do basic initialisation of an operation. The caller must still set flags,
* object, either fast_work or slow_work if necessary, and processor if needed.
*/
static inline void fscache_operation_init(struct fscache_operation *op,
fscache_operation_release_t release)
{
atomic_set(&op->usage, 1);
op->debug_id = atomic_inc_return(&fscache_op_debug_id);
op->release = release;
INIT_LIST_HEAD(&op->pend_link);
}
/**
* fscache_operation_init_slow - Do additional initialisation of a slow op
* @op: The operation to initialise
* @processor: The processor function to assign
*
* Do additional initialisation of an operation as required for slow work.
*/
static inline
void fscache_operation_init_slow(struct fscache_operation *op,
fscache_operation_processor_t processor)
{
op->processor = processor;
slow_work_init(&op->slow_work, &fscache_op_slow_work_ops);
}
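Taken together, the two initialisers above split basic setup from slow-work setup. A sketch of how a cache backend might prepare a slow operation, assuming hypothetical my_op_release() and my_op_process() backend routines; per the comments above, the caller still owns the flags and object fields:

static void my_op_release(struct fscache_operation *op);
static void my_op_process(struct fscache_operation *op);

static void my_prepare_slow_op(struct fscache_operation *op,
			       struct fscache_object *object)
{
	fscache_operation_init(op, my_op_release);
	fscache_operation_init_slow(op, my_op_process);
	op->flags = FSCACHE_OP_SLOW;	/* run in the slow-work pool */
	op->object = object;
}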
/*
* data read operation
*/
struct fscache_retrieval {
struct fscache_operation op;
struct address_space *mapping; /* netfs pages */
fscache_rw_complete_t end_io_func; /* function to call on I/O completion */
void *context; /* netfs read context (pinned) */
struct list_head to_do; /* list of things to be done by the backend */
unsigned long start_time; /* time at which retrieval started */
};
typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op,
struct page *page,
gfp_t gfp);
typedef int (*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op,
struct list_head *pages,
unsigned *nr_pages,
gfp_t gfp);
/**
* fscache_get_retrieval - Get an extra reference on a retrieval operation
* @op: The retrieval operation to get a reference on
*
* Get an extra reference on a retrieval operation.
*/
static inline
struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op)
{
atomic_inc(&op->op.usage);
return op;
}
/**
* fscache_enqueue_retrieval - Enqueue a retrieval operation for processing
* @op: The retrieval operation affected
*
* Enqueue a retrieval operation for processing by the FS-Cache thread pool.
*/
static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
{
fscache_enqueue_operation(&op->op);
}
/**
* fscache_put_retrieval - Drop a reference to a retrieval operation
* @op: The retrieval operation affected
*
* Drop a reference to a retrieval operation.
*/
static inline void fscache_put_retrieval(struct fscache_retrieval *op)
{
fscache_put_operation(&op->op);
}
/*
* cached page storage work item
* - used to do three things:
* - batch writes to the cache
* - do cache writes asynchronously
* - defer writes until cache object lookup completion
*/
struct fscache_storage {
struct fscache_operation op;
pgoff_t store_limit; /* don't write more than this */
};
/*
* cache operations
*/
struct fscache_cache_ops {
/* name of cache provider */
const char *name;
/* allocate an object record for a cookie */
struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
struct fscache_cookie *cookie);
/* look up the object for a cookie */
void (*lookup_object)(struct fscache_object *object);
/* finished looking up */
void (*lookup_complete)(struct fscache_object *object);
/* increment the usage count on this object (may fail if unmounting) */
struct fscache_object *(*grab_object)(struct fscache_object *object);
/* pin an object in the cache */
int (*pin_object)(struct fscache_object *object);
/* unpin an object in the cache */
void (*unpin_object)(struct fscache_object *object);
/* store the updated auxiliary data on an object */
void (*update_object)(struct fscache_object *object);
/* discard the resources pinned by an object and effect retirement if
* necessary */
void (*drop_object)(struct fscache_object *object);
/* dispose of a reference to an object */
void (*put_object)(struct fscache_object *object);
/* sync a cache */
void (*sync_cache)(struct fscache_cache *cache);
/* notification that the attributes of a non-index object (such as
* i_size) have changed */
int (*attr_changed)(struct fscache_object *object);
/* reserve space for an object's data and associated metadata */
int (*reserve_space)(struct fscache_object *object, loff_t i_size);
/* request that a backing block for a page be read or allocated in the
 * cache */
fscache_page_retrieval_func_t read_or_alloc_page;
/* request that backing blocks for a list of pages be read or allocated
 * in the cache */
fscache_pages_retrieval_func_t read_or_alloc_pages;
/* request that a backing block for a page be allocated in the cache so
 * that it can be written directly */
fscache_page_retrieval_func_t allocate_page;
/* request that backing blocks for pages be allocated in the cache so
 * that they can be written directly */
fscache_pages_retrieval_func_t allocate_pages;
/* write a page to its backing block in the cache */
int (*write_page)(struct fscache_storage *op, struct page *page);
/* detach backing block from a page (optional)
* - must release the cookie lock before returning
* - may sleep
*/
void (*uncache_page)(struct fscache_object *object,
struct page *page);
/* dissociate a cache from all the pages it was backing */
void (*dissociate_pages)(struct fscache_cache *cache);
};
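For orientation, a sketch of how a backend might populate this table; every my_*() entry is a hypothetical backend routine and the name is illustrative (optional slots such as pin_object and reserve_space are simply left NULL):

static const struct fscache_cache_ops my_cache_ops = {
	.name			= "mycache",
	.alloc_object		= my_alloc_object,
	.lookup_object		= my_lookup_object,
	.lookup_complete	= my_lookup_complete,
	.grab_object		= my_grab_object,
	.update_object		= my_update_object,
	.drop_object		= my_drop_object,
	.put_object		= my_put_object,
	.sync_cache		= my_sync_cache,
	.attr_changed		= my_attr_changed,
	.read_or_alloc_page	= my_read_or_alloc_page,
	.read_or_alloc_pages	= my_read_or_alloc_pages,
	.allocate_page		= my_allocate_page,
	.allocate_pages		= my_allocate_pages,
	.write_page		= my_write_page,
	.uncache_page		= my_uncache_page,
	.dissociate_pages	= my_dissociate_pages,
};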
/*
* data file or index object cookie
* - a file will only appear in one cache
* - a request to cache a file may or may not be honoured, subject to
* constraints such as disk space
* - indices are created on disk just-in-time
*/
struct fscache_cookie {
atomic_t usage; /* number of users of this cookie */
atomic_t n_children; /* number of children of this cookie */
spinlock_t lock;
struct hlist_head backing_objects; /* object(s) backing this file/index */
const struct fscache_cookie_def *def; /* definition */
struct fscache_cookie *parent; /* parent of this entry */
void *netfs_data; /* back pointer to netfs */
struct radix_tree_root stores; /* pages to be stored on this cookie */
#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
unsigned long flags;
#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
#define FSCACHE_COOKIE_CREATING 1 /* T if non-index object being created still */
#define FSCACHE_COOKIE_NO_DATA_YET 2 /* T if new object with no cached data yet */
#define FSCACHE_COOKIE_PENDING_FILL 3 /* T if pending initial fill on object */
#define FSCACHE_COOKIE_FILLING 4 /* T if filling object incrementally */
#define FSCACHE_COOKIE_UNAVAILABLE 5 /* T if cookie is unavailable (error, etc) */
};
extern struct fscache_cookie fscache_fsdef_index;
/*
* on-disk cache file or index handle
*/
struct fscache_object {
enum fscache_object_state {
FSCACHE_OBJECT_INIT, /* object in initial unbound state */
FSCACHE_OBJECT_LOOKING_UP, /* looking up object */
FSCACHE_OBJECT_CREATING, /* creating object */
/* active states */
FSCACHE_OBJECT_AVAILABLE, /* cleaning up object after creation */
FSCACHE_OBJECT_ACTIVE, /* object is usable */
FSCACHE_OBJECT_UPDATING, /* object is updating */
/* terminal states */
FSCACHE_OBJECT_DYING, /* object waiting for accessors to finish */
FSCACHE_OBJECT_LC_DYING, /* object cleaning up after lookup/create */
FSCACHE_OBJECT_ABORT_INIT, /* abort the init state */
FSCACHE_OBJECT_RELEASING, /* releasing object */
FSCACHE_OBJECT_RECYCLING, /* retiring object */
FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */
FSCACHE_OBJECT_DEAD, /* object is now dead */
} state;
int debug_id; /* debugging ID */
int n_children; /* number of child objects */
int n_ops; /* number of ops outstanding on object */
int n_obj_ops; /* number of object ops outstanding on object */
int n_in_progress; /* number of ops in progress */
int n_exclusive; /* number of exclusive ops queued */
spinlock_t lock; /* state and operations lock */
unsigned long lookup_jif; /* time at which lookup started */
unsigned long event_mask; /* events this object is interested in */
unsigned long events; /* events to be processed by this object
* (order is important - using fls) */
#define FSCACHE_OBJECT_EV_REQUEUE 0 /* T if object should be requeued */
#define FSCACHE_OBJECT_EV_UPDATE 1 /* T if object should be updated */
#define FSCACHE_OBJECT_EV_CLEARED 2 /* T if accessors all gone */
#define FSCACHE_OBJECT_EV_ERROR 3 /* T if fatal error occurred during processing */
#define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */
#define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */
#define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */
unsigned long flags;
#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
#define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */
#define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */
struct list_head cache_link; /* link in cache->object_list */
struct hlist_node cookie_link; /* link in cookie->backing_objects */
struct fscache_cache *cache; /* cache that supplied this object */
struct fscache_cookie *cookie; /* netfs's file/index object */
struct fscache_object *parent; /* parent object */
struct slow_work work; /* attention scheduling record */
struct list_head dependents; /* FIFO of dependent objects */
struct list_head dep_link; /* link in parent's dependents list */
struct list_head pending_ops; /* unstarted operations on this object */
pgoff_t store_limit; /* current storage limit */
};
extern const char *fscache_object_states[];
#define fscache_object_is_active(obj) \
(!test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \
(obj)->state >= FSCACHE_OBJECT_AVAILABLE && \
(obj)->state < FSCACHE_OBJECT_DYING)
extern const struct slow_work_ops fscache_object_slow_work_ops;
/**
* fscache_object_init - Initialise a cache object description
* @object: Object description
*
* Initialise a cache object description to its basic values.
*
* See Documentation/filesystems/caching/backend-api.txt for a complete
* description.
*/
static inline
void fscache_object_init(struct fscache_object *object,
struct fscache_cookie *cookie,
struct fscache_cache *cache)
{
atomic_inc(&cache->object_count);
object->state = FSCACHE_OBJECT_INIT;
spin_lock_init(&object->lock);
INIT_LIST_HEAD(&object->cache_link);
INIT_HLIST_NODE(&object->cookie_link);
vslow_work_init(&object->work, &fscache_object_slow_work_ops);
INIT_LIST_HEAD(&object->dependents);
INIT_LIST_HEAD(&object->dep_link);
INIT_LIST_HEAD(&object->pending_ops);
object->n_children = 0;
object->n_ops = object->n_in_progress = object->n_exclusive = 0;
object->events = object->event_mask = 0;
object->flags = 0;
object->store_limit = 0;
object->cache = cache;
object->cookie = cookie;
object->parent = NULL;
}
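A sketch of an alloc_object() op built on this initialiser, assuming a hypothetical struct my_object that embeds the generic object record:

struct my_object {
	struct fscache_object	fscache;
	/* backend-private state would follow */
};

static struct fscache_object *my_alloc_object(struct fscache_cache *cache,
					      struct fscache_cookie *cookie)
{
	struct my_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	fscache_object_init(&obj->fscache, cookie, cache);
	return &obj->fscache;
}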
extern void fscache_object_lookup_negative(struct fscache_object *object);
extern void fscache_obtained_object(struct fscache_object *object);
/**
* fscache_object_destroyed - Note destruction of an object in a cache
* @cache: The cache from which the object came
*
* Note the destruction and deallocation of an object record in a cache.
*/
static inline void fscache_object_destroyed(struct fscache_cache *cache)
{
if (atomic_dec_and_test(&cache->object_count))
wake_up_all(&fscache_cache_cleared_wq);
}
/**
* fscache_object_lookup_error - Note an object encountered an error
* @object: The object on which the error was encountered
*
* Note that an object encountered a fatal error (usually an I/O error) and
* that it should be withdrawn as soon as possible.
*/
static inline void fscache_object_lookup_error(struct fscache_object *object)
{
set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events);
}
/**
* fscache_set_store_limit - Set the maximum size to be stored in an object
* @object: The object to set the maximum on
* @i_size: The limit to set in bytes
*
* Set the maximum size an object is permitted to reach, implying the highest
* byte that may be written. Intended to be called by the attr_changed() op.
*
* See Documentation/filesystems/caching/backend-api.txt for a complete
* description.
*/
static inline
void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
{
object->store_limit = i_size >> PAGE_SHIFT;
if (i_size & ~PAGE_MASK)
object->store_limit++;
}
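A sketch of an attr_changed() op using this helper, pulling the new size through the cookie's get_attr() definition op; my_attr_changed() is hypothetical, and a real backend would also resize its backing store:

static int my_attr_changed(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;
	uint64_t i_size;

	cookie->def->get_attr(cookie->netfs_data, &i_size);
	fscache_set_store_limit(object, i_size);
	return 0;
}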
/**
* fscache_end_io - End a retrieval operation on a page
* @op: The FS-Cache operation covering the retrieval
* @page: The page that was to be fetched
* @error: The error code (0 if successful)
*
* Note the end of an operation to retrieve a page, as covered by a particular
* operation record.
*/
static inline void fscache_end_io(struct fscache_retrieval *op,
struct page *page, int error)
{
op->end_io_func(page, op->context, error);
}
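A sketch of a backend I/O completion routine built on it, assuming the backend took its own reference on the retrieval op before starting the I/O:

static void my_read_done(struct fscache_retrieval *op, struct page *page,
			 int error)
{
	fscache_end_io(op, page, error);	/* invokes op->end_io_func() */
	fscache_put_retrieval(op);		/* drop the backend's ref */
}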
/*
* out-of-line cache backend functions
*/
extern void fscache_init_cache(struct fscache_cache *cache,
const struct fscache_cache_ops *ops,
const char *idfmt,
...) __attribute__ ((format (printf, 3, 4)));
extern int fscache_add_cache(struct fscache_cache *cache,
struct fscache_object *fsdef,
const char *tagname);
extern void fscache_withdraw_cache(struct fscache_cache *cache);
extern void fscache_io_error(struct fscache_cache *cache);
extern void fscache_mark_pages_cached(struct fscache_retrieval *op,
struct pagevec *pagevec);
extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
const void *data,
uint16_t datalen);
#endif /* _LINUX_FSCACHE_CACHE_H */

include/linux/fscache.h

@@ -0,0 +1,618 @@
/* General filesystem caching interface
*
* Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* NOTE!!! See:
*
* Documentation/filesystems/caching/netfs-api.txt
*
* for a description of the network filesystem interface declared here.
*/
#ifndef _LINUX_FSCACHE_H
#define _LINUX_FSCACHE_H
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
#define fscache_available() (1)
#define fscache_cookie_valid(cookie) (cookie)
#else
#define fscache_available() (0)
#define fscache_cookie_valid(cookie) (0)
#endif
/*
* overload PG_private_2 to give us PG_fscache - this is used to indicate that
* a page is currently backed by a local disk cache
*/
#define PageFsCache(page) PagePrivate2((page))
#define SetPageFsCache(page) SetPagePrivate2((page))
#define ClearPageFsCache(page) ClearPagePrivate2((page))
#define TestSetPageFsCache(page) TestSetPagePrivate2((page))
#define TestClearPageFsCache(page) TestClearPagePrivate2((page))
/* pattern used to fill dead space in an index entry */
#define FSCACHE_INDEX_DEADFILL_PATTERN 0x79
struct pagevec;
struct fscache_cache_tag;
struct fscache_cookie;
struct fscache_netfs;
typedef void (*fscache_rw_complete_t)(struct page *page,
void *context,
int error);
/* result of index entry consultation */
enum fscache_checkaux {
FSCACHE_CHECKAUX_OKAY, /* entry okay as is */
FSCACHE_CHECKAUX_NEEDS_UPDATE, /* entry requires update */
FSCACHE_CHECKAUX_OBSOLETE, /* entry requires deletion */
};
/*
* fscache cookie definition
*/
struct fscache_cookie_def {
/* name of cookie type */
char name[16];
/* cookie type */
uint8_t type;
#define FSCACHE_COOKIE_TYPE_INDEX 0
#define FSCACHE_COOKIE_TYPE_DATAFILE 1
/* select the cache into which to insert an entry in this index
* - optional
* - should return a cache identifier or NULL to cause the cache to be
* inherited from the parent if possible or the first cache picked
* for a non-index file if not
*/
struct fscache_cache_tag *(*select_cache)(
const void *parent_netfs_data,
const void *cookie_netfs_data);
/* get an index key
* - should store the key data in the buffer
* - should return the amount of data stored
* - not permitted to return an error
* - the netfs data from the cookie being used as the source is
* presented
*/
uint16_t (*get_key)(const void *cookie_netfs_data,
void *buffer,
uint16_t bufmax);
/* get certain file attributes from the netfs data
* - this function can be absent for an index
* - not permitted to return an error
* - the netfs data from the cookie being used as the source is
* presented
*/
void (*get_attr)(const void *cookie_netfs_data, uint64_t *size);
/* get the auxiliary data from netfs data
 * - this function can be absent if the index carries no state data
 * - should store the auxiliary data in the buffer
 * - should return the amount of data stored
* - not permitted to return an error
* - the netfs data from the cookie being used as the source is
* presented
*/
uint16_t (*get_aux)(const void *cookie_netfs_data,
void *buffer,
uint16_t bufmax);
/* consult the netfs about the state of an object
* - this function can be absent if the index carries no state data
* - the netfs data from the cookie being used as the target is
* presented, as is the auxiliary data
*/
enum fscache_checkaux (*check_aux)(void *cookie_netfs_data,
const void *data,
uint16_t datalen);
/* get an extra reference on a read context
* - this function can be absent if the completion function doesn't
* require a context
*/
void (*get_context)(void *cookie_netfs_data, void *context);
/* release an extra reference on a read context
* - this function can be absent if the completion function doesn't
* require a context
*/
void (*put_context)(void *cookie_netfs_data, void *context);
/* indicate pages that now have cache metadata retained
* - this function should mark the specified pages as now being cached
* - the pages will have been marked with PG_fscache before this is
* called, so this is optional
*/
void (*mark_pages_cached)(void *cookie_netfs_data,
struct address_space *mapping,
struct pagevec *cached_pvec);
/* indicate the cookie is no longer cached
* - this function is called when the backing store currently caching
* a cookie is removed
* - the netfs should use this to clean up any markers indicating
* cached pages
* - this is mandatory for any object that may have data
*/
void (*now_uncached)(void *cookie_netfs_data);
};
/*
* fscache cached network filesystem type
* - name, version and ops must be filled in before registration
* - all other fields will be set during registration
*/
struct fscache_netfs {
uint32_t version; /* indexing version */
const char *name; /* filesystem name */
struct fscache_cookie *primary_index;
struct list_head link; /* internal link */
};
/*
* slow-path functions for when there is actually caching available, and the
* netfs does actually have a valid token
* - these are not to be called directly
* - these are undefined symbols when FS-Cache is not configured and the
* optimiser takes care of not using them
*/
extern int __fscache_register_netfs(struct fscache_netfs *);
extern void __fscache_unregister_netfs(struct fscache_netfs *);
extern struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *);
extern void __fscache_release_cache_tag(struct fscache_cache_tag *);
extern struct fscache_cookie *__fscache_acquire_cookie(
struct fscache_cookie *,
const struct fscache_cookie_def *,
void *);
extern void __fscache_relinquish_cookie(struct fscache_cookie *, int);
extern void __fscache_update_cookie(struct fscache_cookie *);
extern int __fscache_attr_changed(struct fscache_cookie *);
extern int __fscache_read_or_alloc_page(struct fscache_cookie *,
struct page *,
fscache_rw_complete_t,
void *,
gfp_t);
extern int __fscache_read_or_alloc_pages(struct fscache_cookie *,
struct address_space *,
struct list_head *,
unsigned *,
fscache_rw_complete_t,
void *,
gfp_t);
extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t);
extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
/**
* fscache_register_netfs - Register a filesystem as desiring caching services
* @netfs: The description of the filesystem
*
* Register a filesystem as desiring caching services if they're available.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
int fscache_register_netfs(struct fscache_netfs *netfs)
{
if (fscache_available())
return __fscache_register_netfs(netfs);
else
return 0;
}
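A sketch of the registration pattern this implies; the name and version are illustrative, and primary_index is filled in during registration:

static struct fscache_netfs my_netfs = {
	.name		= "mynetfs",
	.version	= 0,
};

static int __init my_netfs_init(void)
{
	/* returns 0 when FS-Cache is not built in */
	return fscache_register_netfs(&my_netfs);
}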
/**
* fscache_unregister_netfs - Indicate that a filesystem no longer desires
* caching services
* @netfs: The description of the filesystem
*
* Indicate that a filesystem no longer desires caching services for the
* moment.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
void fscache_unregister_netfs(struct fscache_netfs *netfs)
{
if (fscache_available())
__fscache_unregister_netfs(netfs);
}
/**
* fscache_lookup_cache_tag - Look up a cache tag
* @name: The name of the tag to search for
*
* Acquire a specific cache referral tag that can be used to select a specific
* cache in which to cache an index.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name)
{
if (fscache_available())
return __fscache_lookup_cache_tag(name);
else
return NULL;
}
/**
* fscache_release_cache_tag - Release a cache tag
* @tag: The tag to release
*
* Release a reference to a cache referral tag previously looked up.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
void fscache_release_cache_tag(struct fscache_cache_tag *tag)
{
if (fscache_available())
__fscache_release_cache_tag(tag);
}
/**
* fscache_acquire_cookie - Acquire a cookie to represent a cache object
* @parent: The cookie that's to be the parent of this one
* @def: A description of the cache object, including callback operations
* @netfs_data: An arbitrary piece of data to be kept in the cookie to
* represent the cache object to the netfs
*
* This function is used to inform FS-Cache about part of an index hierarchy
* that can be used to locate files. This is done by requesting a cookie for
* each index in the path to the file.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
struct fscache_cookie *fscache_acquire_cookie(
struct fscache_cookie *parent,
const struct fscache_cookie_def *def,
void *netfs_data)
{
if (fscache_cookie_valid(parent))
return __fscache_acquire_cookie(parent, def, netfs_data);
else
return NULL;
}
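A sketch of acquiring a child index cookie beneath the primary index from the my_netfs sketch earlier; the definition, the get_key() helper and struct my_server are all hypothetical:

static uint16_t my_server_get_key(const void *netfs_data, void *buffer,
				  uint16_t bufmax);

static const struct fscache_cookie_def my_server_index_def = {
	.name		= "mynetfs.server",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
	.get_key	= my_server_get_key,
};

struct my_server;

static struct fscache_cookie *my_acquire_server_cookie(struct my_server *srv)
{
	return fscache_acquire_cookie(my_netfs.primary_index,
				      &my_server_index_def, srv);
}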
/**
* fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding
* it
* @cookie: The cookie being returned
* @retire: True if the cache object the cookie represents is to be discarded
*
* This function returns a cookie to the cache, forcibly discarding the
* associated cache object if retire is set to true.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
{
if (fscache_cookie_valid(cookie))
__fscache_relinquish_cookie(cookie, retire);
}
/**
* fscache_update_cookie - Request that a cache object be updated
* @cookie: The cookie representing the cache object
*
* Request an update of the index data for the cache object associated with the
* cookie.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
void fscache_update_cookie(struct fscache_cookie *cookie)
{
if (fscache_cookie_valid(cookie))
__fscache_update_cookie(cookie);
}
/**
* fscache_pin_cookie - Pin a data-storage cache object in its cache
* @cookie: The cookie representing the cache object
*
* Permit data-storage cache objects to be pinned in the cache.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
int fscache_pin_cookie(struct fscache_cookie *cookie)
{
return -ENOBUFS;
}
/**
* fscache_unpin_cookie - Unpin a data-storage cache object in its cache
* @cookie: The cookie representing the cache object
*
* Permit data-storage cache objects to be unpinned from the cache.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
void fscache_unpin_cookie(struct fscache_cookie *cookie)
{
}
/**
* fscache_attr_changed - Notify cache that an object's attributes changed
* @cookie: The cookie representing the cache object
*
* Send a notification to the cache indicating that an object's attributes have
* changed. This includes the data size. These attributes will be obtained
* through the get_attr() cookie definition op.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
int fscache_attr_changed(struct fscache_cookie *cookie)
{
if (fscache_cookie_valid(cookie))
return __fscache_attr_changed(cookie);
else
return -ENOBUFS;
}
/**
* fscache_reserve_space - Reserve data space for a cached object
* @cookie: The cookie representing the cache object
* @i_size: The amount of space to be reserved
*
* Reserve an amount of space in the cache for the cache object attached to a
* cookie so that a write to that object within the space can always be
* honoured.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size)
{
return -ENOBUFS;
}
/**
* fscache_read_or_alloc_page - Read a page from the cache or allocate a block
* in which to store it
* @cookie: The cookie representing the cache object
* @page: The netfs page to fill if possible
* @end_io_func: The callback to invoke when and if the page is filled
* @context: An arbitrary piece of data to pass on to end_io_func()
* @gfp: The conditions under which memory allocation should be made
*
* Read a page from the cache, or if that's not possible make a potential
* one-block reservation in the cache into which the page may be stored once
* fetched from the server.
*
* If the page is not backed by the cache object, or if there's some reason
* it can't be, -ENOBUFS will be returned and nothing more will be done for
* that page.
*
* Else, if that page is backed by the cache, a read will be initiated directly
* to the netfs's page and 0 will be returned by this function. The
* end_io_func() callback will be invoked when the operation terminates on a
* completion or failure. Note that the callback may be invoked before the
* return.
*
* Else, if the page is unbacked, -ENODATA is returned and a block may have
* been allocated in the cache.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
struct page *page,
fscache_rw_complete_t end_io_func,
void *context,
gfp_t gfp)
{
if (fscache_cookie_valid(cookie))
return __fscache_read_or_alloc_page(cookie, page, end_io_func,
context, gfp);
else
return -ENOBUFS;
}
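A sketch of how a netfs read path might dispatch on the three documented results; my_read_complete() and my_read_from_server() are assumed netfs helpers:

static void my_read_complete(struct page *page, void *context, int error);
static int my_read_from_server(struct page *page);

static int my_readpage(struct fscache_cookie *cookie, struct page *page)
{
	int ret = fscache_read_or_alloc_page(cookie, page, my_read_complete,
					     NULL, GFP_KERNEL);
	switch (ret) {
	case 0:			/* read dispatched to the cache */
		return 0;
	case -ENODATA:		/* block may be reserved, no data yet */
	case -ENOBUFS:		/* page cannot be cached */
	default:
		return my_read_from_server(page);
	}
}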
/**
* fscache_read_or_alloc_pages - Read pages from the cache and/or allocate
* blocks in which to store them
* @cookie: The cookie representing the cache object
* @mapping: The netfs inode mapping to which the pages will be attached
* @pages: A list of potential netfs pages to be filled
* @nr_pages: The number of pages in the list
* @end_io_func: The callback to invoke when and if each page is filled
* @context: An arbitrary piece of data to pass on to end_io_func()
* @gfp: The conditions under which memory allocation should be made
*
* Read a set of pages from the cache, or if that's not possible, attempt to
* make a potential one-block reservation for each page in the cache into which
* that page may be stored once fetched from the server.
*
* If some pages are not backed by the cache object, or if there's some
* reason they can't be, -ENOBUFS will be returned and nothing more will be
* done for those pages.
*
* Else, if some of the pages are backed by the cache, a read will be initiated
* directly to the netfs's page and 0 will be returned by this function. The
* end_io_func() callback will be invoked when the operation terminates on a
* completion or failure. Note that the callback may be invoked before the
* return.
*
* Else, if a page is unbacked, -ENODATA is returned and a block may have
* been allocated in the cache.
*
* Because the function may want to return all of -ENOBUFS, -ENODATA and 0 in
* regard to different pages, the return values are prioritised in that order.
* Any pages submitted for reading are removed from the pages list.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
int fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
struct address_space *mapping,
struct list_head *pages,
unsigned *nr_pages,
fscache_rw_complete_t end_io_func,
void *context,
gfp_t gfp)
{
if (fscache_cookie_valid(cookie))
return __fscache_read_or_alloc_pages(cookie, mapping, pages,
nr_pages, end_io_func,
context, gfp);
else
return -ENOBUFS;
}
/**
* fscache_alloc_page - Allocate a block in which to store a page
* @cookie: The cookie representing the cache object
* @page: The netfs page to allocate a page for
* @gfp: The conditions under which memory allocation should be made
*
* Request allocation of a block in the cache in which to store a netfs page
* without retrieving any contents from the cache.
*
* If the page is not backed by a file then -ENOBUFS will be returned and
* nothing more will be done, and no reservation will be made.
*
* Else, a block will be allocated if one wasn't already, and 0 will be
* returned.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
int fscache_alloc_page(struct fscache_cookie *cookie,
struct page *page,
gfp_t gfp)
{
if (fscache_cookie_valid(cookie))
return __fscache_alloc_page(cookie, page, gfp);
else
return -ENOBUFS;
}
/**
* fscache_write_page - Request storage of a page in the cache
* @cookie: The cookie representing the cache object
* @page: The netfs page to store
* @gfp: The conditions under which memory allocation should be made
*
* Request the contents of the netfs page be written into the cache. This
* request may be ignored if no cache block is currently allocated, in which
* case it will return -ENOBUFS.
*
* If a cache block was already allocated, a write will be initiated and 0 will
* be returned. The PG_fscache_write page bit is set immediately and will then
* be cleared at the completion of the write to indicate the success or failure
* of the operation. Note that the completion may happen before the return.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
int fscache_write_page(struct fscache_cookie *cookie,
struct page *page,
gfp_t gfp)
{
if (fscache_cookie_valid(cookie))
return __fscache_write_page(cookie, page, gfp);
else
return -ENOBUFS;
}
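A sketch of the store path once a page has been fetched from the server; giving up the cache marking on failure is an assumption about netfs policy, not something this interface mandates:

static void my_store_page(struct fscache_cookie *cookie, struct page *page)
{
	if (fscache_write_page(cookie, page, GFP_KERNEL) != 0)
		fscache_uncache_page(cookie, page);
}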
/**
* fscache_uncache_page - Indicate that caching is no longer required on a page
* @cookie: The cookie representing the cache object
* @page: The netfs page that was being cached.
*
* Tell the cache that we no longer want a page to be cached and that it should
* remove any knowledge of the netfs page it may have.
*
* Note that this cannot cancel any outstanding I/O operations between this
* page and the cache.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
void fscache_uncache_page(struct fscache_cookie *cookie,
struct page *page)
{
if (fscache_cookie_valid(cookie))
__fscache_uncache_page(cookie, page);
}
/**
* fscache_check_page_write - Ask if a page is being written to the cache
* @cookie: The cookie representing the cache object
* @page: The netfs page that is being cached.
*
* Ask the cache if a page is being written to the cache.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
bool fscache_check_page_write(struct fscache_cookie *cookie,
struct page *page)
{
if (fscache_cookie_valid(cookie))
return __fscache_check_page_write(cookie, page);
return false;
}
/**
* fscache_wait_on_page_write - Wait for a page to complete writing to the cache
* @cookie: The cookie representing the cache object
* @page: The netfs page that is being cached.
*
* Ask the cache to wake us up when a page is no longer being written to the
* cache.
*
* See Documentation/filesystems/caching/netfs-api.txt for a complete
* description.
*/
static inline
void fscache_wait_on_page_write(struct fscache_cookie *cookie,
struct page *page)
{
if (fscache_cookie_valid(cookie))
__fscache_wait_on_page_write(cookie, page);
}
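A sketch of a releasepage-style helper combining the two write-tracking calls above with uncaching:

static void my_release_page(struct fscache_cookie *cookie, struct page *page)
{
	if (fscache_check_page_write(cookie, page))
		fscache_wait_on_page_write(cookie, page);
	fscache_uncache_page(cookie, page);
}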
#endif /* _LINUX_FSCACHE_H */


@@ -95,14 +95,15 @@ struct fsl_usb2_platform_data {
#define FSL_USB2_PORT0_ENABLED 0x00000001
#define FSL_USB2_PORT1_ENABLED 0x00000002
struct spi_device;
struct fsl_spi_platform_data {
u32 initial_spmode; /* initial SPMODE value */
u16 bus_num;
s16 bus_num;
bool qe_mode;
/* board specific information */
u16 max_chipselect;
void (*activate_cs)(u8 cs, u8 polarity);
void (*deactivate_cs)(u8 cs, u8 polarity);
void (*cs_control)(struct spi_device *spi, bool on);
u32 sysclk;
};


@@ -1,15 +1,18 @@
#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H
#include <linux/linkage.h>
#include <linux/fs.h>
#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <asm/ftrace.h>
#ifdef CONFIG_FUNCTION_TRACER
@@ -95,9 +98,41 @@ stack_trace_sysctl(struct ctl_table *table, int write,
loff_t *ppos);
#endif
struct ftrace_func_command {
struct list_head list;
char *name;
int (*func)(char *func, char *cmd,
char *params, int enable);
};
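A sketch of defining such a command; my_cmd_func() is hypothetical, and registration goes through register_ftrace_command() declared further down:

static int my_cmd_func(char *func, char *cmd, char *params, int enable)
{
	/* act on the filter pattern in 'func' for the given command */
	return 0;
}

static struct ftrace_func_command my_cmd = {
	.name	= "mycmd",
	.func	= my_cmd_func,
};

/* from module init: register_ftrace_command(&my_cmd); */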
#ifdef CONFIG_DYNAMIC_FTRACE
/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */
#include <asm/ftrace.h>
int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);
struct seq_file;
struct ftrace_probe_ops {
void (*func)(unsigned long ip,
unsigned long parent_ip,
void **data);
int (*callback)(unsigned long ip, void **data);
void (*free)(void **data);
int (*print)(struct seq_file *m,
unsigned long ip,
struct ftrace_probe_ops *ops,
void *data);
};
extern int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data);
extern void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data);
extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);
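A sketch of a probe using this interface; the glob and the ops table are illustrative:

static void my_probe_func(unsigned long ip, unsigned long parent_ip,
			  void **data)
{
	/* called from the function tracer for each matched call site */
}

static struct ftrace_probe_ops my_probe_ops = {
	.func	= my_probe_func,
};

/* register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL); */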
enum {
FTRACE_FL_FREE = (1 << 0),
@@ -110,15 +145,23 @@ enum {
};
struct dyn_ftrace {
struct list_head list;
unsigned long ip; /* address of mcount call-site */
unsigned long flags;
struct dyn_arch_ftrace arch;
union {
unsigned long ip; /* address of mcount call-site */
struct dyn_ftrace *freelist;
};
union {
unsigned long flags;
struct dyn_ftrace *newlist;
};
struct dyn_arch_ftrace arch;
};
int ftrace_force_update(void);
void ftrace_set_filter(unsigned char *buf, int len, int reset);
int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);
/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void *data);
@@ -126,6 +169,10 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_call(void);
extern void mcount_call(void);
#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
@@ -136,7 +183,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif
/**
* ftrace_make_nop - convert code into top
* ftrace_make_nop - convert code into nop
* @mod: module structure if called by module load initialization
* @rec: the mcount call site record
* @addr: the address that the call site should be calling
@@ -181,7 +228,6 @@ extern int ftrace_make_nop(struct module *mod,
*/
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);
@@ -198,6 +244,14 @@ extern void ftrace_enable_daemon(void);
# define ftrace_disable_daemon() do { } while (0)
# define ftrace_enable_daemon() do { } while (0)
static inline void ftrace_release(void *start, unsigned long size) { }
static inline int register_ftrace_command(struct ftrace_func_command *cmd)
{
return -EINVAL;
}
static inline int unregister_ftrace_command(char *cmd_name)
{
return -EINVAL;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
/* totally disable ftrace - can not re-enable after this */
@@ -233,24 +287,25 @@ static inline void __ftrace_enabled_restore(int enabled)
#endif
}
#ifdef CONFIG_FRAME_POINTER
/* TODO: need to fix this for ARM */
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
#else
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 0UL
# define CALLER_ADDR2 0UL
# define CALLER_ADDR3 0UL
# define CALLER_ADDR4 0UL
# define CALLER_ADDR5 0UL
# define CALLER_ADDR6 0UL
#endif
#ifndef HAVE_ARCH_CALLER_ADDR
# ifdef CONFIG_FRAME_POINTER
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
# else
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 0UL
# define CALLER_ADDR2 0UL
# define CALLER_ADDR3 0UL
# define CALLER_ADDR4 0UL
# define CALLER_ADDR5 0UL
# define CALLER_ADDR6 0UL
# endif
#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
#ifdef CONFIG_IRQSOFF_TRACER
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
@@ -268,54 +323,6 @@ static inline void __ftrace_enabled_restore(int enabled)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif
#ifdef CONFIG_TRACING
extern int ftrace_dump_on_oops;
extern void tracing_start(void);
extern void tracing_stop(void);
extern void ftrace_off_permanent(void);
extern void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
/**
* ftrace_printk - printf formatting in the ftrace buffer
* @fmt: the printf format for printing
*
* Note: __ftrace_printk is an internal function for ftrace_printk and
* the @ip is passed in via the ftrace_printk macro.
*
* This function allows a kernel developer to debug fast path sections
* that printk is not appropriate for. By scattering in various
* printk like tracing in the code, a developer can quickly see
* where problems are occurring.
*
* This is intended as a debugging tool for the developer only.
* Please refrain from leaving ftrace_printks scattered around in
* your code.
*/
# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
extern int
__ftrace_printk(unsigned long ip, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
extern void ftrace_dump(void);
#else
static inline void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
static inline int
ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void ftrace_off_permanent(void) { }
static inline int
ftrace_printk(const char *fmt, ...)
{
return 0;
}
static inline void ftrace_dump(void) { }
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
extern void ftrace_init_module(struct module *mod,
@@ -327,36 +334,6 @@ ftrace_init_module(struct module *mod,
unsigned long *start, unsigned long *end) { }
#endif
enum {
POWER_NONE = 0,
POWER_CSTATE = 1,
POWER_PSTATE = 2,
};
struct power_trace {
#ifdef CONFIG_POWER_TRACER
ktime_t stamp;
ktime_t end;
int type;
int state;
#endif
};
#ifdef CONFIG_POWER_TRACER
extern void trace_power_start(struct power_trace *it, unsigned int type,
unsigned int state);
extern void trace_power_mark(struct power_trace *it, unsigned int type,
unsigned int state);
extern void trace_power_end(struct power_trace *it);
#else
static inline void trace_power_start(struct power_trace *it, unsigned int type,
unsigned int state) { }
static inline void trace_power_mark(struct power_trace *it, unsigned int type,
unsigned int state) { }
static inline void trace_power_end(struct power_trace *it) { }
#endif
/*
* Structure that defines an entry function trace.
*/
@@ -379,6 +356,29 @@ struct ftrace_graph_ret {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* Stack of return addresses for functions
* of a thread.
* Used in struct thread_info
*/
struct ftrace_ret_stack {
unsigned long ret;
unsigned long func;
unsigned long long calltime;
};
/*
* Primary handler of a function return.
* It relies on ftrace_return_to_handler.
* Defined in entry_32/64.S
*/
extern void return_to_handler(void);
extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth);
extern void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
/*
* Sometimes we don't want to trace a function with the function
* graph tracer but we want them to keep traced by the usual function
@@ -490,6 +490,50 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
return tsk->trace & TSK_TRACE_FL_GRAPH;
}
extern int ftrace_dump_on_oops;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_HW_BRANCH_TRACER
void trace_hw_branch(u64 from, u64 to);
void trace_hw_branch_oops(void);
#else /* CONFIG_HW_BRANCH_TRACER */
static inline void trace_hw_branch(u64 from, u64 to) {}
static inline void trace_hw_branch_oops(void) {}
#endif /* CONFIG_HW_BRANCH_TRACER */
/*
* A syscall entry in the ftrace syscalls array.
*
* @name: name of the syscall
* @nb_args: number of parameters it takes
* @types: list of types as strings
* @args: list of args as strings (args[i] matches types[i])
*/
struct syscall_metadata {
const char *name;
int nb_args;
const char **types;
const char **args;
};
#ifdef CONFIG_FTRACE_SYSCALLS
extern void arch_init_ftrace_syscalls(void);
extern struct syscall_metadata *syscall_nr_to_meta(int nr);
extern void start_ftrace_syscalls(void);
extern void stop_ftrace_syscalls(void);
extern void ftrace_syscall_enter(struct pt_regs *regs);
extern void ftrace_syscall_exit(struct pt_regs *regs);
#else
static inline void start_ftrace_syscalls(void) { }
static inline void stop_ftrace_syscalls(void) { }
static inline void ftrace_syscall_enter(struct pt_regs *regs) { }
static inline void ftrace_syscall_exit(struct pt_regs *regs) { }
#endif
#endif /* _LINUX_FTRACE_H */


@@ -2,7 +2,7 @@
#define _LINUX_FTRACE_IRQ_H
#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
#ifdef CONFIG_FTRACE_NMI_ENTER
extern void ftrace_nmi_enter(void);
extern void ftrace_nmi_exit(void);
#else


@@ -22,7 +22,7 @@ struct gnet_stats_basic
{
__u64 bytes;
__u32 packets;
};
} __attribute__ ((packed));
/**
* struct gnet_stats_rate_est - rate estimator


@@ -333,11 +333,10 @@ static inline void part_dec_in_flight(struct hd_struct *part)
part_to_disk(part)->part0.in_flight--;
}
/* drivers/block/ll_rw_blk.c */
/* block/blk-core.c */
extern void part_round_stats(int cpu, struct hd_struct *part);
/* drivers/block/genhd.c */
extern int get_blkdev_list(char *, int);
/* block/genhd.c */
extern void add_disk(struct gendisk *disk);
extern void del_gendisk(struct gendisk *gp);
extern void unlink_gendisk(struct gendisk *gp);


@@ -15,55 +15,61 @@
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
*
* The hardirq count can be overridden per architecture, the default is:
* The hardirq count can in theory reach the same as NR_IRQS.
* In reality, the number of nested IRQs is limited to the stack
* size as well. For archs with over 1000 IRQs it is not practical
* to expect that they will all nest. We give a max of 10 bits for
* hardirq nesting. An arch may choose to give less than 10 bits.
* m68k expects it to be 8.
*
* - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
* - ( bit 28 is the PREEMPT_ACTIVE flag. )
* - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
* - bit 26 is the NMI_MASK
* - bit 28 is the PREEMPT_ACTIVE flag
*
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
* HARDIRQ_MASK: 0x0fff0000
* HARDIRQ_MASK: 0x03ff0000
* NMI_MASK: 0x04000000
*/
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define NMI_BITS 1
#define MAX_HARDIRQ_BITS 10
#ifndef HARDIRQ_BITS
#define HARDIRQ_BITS 12
#ifndef MAX_HARDIRQS_PER_CPU
#define MAX_HARDIRQS_PER_CPU NR_IRQS
# define HARDIRQ_BITS MAX_HARDIRQ_BITS
#endif
/*
* The hardirq mask has to be large enough to have space for potentially
* all IRQ sources in the system nesting on a single CPU.
*/
#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
# error HARDIRQ_BITS is too low!
#endif
#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
#error HARDIRQ_BITS too high!
#endif
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
#define __IRQ_MASK(x) ((1UL << (x))-1)
#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
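With the default widths above, the shifts and masks work out as follows (a worked check; an arch may choose a smaller HARDIRQ_BITS):

/*
 *   PREEMPT_SHIFT = 0            PREEMPT_MASK = 0x000000ff
 *   SOFTIRQ_SHIFT = 0 + 8  = 8   SOFTIRQ_MASK = 0x0000ff00
 *   HARDIRQ_SHIFT = 8 + 8  = 16  HARDIRQ_MASK = 0x03ff0000 (10 bits)
 *   NMI_SHIFT     = 16 + 10 = 26 NMI_MASK     = 0x04000000 (1 bit)
 */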
#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
#error PREEMPT_ACTIVE is too low!
#endif
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
| NMI_MASK))
/*
* Are we doing bottom half or hardware interrupt processing?
@@ -73,6 +79,11 @@
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
/*
* Are we in NMI context?
*/
#define in_nmi() (preempt_count() & NMI_MASK)
#if defined(CONFIG_PREEMPT)
# define PREEMPT_INATOMIC_BASE kernel_locked()
# define PREEMPT_CHECK_OFFSET 1
@@ -164,20 +175,24 @@ extern void irq_enter(void);
*/
extern void irq_exit(void);
#define nmi_enter() \
do { \
ftrace_nmi_enter(); \
lockdep_off(); \
rcu_nmi_enter(); \
__irq_enter(); \
#define nmi_enter() \
do { \
ftrace_nmi_enter(); \
BUG_ON(in_nmi()); \
add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
lockdep_off(); \
rcu_nmi_enter(); \
trace_hardirq_enter(); \
} while (0)
#define nmi_exit() \
do { \
__irq_exit(); \
rcu_nmi_exit(); \
lockdep_on(); \
ftrace_nmi_exit(); \
#define nmi_exit() \
do { \
trace_hardirq_exit(); \
rcu_nmi_exit(); \
lockdep_on(); \
BUG_ON(!in_nmi()); \
sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
ftrace_nmi_exit(); \
} while (0)
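A sketch of how an architecture's NMI entry path would bracket its handler with these macros; do_nmi_work() is a stand-in for the real processing:

static void do_nmi_work(void);

static void my_nmi_handler(void)
{
	nmi_enter();		/* in_nmi() is true from here on */
	do_nmi_work();
	nmi_exit();
}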
#endif /* LINUX_HARDIRQ_H */


@@ -38,6 +38,7 @@ struct hdlc_proto {
int (*ioctl)(struct net_device *dev, struct ifreq *ifr);
__be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev);
int (*netif_rx)(struct sk_buff *skb);
int (*xmit)(struct sk_buff *skb, struct net_device *dev);
struct module *module;
struct hdlc_proto *next; /* next protocol in the list */
};
@@ -102,6 +103,10 @@ static __inline__ void debug_frame(const struct sk_buff *skb)
int hdlc_open(struct net_device *dev);
/* Must be called by hardware driver when HDLC device is being closed */
void hdlc_close(struct net_device *dev);
/* May be used by hardware driver */
int hdlc_change_mtu(struct net_device *dev, int new_mtu);
/* Must be pointed to by hw driver's dev->netdev_ops->ndo_start_xmit */
int hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev);
int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
size_t size);

View File

@@ -215,7 +215,7 @@ struct hdlcdrv_state {
struct hdlcdrv_hdlctx {
struct hdlcdrv_hdlcbuffer hbuf;
long in_hdlc_tx;
unsigned long in_hdlc_tx;
/*
* 0 = send flags
* 1 = send txtail (flags)
@@ -241,7 +241,6 @@ struct hdlcdrv_state {
struct hdlcdrv_bitbuffer bitbuf_hdlc;
#endif /* HDLCDRV_DEBUG */
struct net_device_stats stats;
int ptt_keyed;
/* queued skb for transmission */

View File

@@ -1,68 +1,6 @@
#ifndef _LINUX_HDREG_H
#define _LINUX_HDREG_H
#ifdef __KERNEL__
#include <linux/ata.h>
/*
* This file contains some defines for the AT-hd-controller.
* Various sources.
*/
/* ide.c has its own port definitions in "ide.h" */
#define HD_IRQ 14
/* Hd controller regs. Ref: IBM AT Bios-listing */
#define HD_DATA 0x1f0 /* _CTL when writing */
#define HD_ERROR 0x1f1 /* see err-bits */
#define HD_NSECTOR 0x1f2 /* nr of sectors to read/write */
#define HD_SECTOR 0x1f3 /* starting sector */
#define HD_LCYL 0x1f4 /* starting cylinder */
#define HD_HCYL 0x1f5 /* high byte of starting cyl */
#define HD_CURRENT 0x1f6 /* 101dhhhh , d=drive, hhhh=head */
#define HD_STATUS 0x1f7 /* see status-bits */
#define HD_FEATURE HD_ERROR /* same io address, read=error, write=feature */
#define HD_PRECOMP HD_FEATURE /* obsolete use of this port - predates IDE */
#define HD_COMMAND HD_STATUS /* same io address, read=status, write=cmd */
#define HD_CMD 0x3f6 /* used for resets */
#define HD_ALTSTATUS 0x3f6 /* same as HD_STATUS but doesn't clear irq */
/* remainder is shared between hd.c, ide.c, ide-cd.c, and the hdparm utility */
/* Bits of HD_STATUS */
#define ERR_STAT 0x01
#define INDEX_STAT 0x02
#define ECC_STAT 0x04 /* Corrected error */
#define DRQ_STAT 0x08
#define SEEK_STAT 0x10
#define SRV_STAT 0x10
#define WRERR_STAT 0x20
#define READY_STAT 0x40
#define BUSY_STAT 0x80
/* Bits for HD_ERROR */
#define MARK_ERR 0x01 /* Bad address mark */
#define ILI_ERR 0x01 /* Illegal Length Indication (ATAPI) */
#define TRK0_ERR 0x02 /* couldn't find track 0 */
#define EOM_ERR 0x02 /* End Of Media (ATAPI) */
#define ABRT_ERR 0x04 /* Command aborted */
#define MCR_ERR 0x08 /* media change request */
#define ID_ERR 0x10 /* ID field not found */
#define MC_ERR 0x20 /* media changed */
#define ECC_ERR 0x40 /* Uncorrectable ECC error */
#define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */
#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */
#define LFS_ERR 0xf0 /* Last Failed Sense (ATAPI) */
/* Bits of HD_NSECTOR */
#define CD 0x01
#define IO 0x02
#define REL 0x04
#define TAG_MASK 0xf8
#endif /* __KERNEL__ */
#include <linux/types.h>
/*
@@ -191,6 +129,7 @@ typedef struct hd_drive_hob_hdr {
#define TASKFILE_INVALID 0x7fff
#endif
#ifndef __KERNEL__
/* ATA/ATAPI Commands pre T13 Spec */
#define WIN_NOP 0x00
/*
@@ -379,6 +318,7 @@ typedef struct hd_drive_hob_hdr {
#define SECURITY_ERASE_UNIT 0xBD
#define SECURITY_FREEZE_LOCK 0xBE
#define SECURITY_DISABLE_PASSWORD 0xBF
#endif /* __KERNEL__ */
struct hd_geometry {
unsigned char heads;
@@ -448,6 +388,7 @@ enum {
#define __NEW_HD_DRIVE_ID
#ifndef __KERNEL__
/*
* Structure returned by HDIO_GET_IDENTITY, as per ANSI NCITS ATA6 rev.1b spec.
*
@@ -699,6 +640,7 @@ struct hd_driveid {
* 7:0 Signature
*/
};
#endif /* __KERNEL__ */
/*
* IDE "nice" flags. These are used on a per drive basis to determine

View File

@@ -270,6 +270,7 @@ struct hid_item {
#define HID_QUIRK_INVERT 0x00000001
#define HID_QUIRK_NOTOUCH 0x00000002
#define HID_QUIRK_IGNORE 0x00000004
#define HID_QUIRK_NOGET 0x00000008
#define HID_QUIRK_BADPAD 0x00000020
#define HID_QUIRK_MULTI_INPUT 0x00000040
@@ -603,12 +604,17 @@ struct hid_ll_driver {
int (*open)(struct hid_device *hdev);
void (*close)(struct hid_device *hdev);
int (*power)(struct hid_device *hdev, int level);
int (*hidinput_input_event) (struct input_dev *idev, unsigned int type,
unsigned int code, int value);
int (*parse)(struct hid_device *hdev);
};
#define PM_HINT_FULLON (1<<5)
#define PM_HINT_NORMAL (1<<1)
/* Applications from HID Usage Tables 4/8/99 Version 1.1 */
/* We ignore a few input applications that are not widely used */
#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002))
@@ -641,6 +647,7 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int
void hid_output_report(struct hid_report *report, __u8 *data);
struct hid_device *hid_allocate_device(void);
int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
int hid_check_keys_pressed(struct hid_device *hid);
int hid_connect(struct hid_device *hid, unsigned int connect_mask);
/**
@@ -791,21 +798,5 @@ dbg_hid(const char *fmt, ...)
__FILE__ , ## arg)
#endif /* HID_FF */
#ifdef __KERNEL__
#ifdef CONFIG_HID_COMPAT
#define HID_COMPAT_LOAD_DRIVER(name) \
/* prototype to avoid sparse warning */ \
extern void hid_compat_##name(void); \
void hid_compat_##name(void) { } \
EXPORT_SYMBOL(hid_compat_##name)
#else
#define HID_COMPAT_LOAD_DRIVER(name)
#endif /* HID_COMPAT */
#define HID_COMPAT_CALL_DRIVER(name) do { \
extern void hid_compat_##name(void); \
hid_compat_##name(); \
} while (0)
#endif /* __KERNEL__ */
#endif

View File

@@ -19,8 +19,21 @@ static inline void flush_kernel_dcache_page(struct page *page)
}
#endif
#ifdef CONFIG_HIGHMEM
#include <asm/kmap_types.h>
#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT)
void debug_kmap_atomic(enum km_type type);
#else
static inline void debug_kmap_atomic(enum km_type type)
{
}
#endif
#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>
/* declarations for linux/mm/highmem.c */
@@ -44,8 +57,6 @@ static inline void *kmap(struct page *page)
#define kunmap(page) do { (void) (page); } while (0)
#include <asm/kmap_types.h>
static inline void *kmap_atomic(struct page *page, enum km_type idx)
{
pagefault_disable();

View File

@@ -1,7 +1,14 @@
#ifndef _LINUX_I2C_ALGO_PCA_H
#define _LINUX_I2C_ALGO_PCA_H
/* Clock speeds for the bus */
/* Chips known to the pca algo */
#define I2C_PCA_CHIP_9564 0x00
#define I2C_PCA_CHIP_9665 0x01
/* Internal period for PCA9665 oscillator */
#define I2C_PCA_OSC_PER 3 /* in units of 1e-8 s, i.e. 30 ns */
/* Clock speeds for the bus for PCA9564*/
#define I2C_PCA_CON_330kHz 0x00
#define I2C_PCA_CON_288kHz 0x01
#define I2C_PCA_CON_217kHz 0x02
@@ -18,6 +25,26 @@
#define I2C_PCA_ADR 0x02 /* OWN ADR Read/Write */
#define I2C_PCA_CON 0x03 /* CONTROL Read/Write */
/* PCA9665 registers */
#define I2C_PCA_INDPTR 0x00 /* INDIRECT Pointer Write Only */
#define I2C_PCA_IND 0x02 /* INDIRECT Read/Write */
/* PCA9665 indirect registers */
#define I2C_PCA_ICOUNT 0x00 /* Byte Count for buffered mode */
#define I2C_PCA_IADR 0x01 /* OWN ADR */
#define I2C_PCA_ISCLL 0x02 /* SCL LOW period */
#define I2C_PCA_ISCLH 0x03 /* SCL HIGH period */
#define I2C_PCA_ITO 0x04 /* TIMEOUT */
#define I2C_PCA_IPRESET 0x05 /* Parallel bus reset */
#define I2C_PCA_IMODE 0x06 /* I2C Bus mode */
/* PCA9665 I2C bus mode */
#define I2C_PCA_MODE_STD 0x00 /* Standard mode */
#define I2C_PCA_MODE_FAST 0x01 /* Fast mode */
#define I2C_PCA_MODE_FASTP 0x02 /* Fast Plus mode */
#define I2C_PCA_MODE_TURBO 0x03 /* Turbo mode */
#define I2C_PCA_CON_AA 0x80 /* Assert Acknowledge */
#define I2C_PCA_CON_ENSIO 0x40 /* Enable */
#define I2C_PCA_CON_STA 0x20 /* Start */
@@ -31,7 +58,9 @@ struct i2c_algo_pca_data {
int (*read_byte) (void *data, int reg);
int (*wait_for_completion) (void *data);
void (*reset_chip) (void *data);
/* i2c_clock values are defined in linux/i2c-algo-pca.h */
/* For PCA9564, use one of the predefined frequencies:
* 330000, 288000, 217000, 146000, 88000, 59000, 44000, 36000
* For PCA9665, use the frequency you want here. */
unsigned int i2c_clock;
};

View File

@@ -71,6 +71,7 @@
#define I2C_DRIVERID_VP27SMPX 93 /* Panasonic VP27s tuner internal MPX */
#define I2C_DRIVERID_M52790 95 /* Mitsubishi M52790SP/FP AV switch */
#define I2C_DRIVERID_CS5345 96 /* cs5345 audio processor */
#define I2C_DRIVERID_AU8522 97 /* Auvitek au8522 */
#define I2C_DRIVERID_OV7670 1048 /* Omnivision 7670 camera */
@@ -87,6 +88,7 @@
#define I2C_HW_B_CX2341X 0x010020 /* Conexant CX2341X MPEG encoder cards */
#define I2C_HW_B_CX23885 0x010022 /* conexant 23885 based tv cards (bus1) */
#define I2C_HW_B_AU0828 0x010023 /* auvitek au0828 usb bridge */
#define I2C_HW_B_HDPVR 0x010025 /* Hauppauge HD PVR */
/* --- SGI adapters */
#define I2C_HW_SGI_VINO 0x160000

View File

@@ -6,7 +6,7 @@ struct i2c_pca9564_pf_platform_data {
 * not supplied (negative value), but then it
 * cannot recover from some error conditions */
int i2c_clock_speed; /* values are defined in linux/i2c-algo-pca.h */
int timeout; /* timeout = this value * 10us */
int timeout; /* timeout in jiffies */
};
#endif /* I2C_PCA9564_PLATFORM_H */

View File

@@ -2,6 +2,7 @@
#define _LINUX_AT24_H
#include <linux/types.h>
#include <linux/memory.h>
/*
* As seen through Linux I2C, differences between the most common types of I2C
@@ -23,6 +24,9 @@ struct at24_platform_data {
#define AT24_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */
#define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */
#define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */
void (*setup)(struct memory_accessor *, void *context);
void *context;
};
#endif /* _LINUX_AT24_H */

View File

@@ -218,6 +218,53 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
/*----------------------------------------------------------------------*/
/* Power bus message definitions */
#define DEV_GRP_NULL 0x0
#define DEV_GRP_P1 0x1
#define DEV_GRP_P2 0x2
#define DEV_GRP_P3 0x4
#define RES_GRP_RES 0x0
#define RES_GRP_PP 0x1
#define RES_GRP_RC 0x2
#define RES_GRP_PP_RC 0x3
#define RES_GRP_PR 0x4
#define RES_GRP_PP_PR 0x5
#define RES_GRP_RC_PR 0x6
#define RES_GRP_ALL 0x7
#define RES_TYPE2_R0 0x0
#define RES_TYPE_ALL 0x7
#define RES_STATE_WRST 0xF
#define RES_STATE_ACTIVE 0xE
#define RES_STATE_SLEEP 0x8
#define RES_STATE_OFF 0x0
/*
* Power Bus Message Format ... these can be sent individually by Linux,
* but are usually part of downloaded scripts that are run when various
* power events are triggered.
*
* Broadcast Message (16 Bits):
* DEV_GRP[15:13] MT[12] RES_GRP[11:9] RES_TYPE2[8:7] RES_TYPE[6:4]
* RES_STATE[3:0]
*
* Singular Message (16 Bits):
* DEV_GRP[15:13] MT[12] RES_ID[11:4] RES_STATE[3:0]
*/
#define MSG_BROADCAST(devgrp, grp, type, type2, state) \
( (devgrp) << 13 | 1 << 12 | (grp) << 9 | (type2) << 7 \
| (type) << 4 | (state))
#define MSG_SINGULAR(devgrp, id, state) \
((devgrp) << 13 | 0 << 12 | (id) << 4 | (state))
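As a quick check of the message layout documented above, here is a stand-alone sketch using the defines from this hunk; the resource id 0x17 is an arbitrary example value, not a real TWL4030 resource id.

#include <stdio.h>

#define DEV_GRP_P1		0x1
#define RES_STATE_ACTIVE	0xE

#define MSG_SINGULAR(devgrp, id, state) \
	((devgrp) << 13 | 0 << 12 | (id) << 4 | (state))

int main(void)
{
	unsigned int msg = MSG_SINGULAR(DEV_GRP_P1, 0x17, RES_STATE_ACTIVE);

	/* 0x217e = 001 0 00010111 1110: DEV_GRP_P1, singular, id 0x17, ACTIVE */
	printf("0x%04x\n", msg);
	return 0;
}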
/*----------------------------------------------------------------------*/
struct twl4030_bci_platform_data {
int *battery_tmp_tbl;
unsigned int tblsize;

View File

@@ -207,7 +207,7 @@ struct tok_info {
unsigned short exsap_station_id;
unsigned short global_int_enable;
struct sk_buff *current_skb;
struct net_device_stats tr_stats;
unsigned char auto_speedsave;
open_state open_status, sap_status;
enum {MANUAL, AUTOMATIC} open_mode;

View File

@@ -26,7 +26,7 @@
#include <asm/io.h>
#include <asm/mutex.h>
#if defined(CONFIG_CRIS) || defined(CONFIG_FRV)
#if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300)
# define SUPPORT_VLB_SYNC 0
#else
# define SUPPORT_VLB_SYNC 1
@@ -40,6 +40,13 @@
#define ERROR_RESET 3 /* Reset controller every 4th retry */
#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
/* Error codes returned in rq->errors to the higher part of the driver. */
enum {
IDE_DRV_ERROR_GENERAL = 101,
IDE_DRV_ERROR_FILEMARK = 102,
IDE_DRV_ERROR_EOD = 103,
};
/*
* Definitions for accessing IDE controller registers
*/
@@ -193,42 +200,8 @@ static inline void ide_std_init_ports(hw_regs_t *hw,
hw->io_ports.ctl_addr = ctl_addr;
}
/* for IDE PCI controllers in legacy mode, temporary */
static inline int __ide_default_irq(unsigned long base)
{
switch (base) {
#ifdef CONFIG_IA64
case 0x1f0: return isa_irq_to_vector(14);
case 0x170: return isa_irq_to_vector(15);
#else
case 0x1f0: return 14;
case 0x170: return 15;
#endif
}
return 0;
}
#if defined(CONFIG_ARM) || defined(CONFIG_FRV) || defined(CONFIG_M68K) || \
defined(CONFIG_MIPS) || defined(CONFIG_MN10300) || defined(CONFIG_PARISC) \
|| defined(CONFIG_PPC) || defined(CONFIG_SPARC) || defined(CONFIG_SPARC64)
#include <asm/ide.h>
#else
#include <asm-generic/ide_iops.h>
#endif
#define MAX_HWIFS 10
/* Currently only m68k, apus and m8xx need it */
#ifndef IDE_ARCH_ACK_INTR
# define ide_ack_intr(hwif) (1)
#endif
/* Currently only Atari needs it */
#ifndef IDE_ARCH_LOCK
# define ide_release_lock() do {} while (0)
# define ide_get_lock(hdlr, data) do {} while (0)
#endif /* IDE_ARCH_LOCK */
/*
* Now for the data we need to maintain per-drive: ide_drive_t
*/
@@ -268,56 +241,52 @@ typedef enum {
enum {
IDE_TFLAG_LBA48 = (1 << 0),
IDE_TFLAG_FLAGGED = (1 << 2),
IDE_TFLAG_OUT_DATA = (1 << 3),
IDE_TFLAG_OUT_HOB_FEATURE = (1 << 4),
IDE_TFLAG_OUT_HOB_NSECT = (1 << 5),
IDE_TFLAG_OUT_HOB_LBAL = (1 << 6),
IDE_TFLAG_OUT_HOB_LBAM = (1 << 7),
IDE_TFLAG_OUT_HOB_LBAH = (1 << 8),
IDE_TFLAG_OUT_HOB_FEATURE = (1 << 1),
IDE_TFLAG_OUT_HOB_NSECT = (1 << 2),
IDE_TFLAG_OUT_HOB_LBAL = (1 << 3),
IDE_TFLAG_OUT_HOB_LBAM = (1 << 4),
IDE_TFLAG_OUT_HOB_LBAH = (1 << 5),
IDE_TFLAG_OUT_HOB = IDE_TFLAG_OUT_HOB_FEATURE |
IDE_TFLAG_OUT_HOB_NSECT |
IDE_TFLAG_OUT_HOB_LBAL |
IDE_TFLAG_OUT_HOB_LBAM |
IDE_TFLAG_OUT_HOB_LBAH,
IDE_TFLAG_OUT_FEATURE = (1 << 9),
IDE_TFLAG_OUT_NSECT = (1 << 10),
IDE_TFLAG_OUT_LBAL = (1 << 11),
IDE_TFLAG_OUT_LBAM = (1 << 12),
IDE_TFLAG_OUT_LBAH = (1 << 13),
IDE_TFLAG_OUT_FEATURE = (1 << 6),
IDE_TFLAG_OUT_NSECT = (1 << 7),
IDE_TFLAG_OUT_LBAL = (1 << 8),
IDE_TFLAG_OUT_LBAM = (1 << 9),
IDE_TFLAG_OUT_LBAH = (1 << 10),
IDE_TFLAG_OUT_TF = IDE_TFLAG_OUT_FEATURE |
IDE_TFLAG_OUT_NSECT |
IDE_TFLAG_OUT_LBAL |
IDE_TFLAG_OUT_LBAM |
IDE_TFLAG_OUT_LBAH,
IDE_TFLAG_OUT_DEVICE = (1 << 14),
IDE_TFLAG_WRITE = (1 << 15),
IDE_TFLAG_FLAGGED_SET_IN_FLAGS = (1 << 16),
IDE_TFLAG_IN_DATA = (1 << 17),
IDE_TFLAG_CUSTOM_HANDLER = (1 << 18),
IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 19),
IDE_TFLAG_IN_HOB_FEATURE = (1 << 20),
IDE_TFLAG_IN_HOB_NSECT = (1 << 21),
IDE_TFLAG_IN_HOB_LBAL = (1 << 22),
IDE_TFLAG_IN_HOB_LBAM = (1 << 23),
IDE_TFLAG_IN_HOB_LBAH = (1 << 24),
IDE_TFLAG_OUT_DEVICE = (1 << 11),
IDE_TFLAG_WRITE = (1 << 12),
IDE_TFLAG_CUSTOM_HANDLER = (1 << 13),
IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 14),
IDE_TFLAG_IN_HOB_ERROR = (1 << 15),
IDE_TFLAG_IN_HOB_NSECT = (1 << 16),
IDE_TFLAG_IN_HOB_LBAL = (1 << 17),
IDE_TFLAG_IN_HOB_LBAM = (1 << 18),
IDE_TFLAG_IN_HOB_LBAH = (1 << 19),
IDE_TFLAG_IN_HOB_LBA = IDE_TFLAG_IN_HOB_LBAL |
IDE_TFLAG_IN_HOB_LBAM |
IDE_TFLAG_IN_HOB_LBAH,
IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_FEATURE |
IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_ERROR |
IDE_TFLAG_IN_HOB_NSECT |
IDE_TFLAG_IN_HOB_LBA,
IDE_TFLAG_IN_FEATURE = (1 << 1),
IDE_TFLAG_IN_NSECT = (1 << 25),
IDE_TFLAG_IN_LBAL = (1 << 26),
IDE_TFLAG_IN_LBAM = (1 << 27),
IDE_TFLAG_IN_LBAH = (1 << 28),
IDE_TFLAG_IN_ERROR = (1 << 20),
IDE_TFLAG_IN_NSECT = (1 << 21),
IDE_TFLAG_IN_LBAL = (1 << 22),
IDE_TFLAG_IN_LBAM = (1 << 23),
IDE_TFLAG_IN_LBAH = (1 << 24),
IDE_TFLAG_IN_LBA = IDE_TFLAG_IN_LBAL |
IDE_TFLAG_IN_LBAM |
IDE_TFLAG_IN_LBAH,
IDE_TFLAG_IN_TF = IDE_TFLAG_IN_NSECT |
IDE_TFLAG_IN_LBA,
IDE_TFLAG_IN_DEVICE = (1 << 29),
IDE_TFLAG_IN_DEVICE = (1 << 25),
IDE_TFLAG_HOB = IDE_TFLAG_OUT_HOB |
IDE_TFLAG_IN_HOB,
IDE_TFLAG_TF = IDE_TFLAG_OUT_TF |
@@ -325,15 +294,28 @@ enum {
IDE_TFLAG_DEVICE = IDE_TFLAG_OUT_DEVICE |
IDE_TFLAG_IN_DEVICE,
/* force 16-bit I/O operations */
IDE_TFLAG_IO_16BIT = (1 << 30),
/* ide_task_t was allocated using kmalloc() */
IDE_TFLAG_DYN = (1 << 31),
IDE_TFLAG_IO_16BIT = (1 << 26),
/* struct ide_cmd was allocated using kmalloc() */
IDE_TFLAG_DYN = (1 << 27),
IDE_TFLAG_FS = (1 << 28),
IDE_TFLAG_MULTI_PIO = (1 << 29),
};
enum {
IDE_FTFLAG_FLAGGED = (1 << 0),
IDE_FTFLAG_SET_IN_FLAGS = (1 << 1),
IDE_FTFLAG_OUT_DATA = (1 << 2),
IDE_FTFLAG_IN_DATA = (1 << 3),
};
struct ide_taskfile {
u8 hob_data; /* 0: high data byte (for TASKFILE IOCTL) */
/* 1-5: additional data to support LBA48 */
union {
u8 hob_error; /* read: error */
u8 hob_feature; /* write: feature */
};
u8 hob_feature; /* 1-5: additional data to support LBA48 */
u8 hob_nsect;
u8 hob_lbal;
u8 hob_lbam;
@@ -359,16 +341,29 @@ struct ide_taskfile {
};
};
typedef struct ide_task_s {
struct ide_cmd {
union {
struct ide_taskfile tf;
u8 tf_array[14];
};
u8 ftf_flags; /* for TASKFILE ioctl */
u32 tf_flags;
int data_phase;
int protocol;
int sg_nents; /* number of sg entries */
int orig_sg_nents;
int sg_dma_direction; /* DMA transfer direction */
unsigned int nbytes;
unsigned int nleft;
unsigned int last_xfer_len;
struct scatterlist *cursg;
unsigned int cursg_ofs;
struct request *rq; /* copy of request */
void *special; /* valid_t generally */
} ide_task_t;
};
/* ATAPI packet command flags */
enum {
@@ -380,15 +375,13 @@ enum {
PC_FLAG_DMA_IN_PROGRESS = (1 << 4),
PC_FLAG_DMA_ERROR = (1 << 5),
PC_FLAG_WRITING = (1 << 6),
/* command timed out */
PC_FLAG_TIMEDOUT = (1 << 7),
};
/*
* With each packet command, we allocate a buffer of IDE_PC_BUFFER_SIZE bytes.
* This is used for several packet commands (not for READ/WRITE commands).
*/
#define IDE_PC_BUFFER_SIZE 256
#define IDE_PC_BUFFER_SIZE 64
#define ATAPI_WAIT_PC (60 * HZ)
struct ide_atapi_pc {
@@ -426,9 +419,6 @@ struct ide_atapi_pc {
struct idetape_bh *bh;
char *b_data;
struct scatterlist *sg;
unsigned int sg_cnt;
unsigned long timeout;
};
@@ -452,7 +442,6 @@ struct ide_disk_ops {
int);
ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *,
sector_t);
int (*end_request)(struct ide_drive_s *, int, int);
int (*ioctl)(struct ide_drive_s *, struct block_device *,
fmode_t, unsigned int, unsigned long);
};
@@ -470,11 +459,6 @@ enum {
IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3),
/* TOC track numbers are in BCD. */
IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4),
/*
* Drive does not provide data in multiples of SECTOR_SIZE
* when more than one interrupt is needed.
*/
IDE_AFLAG_LIMIT_NFRAMES = (1 << 5),
/* Saved TOC information is current. */
IDE_AFLAG_TOC_VALID = (1 << 6),
/* We think that the drive door is locked. */
@@ -528,8 +512,6 @@ enum {
IDE_DFLAG_NICE1 = (1 << 5),
/* device is physically present */
IDE_DFLAG_PRESENT = (1 << 6),
/* device ejected hint */
IDE_DFLAG_DEAD = (1 << 7),
/* id read from device (synthetic if not set) */
IDE_DFLAG_ID_READ = (1 << 8),
IDE_DFLAG_NOPROBE = (1 << 9),
@@ -621,7 +603,7 @@ struct ide_drive_s {
unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */
unsigned int cyl; /* "real" number of cyls */
unsigned int drive_data; /* used by set_pio_mode/selectproc */
unsigned int drive_data; /* used by set_pio_mode/dev_select() */
unsigned int failures; /* current failure count */
unsigned int max_failures; /* maximum allowed failure count */
u64 probed_capacity;/* initial reported media capacity (ide-cd only currently) */
@@ -643,8 +625,11 @@ struct ide_drive_s {
/* current packet command */
struct ide_atapi_pc *pc;
/* last failed packet command */
struct ide_atapi_pc *failed_pc;
/* callback for packet commands */
void (*pc_callback)(struct ide_drive_s *, int);
int (*pc_callback)(struct ide_drive_s *, int);
void (*pc_update_buffers)(struct ide_drive_s *, struct ide_atapi_pc *);
int (*pc_io_buffers)(struct ide_drive_s *, struct ide_atapi_pc *,
@@ -674,16 +659,16 @@ struct ide_tp_ops {
void (*exec_command)(struct hwif_s *, u8);
u8 (*read_status)(struct hwif_s *);
u8 (*read_altstatus)(struct hwif_s *);
void (*write_devctl)(struct hwif_s *, u8);
void (*set_irq)(struct hwif_s *, int);
void (*dev_select)(ide_drive_t *);
void (*tf_load)(ide_drive_t *, struct ide_cmd *);
void (*tf_read)(ide_drive_t *, struct ide_cmd *);
void (*tf_load)(ide_drive_t *, struct ide_task_s *);
void (*tf_read)(ide_drive_t *, struct ide_task_s *);
void (*input_data)(ide_drive_t *, struct request *, void *,
unsigned int);
void (*output_data)(ide_drive_t *, struct request *, void *,
unsigned int);
void (*input_data)(ide_drive_t *, struct ide_cmd *,
void *, unsigned int);
void (*output_data)(ide_drive_t *, struct ide_cmd *,
void *, unsigned int);
};
extern const struct ide_tp_ops default_tp_ops;
@@ -694,7 +679,6 @@ extern const struct ide_tp_ops default_tp_ops;
* @init_dev: host specific initialization of a device
* @set_pio_mode: routine to program host for PIO mode
* @set_dma_mode: routine to program host for DMA mode
* @selectproc: tweaks hardware to select drive
* @reset_poll: chipset polling based on hba specifics
* @pre_reset: chipset specific changes to default for device-hba resets
* @resetproc: routine to reset controller after a disk reset
@@ -711,7 +695,6 @@ struct ide_port_ops {
void (*init_dev)(ide_drive_t *);
void (*set_pio_mode)(ide_drive_t *, const u8);
void (*set_dma_mode)(ide_drive_t *, const u8);
void (*selectproc)(ide_drive_t *);
int (*reset_poll)(ide_drive_t *);
void (*pre_reset)(ide_drive_t *);
void (*resetproc)(ide_drive_t *);
@@ -727,13 +710,15 @@ struct ide_port_ops {
struct ide_dma_ops {
void (*dma_host_set)(struct ide_drive_s *, int);
int (*dma_setup)(struct ide_drive_s *);
void (*dma_exec_cmd)(struct ide_drive_s *, u8);
int (*dma_setup)(struct ide_drive_s *, struct ide_cmd *);
void (*dma_start)(struct ide_drive_s *);
int (*dma_end)(struct ide_drive_s *);
int (*dma_test_irq)(struct ide_drive_s *);
void (*dma_lost_irq)(struct ide_drive_s *);
void (*dma_timeout)(struct ide_drive_s *);
/* below ones are optional */
int (*dma_check)(struct ide_drive_s *, struct ide_cmd *);
int (*dma_timer_expiry)(struct ide_drive_s *);
void (*dma_clear)(struct ide_drive_s *);
/*
* The following method is optional and only required to be
* implemented for the SFF-8038i compatible controllers.
@@ -796,19 +781,8 @@ typedef struct hwif_s {
/* Scatter-gather list used to build the above */
struct scatterlist *sg_table;
int sg_max_nents; /* Maximum number of entries in it */
int sg_nents; /* Current number of entries in it */
int orig_sg_nents;
int sg_dma_direction; /* dma transfer direction */
/* data phase of the active command (currently only valid for PIO/DMA) */
int data_phase;
struct ide_task_s task; /* current command */
unsigned int nsect;
unsigned int nleft;
struct scatterlist *cursg;
unsigned int cursg_ofs;
struct ide_cmd cmd; /* current command */
int rqsize; /* max sectors per request */
int irq; /* our irq number */
@@ -866,9 +840,18 @@ struct ide_host {
ide_hwif_t *ports[MAX_HOST_PORTS + 1];
unsigned int n_ports;
struct device *dev[2];
unsigned int (*init_chipset)(struct pci_dev *);
int (*init_chipset)(struct pci_dev *);
void (*get_lock)(irq_handler_t, void *);
void (*release_lock)(void);
irq_handler_t irq_handler;
unsigned long host_flags;
int irq_flags;
void *host_priv;
ide_hwif_t *cur_port; /* for hosts requiring serialization */
@@ -885,7 +868,7 @@ typedef ide_startstop_t (ide_handler_t)(ide_drive_t *);
typedef int (ide_expiry_t)(ide_drive_t *);
/* used by ide-cd, ide-floppy, etc. */
typedef void (xfer_func_t)(ide_drive_t *, struct request *rq, void *, unsigned);
typedef void (xfer_func_t)(ide_drive_t *, struct ide_cmd *, void *, unsigned);
extern struct mutex ide_setting_mtx;
@@ -1061,10 +1044,11 @@ enum {
};
/* DRV_NAME has to be defined in the driver before using the macro below */
#define __ide_debug_log(lvl, fmt, args...) \
{ \
if (unlikely(drive->debug_mask & lvl)) \
printk(KERN_INFO DRV_NAME ": " fmt, ## args); \
#define __ide_debug_log(lvl, fmt, args...) \
{ \
if (unlikely(drive->debug_mask & lvl)) \
printk(KERN_INFO DRV_NAME ": %s: " fmt "\n", \
__func__, ## args); \
}
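The reworked macro now prefixes every message with the calling function and supplies the trailing newline itself. A user-space sketch of the same pattern, with DRV_NAME and the debug_mask gate as stand-ins:

#include <stdio.h>

#define DRV_NAME "demo-ide"

static unsigned long debug_mask = 1;

#define __debug_log(lvl, fmt, args...)			\
{							\
	if (debug_mask & (lvl))				\
		printf(DRV_NAME ": %s: " fmt "\n",	\
		       __func__, ## args);		\
}

int main(void)
{
	__debug_log(1, "probing unit %d", 0);	/* demo-ide: main: probing unit 0 */
	return 0;
}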
/*
@@ -1103,7 +1087,7 @@ int generic_ide_resume(struct device *);
void ide_complete_power_step(ide_drive_t *, struct request *);
ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
void ide_complete_pm_request(ide_drive_t *, struct request *);
void ide_complete_pm_rq(ide_drive_t *, struct request *);
void ide_check_pm_state(ide_drive_t *, struct request *);
/*
@@ -1115,7 +1099,6 @@ void ide_check_pm_state(ide_drive_t *, struct request *);
struct ide_driver {
const char *version;
ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t);
int (*end_request)(ide_drive_t *, int, int);
struct device_driver gen_driver;
int (*probe)(ide_drive_t *);
void (*remove)(ide_drive_t *);
@@ -1146,16 +1129,15 @@ int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned l
extern int ide_vlb_clk;
extern int ide_pci_clk;
extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs);
int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
int uptodate, int nr_sectors);
unsigned int ide_rq_bytes(struct request *);
int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int);
void ide_kill_rq(ide_drive_t *, struct request *);
extern void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry);
void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
void ide_execute_command(ide_drive_t *, u8, ide_handler_t *, unsigned int,
ide_expiry_t *);
void ide_execute_pkt_cmd(ide_drive_t *);
void ide_execute_command(ide_drive_t *, struct ide_cmd *, ide_handler_t *,
unsigned int);
void ide_pad_transfer(ide_drive_t *, int, int);
@@ -1169,41 +1151,36 @@ int ide_busy_sleep(ide_hwif_t *, unsigned long, int);
int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
ide_startstop_t ide_do_park_unpark(ide_drive_t *, struct request *);
ide_startstop_t ide_do_devset(ide_drive_t *, struct request *);
extern ide_startstop_t ide_do_reset (ide_drive_t *);
extern int ide_devset_execute(ide_drive_t *drive,
const struct ide_devset *setting, int arg);
extern void ide_do_drive_cmd(ide_drive_t *, struct request *);
extern void ide_end_drive_cmd(ide_drive_t *, u8, u8);
void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8);
int ide_complete_rq(ide_drive_t *, int, unsigned int);
void ide_tf_dump(const char *, struct ide_taskfile *);
void ide_exec_command(ide_hwif_t *, u8);
u8 ide_read_status(ide_hwif_t *);
u8 ide_read_altstatus(ide_hwif_t *);
void ide_write_devctl(ide_hwif_t *, u8);
void ide_set_irq(ide_hwif_t *, int);
void ide_dev_select(ide_drive_t *);
void ide_tf_load(ide_drive_t *, struct ide_cmd *);
void ide_tf_read(ide_drive_t *, struct ide_cmd *);
void ide_tf_load(ide_drive_t *, ide_task_t *);
void ide_tf_read(ide_drive_t *, ide_task_t *);
void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
void ide_input_data(ide_drive_t *, struct request *, void *, unsigned int);
void ide_output_data(ide_drive_t *, struct request *, void *, unsigned int);
int ide_io_buffers(ide_drive_t *, struct ide_atapi_pc *, unsigned int, int);
extern void SELECT_DRIVE(ide_drive_t *);
void SELECT_MASK(ide_drive_t *, int);
u8 ide_read_error(ide_drive_t *);
void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *);
extern int drive_is_ready(ide_drive_t *);
void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8);
int ide_check_atapi_device(ide_drive_t *, const char *);
void ide_init_pc(struct ide_atapi_pc *);
@@ -1240,16 +1217,20 @@ int ide_cd_expiry(ide_drive_t *);
int ide_cd_get_xferlen(struct request *);
ide_startstop_t ide_issue_pc(ide_drive_t *);
ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *);
ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *);
ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *);
void task_end_request(ide_drive_t *, struct request *, u8);
void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int);
int ide_raw_taskfile(ide_drive_t *, ide_task_t *, u8 *, u16);
int ide_no_data_taskfile(ide_drive_t *, ide_task_t *);
void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8);
int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long);
int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16);
int ide_no_data_taskfile(ide_drive_t *, struct ide_cmd *);
int ide_taskfile_ioctl(ide_drive_t *, unsigned long);
int ide_dev_read_id(ide_drive_t *, u8, u16 *);
extern int ide_driveid_update(ide_drive_t *);
extern int ide_config_drive_speed(ide_drive_t *, u8);
@@ -1280,7 +1261,7 @@ static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev)
return 0;
}
void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int,
void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *,
hw_regs_t *, hw_regs_t **);
void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
@@ -1349,10 +1330,10 @@ enum {
IDE_HFLAG_ERROR_STOPS_FIFO = (1 << 19),
/* serialize ports */
IDE_HFLAG_SERIALIZE = (1 << 20),
/* use legacy IRQs */
IDE_HFLAG_LEGACY_IRQS = (1 << 21),
/* force use of legacy IRQs */
IDE_HFLAG_FORCE_LEGACY_IRQS = (1 << 22),
/* host is DTC2278 */
IDE_HFLAG_DTC2278 = (1 << 21),
/* 4 devices on a single set of I/O ports */
IDE_HFLAG_4DRIVES = (1 << 22),
/* host is TRM290 */
IDE_HFLAG_TRM290 = (1 << 23),
/* use 32-bit I/O ops */
@@ -1380,7 +1361,12 @@ enum {
struct ide_port_info {
char *name;
unsigned int (*init_chipset)(struct pci_dev *);
int (*init_chipset)(struct pci_dev *);
void (*get_lock)(irq_handler_t, void *);
void (*release_lock)(void);
void (*init_iops)(ide_hwif_t *);
void (*init_hwif)(ide_hwif_t *);
int (*init_dma)(ide_hwif_t *,
@@ -1397,6 +1383,9 @@ struct ide_port_info {
u16 max_sectors; /* if < than the default one */
u32 host_flags;
int irq_flags;
u8 pio_mask;
u8 swdma_mask;
u8 mwdma_mask;
@@ -1416,8 +1405,8 @@ int ide_pci_resume(struct pci_dev *);
#define ide_pci_resume NULL
#endif
void ide_map_sg(ide_drive_t *, struct request *);
void ide_init_sg_cmd(ide_drive_t *, struct request *);
void ide_map_sg(ide_drive_t *, struct ide_cmd *);
void ide_init_sg_cmd(struct ide_cmd *, unsigned int);
#define BAD_DMA_DRIVE 0
#define GOOD_DMA_DRIVE 1
@@ -1451,18 +1440,18 @@ ide_startstop_t ide_dma_intr(ide_drive_t *);
int ide_allocate_dma_engine(ide_hwif_t *);
void ide_release_dma_engine(ide_hwif_t *);
int ide_build_sglist(ide_drive_t *, struct request *);
void ide_destroy_dmatable(ide_drive_t *);
int ide_dma_prepare(ide_drive_t *, struct ide_cmd *);
void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *);
#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
int config_drive_for_dma(ide_drive_t *);
extern int ide_build_dmatable(ide_drive_t *, struct request *);
int ide_build_dmatable(ide_drive_t *, struct ide_cmd *);
void ide_dma_host_set(ide_drive_t *, int);
extern int ide_dma_setup(ide_drive_t *);
void ide_dma_exec_cmd(ide_drive_t *, u8);
int ide_dma_setup(ide_drive_t *, struct ide_cmd *);
extern void ide_dma_start(ide_drive_t *);
int ide_dma_end(ide_drive_t *);
int ide_dma_test_irq(ide_drive_t *);
int ide_dma_sff_timer_expiry(ide_drive_t *);
u8 ide_dma_sff_read_status(ide_hwif_t *);
extern const struct ide_dma_ops sff_dma_ops;
#else
@@ -1470,7 +1459,7 @@ static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
void ide_dma_lost_irq(ide_drive_t *);
void ide_dma_timeout(ide_drive_t *);
ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int);
#else
static inline int ide_id_dma_bug(ide_drive_t *drive) { return 0; }
@@ -1482,21 +1471,29 @@ static inline void ide_dma_on(ide_drive_t *drive) { ; }
static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; }
static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; }
static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
static inline int ide_dma_prepare(ide_drive_t *drive,
struct ide_cmd *cmd) { return 1; }
static inline void ide_dma_unmap_sg(ide_drive_t *drive,
struct ide_cmd *cmd) { ; }
#endif /* CONFIG_BLK_DEV_IDEDMA */
#ifdef CONFIG_BLK_DEV_IDEACPI
int ide_acpi_init(void);
extern int ide_acpi_exec_tfs(ide_drive_t *drive);
extern void ide_acpi_get_timing(ide_hwif_t *hwif);
extern void ide_acpi_push_timing(ide_hwif_t *hwif);
extern void ide_acpi_init(ide_hwif_t *hwif);
void ide_acpi_init_port(ide_hwif_t *);
void ide_acpi_port_init_devices(ide_hwif_t *);
extern void ide_acpi_set_state(ide_hwif_t *hwif, int on);
#else
static inline int ide_acpi_init(void) { return 0; }
static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; }
static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; }
static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; }
static inline void ide_acpi_init(ide_hwif_t *hwif) { ; }
static inline void ide_acpi_init_port(ide_hwif_t *hwif) { ; }
static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; }
static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
#endif
@@ -1530,9 +1527,7 @@ static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data)
hwif->hwif_data = data;
}
const char *ide_xfer_verbose(u8 mode);
extern void ide_toggle_bounce(ide_drive_t *drive, int on);
extern int ide_set_xfer_rate(ide_drive_t *drive, u8 rate);
u64 ide_get_lba_addr(struct ide_taskfile *, int);
u8 ide_dump_status(ide_drive_t *, const char *, u8);
@@ -1571,14 +1566,18 @@ void ide_timing_merge(struct ide_timing *, struct ide_timing *,
struct ide_timing *, unsigned int);
int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);
#ifdef CONFIG_IDE_XFER_MODE
int ide_scan_pio_blacklist(char *);
const char *ide_xfer_verbose(u8);
u8 ide_get_best_pio_mode(ide_drive_t *, u8, u8);
int ide_set_pio_mode(ide_drive_t *, u8);
int ide_set_dma_mode(ide_drive_t *, u8);
void ide_set_pio(ide_drive_t *, u8);
int ide_set_xfer_rate(ide_drive_t *, u8);
#else
static inline void ide_set_pio(ide_drive_t *drive, u8 pio) { ; }
static inline int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) { return -1; }
#endif
static inline void ide_set_max_pio(ide_drive_t *drive)
{
@@ -1611,6 +1610,10 @@ static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive)
#define ide_port_for_each_dev(i, dev, port) \
for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++)
#define ide_port_for_each_present_dev(i, dev, port) \
for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) \
if ((dev)->dev_flags & IDE_DFLAG_PRESENT)
#define ide_host_for_each_port(i, port, host) \
for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)

View File

@@ -106,6 +106,7 @@ int idr_get_new(struct idr *idp, void *ptr, int *id);
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
int idr_for_each(struct idr *idp,
int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *idp, int *nextid);
void *idr_replace(struct idr *idp, void *ptr, int id);
void idr_remove(struct idr *idp, int id);
void idr_remove_all(struct idr *idp);

View File

@@ -18,6 +18,22 @@
#include <linux/types.h>
#include <asm/byteorder.h>
/*
* DS bit usage
*
* TA = transmitter address
* RA = receiver address
* DA = destination address
* SA = source address
*
* ToDS FromDS A1(RA) A2(TA) A3 A4 Use
* -----------------------------------------------------------------
* 0 0 DA SA BSSID - IBSS/DLS
* 0 1 DA BSSID SA - AP -> STA
* 1 0 BSSID SA DA - AP <- STA
* 1 1 RA TA DA SA unspecified (WDS)
*/
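Read row-wise, the table gives the meaning of each address field for every ToDS/FromDS combination. As a small illustration (tods/fromds stand in for the FCTL bits), the field carrying the destination address can be picked like this:

#include <stdio.h>

/* Which header field carries the DA, per the ToDS/FromDS table above. */
static const char *da_field(int tods, int fromds)
{
	if (!tods && !fromds)
		return "addr1 (IBSS/DLS)";
	if (!tods && fromds)
		return "addr1 (AP -> STA)";
	if (tods && !fromds)
		return "addr3 (AP <- STA)";
	return "addr3 (WDS)";
}

int main(void)
{
	printf("ToDS=1 FromDS=0: DA in %s\n", da_field(1, 0));
	return 0;
}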
#define FCS_LEN 4
#define IEEE80211_FCTL_VERS 0x0003
@@ -527,6 +543,8 @@ struct ieee80211_tim_ie {
u8 virtual_map[0];
} __attribute__ ((packed));
#define WLAN_SA_QUERY_TR_ID_LEN 16
struct ieee80211_mgmt {
__le16 frame_control;
__le16 duration;
@@ -646,6 +664,10 @@ struct ieee80211_mgmt {
u8 action_code;
u8 variable[0];
} __attribute__((packed)) mesh_action;
struct {
u8 action;
u8 trans_id[WLAN_SA_QUERY_TR_ID_LEN];
} __attribute__ ((packed)) sa_query;
} u;
} __attribute__ ((packed)) action;
} u;
@@ -655,6 +677,15 @@ struct ieee80211_mgmt {
#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
/* Management MIC information element (IEEE 802.11w) */
struct ieee80211_mmie {
u8 element_id;
u8 length;
__le16 key_id;
u8 sequence_number[6];
u8 mic[8];
} __attribute__ ((packed));
/* Control frames */
struct ieee80211_rts {
__le16 frame_control;
@@ -836,6 +867,7 @@ struct ieee80211_ht_info {
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
#define WLAN_AUTH_SHARED_KEY 1
#define WLAN_AUTH_FT 2
#define WLAN_AUTH_LEAP 128
#define WLAN_AUTH_CHALLENGE_LEN 128
@@ -899,6 +931,9 @@ enum ieee80211_statuscode {
/* 802.11g */
WLAN_STATUS_ASSOC_DENIED_NOSHORTTIME = 25,
WLAN_STATUS_ASSOC_DENIED_NODSSSOFDM = 26,
/* 802.11w */
WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY = 30,
WLAN_STATUS_ROBUST_MGMT_FRAME_POLICY_VIOLATION = 31,
/* 802.11i */
WLAN_STATUS_INVALID_IE = 40,
WLAN_STATUS_INVALID_GROUP_CIPHER = 41,
@@ -1018,6 +1053,8 @@ enum ieee80211_eid {
WLAN_EID_HT_INFORMATION = 61,
/* 802.11i */
WLAN_EID_RSN = 48,
WLAN_EID_TIMEOUT_INTERVAL = 56,
WLAN_EID_MMIE = 76 /* 802.11w */,
WLAN_EID_WPA = 221,
WLAN_EID_GENERIC = 221,
WLAN_EID_VENDOR_SPECIFIC = 221,
@@ -1030,6 +1067,8 @@ enum ieee80211_category {
WLAN_CATEGORY_QOS = 1,
WLAN_CATEGORY_DLS = 2,
WLAN_CATEGORY_BACK = 3,
WLAN_CATEGORY_PUBLIC = 4,
WLAN_CATEGORY_SA_QUERY = 8,
WLAN_CATEGORY_WMM = 17,
};
@@ -1104,6 +1143,12 @@ struct ieee80211_country_ie_triplet {
};
} __attribute__ ((packed));
enum ieee80211_timeout_interval_type {
WLAN_TIMEOUT_REASSOC_DEADLINE = 1 /* 802.11r */,
WLAN_TIMEOUT_KEY_LIFETIME = 2 /* 802.11r */,
WLAN_TIMEOUT_ASSOC_COMEBACK = 3 /* 802.11w */,
};
/* BACK action code */
enum ieee80211_back_actioncode {
WLAN_ACTION_ADDBA_REQ = 0,
@@ -1118,6 +1163,13 @@ enum ieee80211_back_parties {
WLAN_BACK_TIMER = 2,
};
/* SA Query action */
enum ieee80211_sa_query_action {
WLAN_ACTION_SA_QUERY_REQUEST = 0,
WLAN_ACTION_SA_QUERY_RESPONSE = 1,
};
/* A-MSDU 802.11n */
#define IEEE80211_QOS_CONTROL_A_MSDU_PRESENT 0x0080
@@ -1128,6 +1180,7 @@ enum ieee80211_back_parties {
/* reserved: 0x000FAC03 */
#define WLAN_CIPHER_SUITE_CCMP 0x000FAC04
#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05
#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06
#define WLAN_MAX_KEY_LEN 32
@@ -1185,4 +1238,149 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr)
return hdr->addr1;
}
/**
* ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame
* @hdr: the frame (buffer must include at least the first octet of payload)
*/
static inline bool ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
{
if (ieee80211_is_disassoc(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control))
return true;
if (ieee80211_is_action(hdr->frame_control)) {
u8 *category;
/*
* Action frames, excluding Public Action frames, are Robust
* Management Frames. However, if we are looking at a Protected
* frame, skip the check since the data may be encrypted and
* the frame has already been found to be a Robust Management
* Frame (by the other end).
*/
if (ieee80211_has_protected(hdr->frame_control))
return true;
category = ((u8 *) hdr) + 24;
return *category != WLAN_CATEGORY_PUBLIC;
}
return false;
}
/**
* ieee80211_fhss_chan_to_freq - get channel frequency
* @channel: the FHSS channel
*
* Convert IEEE802.11 FHSS channel to frequency (MHz)
* Ref IEEE 802.11-2007 section 14.6
*/
static inline int ieee80211_fhss_chan_to_freq(int channel)
{
if ((channel > 1) && (channel < 96))
return channel + 2400;
else
return -1;
}
/**
* ieee80211_freq_to_fhss_chan - get channel
* @freq: the channels frequency
*
* Convert frequency (MHz) to IEEE802.11 FHSS channel
* Ref IEEE 802.11-2007 section 14.6
*/
static inline int ieee80211_freq_to_fhss_chan(int freq)
{
if ((freq > 2401) && (freq < 2496))
return freq - 2400;
else
return -1;
}
/**
* ieee80211_dsss_chan_to_freq - get channel center frequency
* @channel: the DSSS channel
*
* Convert IEEE802.11 DSSS channel to the center frequency (MHz).
* Ref IEEE 802.11-2007 section 15.6
*/
static inline int ieee80211_dsss_chan_to_freq(int channel)
{
if ((channel > 0) && (channel < 14))
return 2407 + (channel * 5);
else if (channel == 14)
return 2484;
else
return -1;
}
/**
* ieee80211_freq_to_dsss_chan - get channel
* @freq: the frequency
*
* Convert frequency (MHz) to IEEE802.11 DSSS channel
* Ref IEEE 802.11-2007 section 15.6
*
* This routine selects the channel with the closest center frequency.
*/
static inline int ieee80211_freq_to_dsss_chan(int freq)
{
if ((freq >= 2410) && (freq < 2475))
return (freq - 2405) / 5;
else if ((freq >= 2482) && (freq < 2487))
return 14;
else
return -1;
}
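A worked example for the two DSSS helpers above: channel 6 maps to 2407 + 6 * 5 = 2437 MHz, channel 14 is the special case at 2484 MHz, and the reverse mapping picks the channel with the closest center frequency. A stand-alone copy for checking:

#include <stdio.h>

/* User-space copies of the two inline helpers above. */
static int dsss_chan_to_freq(int channel)
{
	if ((channel > 0) && (channel < 14))
		return 2407 + (channel * 5);
	else if (channel == 14)
		return 2484;
	return -1;
}

static int freq_to_dsss_chan(int freq)
{
	if ((freq >= 2410) && (freq < 2475))
		return (freq - 2405) / 5;
	else if ((freq >= 2482) && (freq < 2487))
		return 14;
	return -1;
}

int main(void)
{
	printf("ch 6 -> %d MHz -> ch %d\n",
	       dsss_chan_to_freq(6), freq_to_dsss_chan(2437));	/* 2437, 6 */
	printf("ch 14 -> %d MHz\n", dsss_chan_to_freq(14));	/* 2484 */
	return 0;
}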
/* Convert IEEE802.11 HR DSSS channel to frequency (MHz) and back
* Ref IEEE 802.11-2007 section 18.4.6.2
*
* The channels and frequencies are the same as those defined for DSSS
*/
#define ieee80211_hr_chan_to_freq(chan) ieee80211_dsss_chan_to_freq(chan)
#define ieee80211_freq_to_hr_chan(freq) ieee80211_freq_to_dsss_chan(freq)
/* Convert IEEE802.11 ERP channel to frequency (MHz) and back
* Ref IEEE 802.11-2007 section 19.4.2
*/
#define ieee80211_erp_chan_to_freq(chan) ieee80211_hr_chan_to_freq(chan)
#define ieee80211_freq_to_erp_chan(freq) ieee80211_freq_to_hr_chan(freq)
/**
* ieee80211_ofdm_chan_to_freq - get channel center frequency
* @s_freq: starting frequency == (dotChannelStartingFactor/2) MHz
* @channel: the OFDM channel
*
* Convert IEEE802.11 OFDM channel to center frequency (MHz)
* Ref IEEE 802.11-2007 section 17.3.8.3.2
*/
static inline int ieee80211_ofdm_chan_to_freq(int s_freq, int channel)
{
if ((channel > 0) && (channel <= 200) &&
(s_freq >= 4000))
return s_freq + (channel * 5);
else
return -1;
}
/**
* ieee80211_freq_to_ofdm_channel - get channel
* @s_freq: starting frequency == (dotChannelStartingFactor/2) MHz
* @freq: the frequency
*
* Convert frequency (MHz) to IEEE802.11 OFDM channel
* Ref IEEE 802.11-2007 section 17.3.8.3.2
*
* This routine selects the channel with the closest center frequency.
*/
static inline int ieee80211_freq_to_ofdm_chan(int s_freq, int freq)
{
if ((freq > (s_freq + 2)) && (freq <= (s_freq + 1202)) &&
(s_freq >= 4000))
return (freq + 2 - s_freq) / 5;
else
return -1;
}
#endif /* LINUX_IEEE80211_H */

View File

@@ -66,6 +66,7 @@
#define IFF_SLAVE_NEEDARP 0x40 /* need ARPs for validation */
#define IFF_ISATAP 0x80 /* ISATAP interface (RFC4214) */
#define IFF_MASTER_ARPMON 0x100 /* bonding master, ARP mon in use */
#define IFF_WAN_HDLC 0x200 /* WAN HDLC device */
#define IF_GET_IFACE 0x0001 /* for querying only */
#define IF_GET_PROTO 0x0002

View File

@@ -16,6 +16,7 @@
#ifndef _LINUX_IF_ARCNET_H
#define _LINUX_IF_ARCNET_H
#include <linux/types.h>
#include <linux/if_ether.h>
@@ -57,10 +58,10 @@
*/
struct arc_rfc1201
{
uint8_t proto; /* protocol ID field - varies */
uint8_t split_flag; /* for use with split packets */
__u8 proto; /* protocol ID field - varies */
__u8 split_flag; /* for use with split packets */
__be16 sequence; /* sequence number */
uint8_t payload[0]; /* space remaining in packet (504 bytes)*/
__u8 payload[0]; /* space remaining in packet (504 bytes)*/
};
#define RFC1201_HDR_SIZE 4
@@ -70,8 +71,8 @@ struct arc_rfc1201
*/
struct arc_rfc1051
{
uint8_t proto; /* ARC_P_RFC1051_ARP/RFC1051_IP */
uint8_t payload[0]; /* 507 bytes */
__u8 proto; /* ARC_P_RFC1051_ARP/RFC1051_IP */
__u8 payload[0]; /* 507 bytes */
};
#define RFC1051_HDR_SIZE 1
@@ -82,20 +83,20 @@ struct arc_rfc1051
*/
struct arc_eth_encap
{
uint8_t proto; /* Always ARC_P_ETHER */
__u8 proto; /* Always ARC_P_ETHER */
struct ethhdr eth; /* standard ethernet header (yuck!) */
uint8_t payload[0]; /* 493 bytes */
__u8 payload[0]; /* 493 bytes */
};
#define ETH_ENCAP_HDR_SIZE 14
struct arc_cap
{
uint8_t proto;
uint8_t cookie[sizeof(int)]; /* Actually NOT sent over the network */
__u8 proto;
__u8 cookie[sizeof(int)]; /* Actually NOT sent over the network */
union {
uint8_t ack;
uint8_t raw[0]; /* 507 bytes */
__u8 ack;
__u8 raw[0]; /* 507 bytes */
} mes;
};
@@ -109,7 +110,7 @@ struct arc_cap
*/
struct arc_hardware
{
uint8_t source, /* source ARCnet - filled in automagically */
__u8 source, /* source ARCnet - filled in automagically */
dest, /* destination ARCnet - 0 for broadcast */
offset[2]; /* offset bytes (some weird semantics) */
};
@@ -130,7 +131,7 @@ struct archdr
struct arc_rfc1051 rfc1051;
struct arc_eth_encap eth_encap;
struct arc_cap cap;
uint8_t raw[0]; /* 508 bytes */
__u8 raw[0]; /* 508 bytes */
} soft;
};

View File

@@ -17,7 +17,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IF_ETHER_H
#define _LINUX_IF_ETHER_H
@@ -25,7 +25,7 @@
/*
* IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
 * and FCS/CRC (frame check sequence).
 */
*/
#define ETH_ALEN 6 /* Octets in one ethernet addr */
@@ -78,12 +78,13 @@
#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */
#define ETH_P_TIPC 0x88CA /* TIPC */
#define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */
#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
/*
* Non DIX types. Won't clash for 1500 types.
*/
#define ETH_P_802_3 0x0001 /* Dummy type for 802.3 frames */
#define ETH_P_AX25 0x0002 /* Dummy protocol id for AX.25 */
#define ETH_P_ALL 0x0003 /* Every packet (be careful!!!) */
@@ -109,7 +110,7 @@
/*
* This is an Ethernet frame header.
*/
struct ethhdr {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
unsigned char h_source[ETH_ALEN]; /* source ether addr */

View File

@@ -153,7 +153,6 @@ struct frhdr
struct dlci_local
{
struct net_device_stats stats;
struct net_device *master;
struct net_device *slave;
struct dlci_conf config;

View File

@@ -26,7 +26,7 @@
*/
struct pppol2tp_addr
{
pid_t pid; /* pid that owns the fd.
__kernel_pid_t pid; /* pid that owns the fd.
* 0 => current */
int fd; /* FD of UDP socket to use */

View File

@@ -95,16 +95,16 @@ struct pppoe_tag {
} __attribute__ ((packed));
/* Tag identifiers */
#define PTT_EOL __constant_htons(0x0000)
#define PTT_SRV_NAME __constant_htons(0x0101)
#define PTT_AC_NAME __constant_htons(0x0102)
#define PTT_HOST_UNIQ __constant_htons(0x0103)
#define PTT_AC_COOKIE __constant_htons(0x0104)
#define PTT_VENDOR __constant_htons(0x0105)
#define PTT_RELAY_SID __constant_htons(0x0110)
#define PTT_SRV_ERR __constant_htons(0x0201)
#define PTT_SYS_ERR __constant_htons(0x0202)
#define PTT_GEN_ERR __constant_htons(0x0203)
#define PTT_EOL __cpu_to_be16(0x0000)
#define PTT_SRV_NAME __cpu_to_be16(0x0101)
#define PTT_AC_NAME __cpu_to_be16(0x0102)
#define PTT_HOST_UNIQ __cpu_to_be16(0x0103)
#define PTT_AC_COOKIE __cpu_to_be16(0x0104)
#define PTT_VENDOR __cpu_to_be16(0x0105)
#define PTT_RELAY_SID __cpu_to_be16(0x0110)
#define PTT_SRV_ERR __cpu_to_be16(0x0201)
#define PTT_SYS_ERR __cpu_to_be16(0x0202)
#define PTT_GEN_ERR __cpu_to_be16(0x0203)
struct pppoe_hdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)

View File

@@ -46,6 +46,8 @@
#define TUNSETOFFLOAD _IOW('T', 208, unsigned int)
#define TUNSETTXFILTER _IOW('T', 209, unsigned int)
#define TUNGETIFF _IOR('T', 210, unsigned int)
#define TUNGETSNDBUF _IOR('T', 211, int)
#define TUNSETSNDBUF _IOW('T', 212, int)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001

View File

@@ -16,14 +16,14 @@
#define SIOCDELPRL (SIOCDEVPRIVATE + 6)
#define SIOCCHGPRL (SIOCDEVPRIVATE + 7)
#define GRE_CSUM __constant_htons(0x8000)
#define GRE_ROUTING __constant_htons(0x4000)
#define GRE_KEY __constant_htons(0x2000)
#define GRE_SEQ __constant_htons(0x1000)
#define GRE_STRICT __constant_htons(0x0800)
#define GRE_REC __constant_htons(0x0700)
#define GRE_FLAGS __constant_htons(0x00F8)
#define GRE_VERSION __constant_htons(0x0007)
#define GRE_CSUM __cpu_to_be16(0x8000)
#define GRE_ROUTING __cpu_to_be16(0x4000)
#define GRE_KEY __cpu_to_be16(0x2000)
#define GRE_SEQ __cpu_to_be16(0x1000)
#define GRE_STRICT __cpu_to_be16(0x0800)
#define GRE_REC __cpu_to_be16(0x0700)
#define GRE_FLAGS __cpu_to_be16(0x00F8)
#define GRE_VERSION __cpu_to_be16(0x0007)
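The switch from __constant_htons() to __cpu_to_be16() keeps these constants big-endian at compile time using the modern byteorder helpers, so flag tests work directly on the on-the-wire field. A user-space sketch, with htons() standing in for __cpu_to_be16():

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htons() stands in for __cpu_to_be16() */

#define GRE_CSUM htons(0x8000)
#define GRE_KEY  htons(0x2000)

int main(void)
{
	uint16_t flags = htons(0x8000 | 0x2000);	/* as read off the wire */

	if (flags & GRE_CSUM)
		printf("checksum present\n");
	if (flags & GRE_KEY)
		printf("key present\n");
	return 0;
}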
struct ip_tunnel_parm
{

include/linux/ima.h (new file, 61 lines)
View File

@@ -0,0 +1,61 @@
/*
* Copyright (C) 2008 IBM Corporation
* Author: Mimi Zohar <zohar@us.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2 of the License.
*/
#ifndef _LINUX_IMA_H
#define _LINUX_IMA_H
#include <linux/fs.h>
struct linux_binprm;
#ifdef CONFIG_IMA
extern int ima_bprm_check(struct linux_binprm *bprm);
extern int ima_inode_alloc(struct inode *inode);
extern void ima_inode_free(struct inode *inode);
extern int ima_path_check(struct path *path, int mask);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
extern void ima_shm_check(struct file *file);
#else
static inline int ima_bprm_check(struct linux_binprm *bprm)
{
return 0;
}
static inline int ima_inode_alloc(struct inode *inode)
{
return 0;
}
static inline void ima_inode_free(struct inode *inode)
{
return;
}
static inline int ima_path_check(struct path *path, int mask)
{
return 0;
}
static inline void ima_file_free(struct file *file)
{
return;
}
static inline int ima_file_mmap(struct file *file, unsigned long prot)
{
return 0;
}
static inline void ima_shm_check(struct file *file)
{
return;
}
#endif /* CONFIG_IMA */
#endif /* _LINUX_IMA_H */
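Note the shape of the #else branch above: every hook collapses to a static inline no-op (returning success where a value is expected), so call sites never need CONFIG_IMA #ifdefs of their own. A stand-alone sketch of that idiom, with made-up names CONFIG_FEATURE and feature_check():

#include <stdio.h>

/* #define CONFIG_FEATURE */	/* uncomment to compile the real hook in */

#ifdef CONFIG_FEATURE
static int feature_check(const char *name)
{
	printf("checking %s\n", name);	/* the real implementation */
	return 0;
}
#else
static inline int feature_check(const char *name)
{
	(void)name;		/* feature compiled out: do nothing */
	return 0;		/* report success so callers proceed */
}
#endif

int main(void)
{
	return feature_check("demo");	/* call site needs no #ifdef */
}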

View File

@@ -108,6 +108,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER)
#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
#define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE)
#define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
struct in_ifaddr
{

View File

@@ -147,6 +147,7 @@ extern struct cred init_cred;
.nr_cpus_allowed = NR_CPUS, \
}, \
.tasks = LIST_HEAD_INIT(tsk.tasks), \
.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \
.ptraced = LIST_HEAD_INIT(tsk.ptraced), \
.ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
.real_parent = &tsk, \

View File

@@ -661,6 +661,7 @@ struct input_absinfo {
#define SW_DOCK 0x05 /* set = plugged into dock */
#define SW_LINEOUT_INSERT 0x06 /* set = inserted */
#define SW_JACK_PHYSICAL_INSERT 0x07 /* set = mechanical switch set */
#define SW_VIDEOOUT_INSERT 0x08 /* set = inserted */
#define SW_MAX 0x0f
#define SW_CNT (SW_MAX+1)

View File

@@ -123,7 +123,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
#define ecap_eim_support(e) ((e >> 4) & 0x1)
#define ecap_ir_support(e) ((e >> 3) & 0x1)
#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
/* IOTLB_REG */
#define DMA_TLB_FLUSH_GRANU_OFFSET 60
@@ -292,6 +292,8 @@ struct intel_iommu {
spinlock_t register_lock; /* protect register handling */
int seq_id; /* sequence id of the iommu */
int agaw; /* agaw of this iommu */
unsigned int irq;
unsigned char name[13]; /* Device Name */
#ifdef CONFIG_DMAR
unsigned long *domain_ids; /* bitmap of domains */
@@ -299,8 +301,6 @@ struct intel_iommu {
spinlock_t lock; /* protect context, domain ids */
struct root_entry *root_entry; /* virtual address */
unsigned int irq;
unsigned char name[7]; /* Device Name */
struct iommu_flush flush;
#endif
struct q_inval *qi; /* Queued invalidation info */
@@ -321,6 +321,7 @@ extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
extern int alloc_iommu(struct dmar_drhd_unit *drhd);
extern void free_iommu(struct intel_iommu *iommu);
extern int dmar_enable_qi(struct intel_iommu *iommu);
extern void dmar_disable_qi(struct intel_iommu *iommu);
extern void qi_global_iec(struct intel_iommu *iommu);
extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
@@ -331,11 +332,4 @@ extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int);
extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int);
extern int intel_map_sg(struct device *, struct scatterlist *, int, int);
extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int);
#endif

Some files were not shown because too many files have changed in this diff.