Merge commit 'v2.6.27-rc1' into x86/core

Conflicts:

	include/asm-x86/dma-mapping.h
	include/asm-x86/namei.h
	include/asm-x86/uaccess.h

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Author: Ingo Molnar
Date: 2008-07-30 19:33:48 +02:00
2069 changed files with 68780 additions and 24005 deletions


@@ -7,7 +7,6 @@
#include <linux/uio.h>
#include <asm/atomic.h>
#include <linux/uio.h>
#define AIO_MAXSEGS 4
#define AIO_KIOGRP_NR_ATOMIC 8


@@ -205,6 +205,8 @@ void block_invalidatepage(struct page *page, unsigned long offset);
int block_write_full_page(struct page *page, get_block_t *get_block,
struct writeback_control *wbc);
int block_read_full_page(struct page*, get_block_t*);
int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
unsigned long from);
int block_write_begin(struct file *, struct address_space *,
loff_t, unsigned, unsigned,
struct page **, void **, get_block_t*);


@@ -37,7 +37,7 @@ extern const struct file_operations coda_ioctl_operations;
/* operations shared over more than one file */
int coda_open(struct inode *i, struct file *f);
int coda_release(struct inode *i, struct file *f);
int coda_permission(struct inode *inode, int mask, struct nameidata *nd);
int coda_permission(struct inode *inode, int mask);
int coda_revalidate_inode(struct dentry *);
int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *);
int coda_setattr(struct dentry *, struct iattr *);


@@ -62,15 +62,7 @@
* int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids
*
* cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set
*ifdef CONFIG_HAS_CPUMASK_OF_CPU
* cpumask_of_cpu_ptr_declare(v) Declares cpumask_t *v
* cpumask_of_cpu_ptr_next(v, cpu) Sets v = &cpumask_of_cpu_map[cpu]
* cpumask_of_cpu_ptr(v, cpu) Combines above two operations
*else
* cpumask_of_cpu_ptr_declare(v) Declares cpumask_t _v and *v = &_v
* cpumask_of_cpu_ptr_next(v, cpu) Sets _v = cpumask_of_cpu(cpu)
* cpumask_of_cpu_ptr(v, cpu) Combines above two operations
*endif
* (can be used as an lvalue)
* CPU_MASK_ALL Initializer - all bits set
* CPU_MASK_NONE Initializer - no bits set
* unsigned long *cpus_addr(mask) Array of unsigned long's in mask
@@ -273,37 +265,30 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
/*
* Special-case data structure for "single bit set only" constant CPU masks.
*
* We pre-generate all the 64 (or 32) possible bit positions, with enough
* padding to the left and the right, and return the constant pointer
* appropriately offset.
*/
extern const unsigned long
cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
static inline const cpumask_t *get_cpu_mask(unsigned int cpu)
{
const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
p -= cpu / BITS_PER_LONG;
return (const cpumask_t *)p;
}
/*
* In cases where we take the address of the cpumask immediately,
* gcc optimizes it out (it's a constant) and there's no huge stack
* variable created:
*/
#define cpumask_of_cpu(cpu) ({ *get_cpu_mask(cpu); })
#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
extern cpumask_t *cpumask_of_cpu_map;
#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu])
#define cpumask_of_cpu_ptr(v, cpu) \
const cpumask_t *v = &cpumask_of_cpu(cpu)
#define cpumask_of_cpu_ptr_declare(v) \
const cpumask_t *v
#define cpumask_of_cpu_ptr_next(v, cpu) \
v = &cpumask_of_cpu(cpu)
#else
#define cpumask_of_cpu(cpu) \
({ \
typeof(_unused_cpumask_arg_) m; \
if (sizeof(m) == sizeof(unsigned long)) { \
m.bits[0] = 1UL<<(cpu); \
} else { \
cpus_clear(m); \
cpu_set((cpu), m); \
} \
m; \
})
#define cpumask_of_cpu_ptr(v, cpu) \
cpumask_t _##v = cpumask_of_cpu(cpu); \
const cpumask_t *v = &_##v
#define cpumask_of_cpu_ptr_declare(v) \
cpumask_t _##v; \
const cpumask_t *v = &_##v
#define cpumask_of_cpu_ptr_next(v, cpu) \
_##v = cpumask_of_cpu(cpu)
#endif
#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
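The new get_cpu_mask() works because kernel/cpu.c pre-generates a table in which row k (for k in 1..BITS_PER_LONG) has only its first word set to 1UL << (k-1), and an all-zero row 0 provides padding to back into. Backing the row pointer up by cpu/BITS_PER_LONG words places the single set word at the correct word index, with zero words borrowed from neighbouring rows on either side. A standalone userspace sketch of the same arithmetic, with made-up sizes:

	#include <stdio.h>

	#define BITS_PER_LONG (8 * sizeof(unsigned long))
	#define NR_CPUS 128
	#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

	/* row k holds 1UL << (k-1) in its first word, as kernel/cpu.c does */
	static unsigned long bit_bitmap[BITS_PER_LONG + 1][BITS_TO_LONGS(NR_CPUS)];

	static const unsigned long *get_mask(unsigned int cpu)
	{
		const unsigned long *p = bit_bitmap[1 + cpu % BITS_PER_LONG];
		return p - cpu / BITS_PER_LONG;
	}

	int main(void)
	{
		unsigned int k, w, cpu = 70;
		const unsigned long *mask;

		for (k = 1; k <= BITS_PER_LONG; k++)
			bit_bitmap[k][0] = 1UL << (k - 1);

		mask = get_mask(cpu);
		for (w = 0; w < BITS_TO_LONGS(NR_CPUS); w++)
			printf("word %u: %#lx\n", w, mask[w]); /* only bit 70 set */
		return 0;
	}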


@@ -60,6 +60,8 @@ extern int dir_notify_enable;
#define MAY_WRITE 2
#define MAY_READ 4
#define MAY_APPEND 8
#define MAY_ACCESS 16
#define MAY_OPEN 32
#define FMODE_READ 1
#define FMODE_WRITE 2
@@ -277,7 +279,7 @@ extern int dir_notify_enable;
#include <linux/types.h>
#include <linux/kdev_t.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/stat.h>
#include <linux/cache.h>
#include <linux/kobject.h>
@@ -318,22 +320,23 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
* Attribute flags. These should be or-ed together to figure out what
* has been changed!
*/
#define ATTR_MODE 1
#define ATTR_UID 2
#define ATTR_GID 4
#define ATTR_SIZE 8
#define ATTR_ATIME 16
#define ATTR_MTIME 32
#define ATTR_CTIME 64
#define ATTR_ATIME_SET 128
#define ATTR_MTIME_SET 256
#define ATTR_FORCE 512 /* Not a change, but a change it */
#define ATTR_ATTR_FLAG 1024
#define ATTR_KILL_SUID 2048
#define ATTR_KILL_SGID 4096
#define ATTR_FILE 8192
#define ATTR_KILL_PRIV 16384
#define ATTR_OPEN 32768 /* Truncating from open(O_TRUNC) */
#define ATTR_MODE (1 << 0)
#define ATTR_UID (1 << 1)
#define ATTR_GID (1 << 2)
#define ATTR_SIZE (1 << 3)
#define ATTR_ATIME (1 << 4)
#define ATTR_MTIME (1 << 5)
#define ATTR_CTIME (1 << 6)
#define ATTR_ATIME_SET (1 << 7)
#define ATTR_MTIME_SET (1 << 8)
#define ATTR_FORCE (1 << 9) /* Not a change, but a change it */
#define ATTR_ATTR_FLAG (1 << 10)
#define ATTR_KILL_SUID (1 << 11)
#define ATTR_KILL_SGID (1 << 12)
#define ATTR_FILE (1 << 13)
#define ATTR_KILL_PRIV (1 << 14)
#define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */
#define ATTR_TIMES_SET (1 << 16)
/*
* This is the Inode Attributes structure, used for notify_change(). It
@@ -440,6 +443,27 @@ static inline size_t iov_iter_count(struct iov_iter *i)
return i->count;
}
/*
* "descriptor" for what we're up to with a read.
* This allows us to use the same read code yet
* have multiple different users of the data that
* we read from a file.
*
* The simplest case just copies the data to user
* mode.
*/
typedef struct {
size_t written;
size_t count;
union {
char __user *buf;
void *data;
} arg;
int error;
} read_descriptor_t;
typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
unsigned long, unsigned long);
struct address_space_operations {
int (*writepage)(struct page *page, struct writeback_control *wbc);
@@ -481,6 +505,8 @@ struct address_space_operations {
int (*migratepage) (struct address_space *,
struct page *, struct page *);
int (*launder_page) (struct page *);
int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
unsigned long);
};
/*
@@ -499,7 +525,7 @@ struct backing_dev_info;
struct address_space {
struct inode *host; /* owner: inode, block_device */
struct radix_tree_root page_tree; /* radix tree of all pages */
rwlock_t tree_lock; /* and rwlock protecting it */
spinlock_t tree_lock; /* and lock protecting it */
unsigned int i_mmap_writable;/* count VM_SHARED mappings */
struct prio_tree_root i_mmap; /* tree of private and shared mappings */
struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
@@ -792,7 +818,7 @@ struct file {
#define f_dentry f_path.dentry
#define f_vfsmnt f_path.mnt
const struct file_operations *f_op;
atomic_t f_count;
atomic_long_t f_count;
unsigned int f_flags;
mode_t f_mode;
loff_t f_pos;
@@ -821,8 +847,8 @@ extern spinlock_t files_lock;
#define file_list_lock() spin_lock(&files_lock);
#define file_list_unlock() spin_unlock(&files_lock);
#define get_file(x) atomic_inc(&(x)->f_count)
#define file_count(x) atomic_read(&(x)->f_count)
#define get_file(x) atomic_long_inc(&(x)->f_count)
#define file_count(x) atomic_long_read(&(x)->f_count)
#ifdef CONFIG_DEBUG_WRITECOUNT
static inline void file_take_write(struct file *f)
@@ -1136,7 +1162,7 @@ extern int vfs_permission(struct nameidata *, int);
extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *);
extern int vfs_mkdir(struct inode *, struct dentry *, int);
extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t);
extern int vfs_symlink(struct inode *, struct dentry *, const char *, int);
extern int vfs_symlink(struct inode *, struct dentry *, const char *);
extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
extern int vfs_rmdir(struct inode *, struct dentry *);
extern int vfs_unlink(struct inode *, struct dentry *);
@@ -1195,27 +1221,6 @@ struct block_device_operations {
struct module *owner;
};
/*
* "descriptor" for what we're up to with a read.
* This allows us to use the same read code yet
* have multiple different users of the data that
* we read from a file.
*
* The simplest case just copies the data to user
* mode.
*/
typedef struct {
size_t written;
size_t count;
union {
char __user * buf;
void *data;
} arg;
int error;
} read_descriptor_t;
typedef int (*read_actor_t)(read_descriptor_t *, struct page *, unsigned long, unsigned long);
/* These macros are for out of kernel modules to test that
* the kernel supports the unlocked_ioctl and compat_ioctl
* fields in struct file_operations. */
@@ -1272,7 +1277,7 @@ struct inode_operations {
void * (*follow_link) (struct dentry *, struct nameidata *);
void (*put_link) (struct dentry *, struct nameidata *, void *);
void (*truncate) (struct inode *);
int (*permission) (struct inode *, int, struct nameidata *);
int (*permission) (struct inode *, int);
int (*setattr) (struct dentry *, struct iattr *);
int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
@@ -1696,9 +1701,9 @@ extern void init_special_inode(struct inode *, umode_t, dev_t);
extern void make_bad_inode(struct inode *);
extern int is_bad_inode(struct inode *);
extern const struct file_operations read_fifo_fops;
extern const struct file_operations write_fifo_fops;
extern const struct file_operations rdwr_fifo_fops;
extern const struct file_operations read_pipefifo_fops;
extern const struct file_operations write_pipefifo_fops;
extern const struct file_operations rdwr_pipefifo_fops;
extern int fs_may_remount_ro(struct super_block *);
@@ -1767,7 +1772,7 @@ extern int do_remount_sb(struct super_block *sb, int flags,
extern sector_t bmap(struct inode *, sector_t);
#endif
extern int notify_change(struct dentry *, struct iattr *);
extern int permission(struct inode *, int, struct nameidata *);
extern int inode_permission(struct inode *, int);
extern int generic_permission(struct inode *, int,
int (*check_acl)(struct inode *, int));
@@ -1831,7 +1836,7 @@ extern void clear_inode(struct inode *);
extern void destroy_inode(struct inode *);
extern struct inode *new_inode(struct super_block *);
extern int should_remove_suid(struct dentry *);
extern int remove_suid(struct dentry *);
extern int file_remove_suid(struct file *);
extern void __insert_inode_hash(struct inode *, unsigned long hashval);
extern void remove_inode_hash(struct inode *);
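read_descriptor_t and read_actor_t were moved above struct address_space_operations so the new ->is_partially_uptodate() hook can use them. A read actor consumes data found in a page; the "simplest case" the comment mentions copies it out. A hedged sketch of an actor that fills a kernel buffer carried in desc->arg.data (the names are illustrative, not part of the commit):

	static int kbuf_read_actor(read_descriptor_t *desc, struct page *page,
				   unsigned long offset, unsigned long size)
	{
		char *kaddr;

		if (size > desc->count)
			size = desc->count;

		kaddr = kmap_atomic(page, KM_USER0);
		memcpy((char *)desc->arg.data + desc->written, kaddr + offset, size);
		kunmap_atomic(kaddr, KM_USER0);

		desc->written += size;
		desc->count -= size;
		return size;	/* bytes consumed; returning 0 with desc->error set stops the read */
	}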


@@ -7,7 +7,7 @@ struct fs_struct {
atomic_t count;
rwlock_t lock;
int umask;
struct path root, pwd, altroot;
struct path root, pwd;
};
#define INIT_FS { \
@@ -19,7 +19,6 @@ struct fs_struct {
extern struct kmem_cache *fs_cachep;
extern void exit_fs(struct task_struct *);
extern void set_fs_altroot(void);
extern void set_fs_root(struct fs_struct *, struct path *);
extern void set_fs_pwd(struct fs_struct *, struct path *);
extern struct fs_struct *copy_fs_struct(struct fs_struct *);


@@ -273,7 +273,10 @@ struct hstate {};
#define huge_page_mask(h) PAGE_MASK
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
#define pages_per_huge_page(h) 1
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
return 1;
}
#endif
#endif /* _LINUX_HUGETLB_H */


@@ -758,7 +758,7 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
}
dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
if (!dma_mapping_error(dma_addr)) {
if (!dma_mapping_error(&c->pdev->dev, dma_addr)) {
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
*mptr++ = cpu_to_le32(0x7C020002);


@@ -169,6 +169,13 @@ extern void (*late_time_init)(void);
static initcall_t __initcall_##fn##id __used \
__attribute__((__section__(".initcall" level ".init"))) = fn
/*
* Early initcalls run before initializing SMP.
*
* Only for built-in code, not modules.
*/
#define early_initcall(fn) __define_initcall("early",fn,early)
/*
* A "pure" initcall has no dependencies on anything else, and purely
* initializes variables that couldn't be statically initialized.
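The new early_initcall() level runs before SMP initialization and is only valid for built-in code. A minimal illustrative use (the function name is made up):

	static int __init my_early_setup(void)
	{
		/* runs after core setup but before secondary CPUs start */
		return 0;
	}
	early_initcall(my_early_setup);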


@@ -8,3 +8,4 @@ extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
unsigned long align_mask);
extern void iommu_area_free(unsigned long *map, unsigned long start,
unsigned int nr);
extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len);


@@ -7,9 +7,6 @@
*
* For licensing information, see the file 'LICENCE' in the
* jffs2 directory.
*
* $Id: jffs2.h,v 1.38 2005/09/26 11:37:23 havasi Exp $
*
*/
#ifndef __LINUX_JFFS2_H__


@@ -83,6 +83,7 @@ struct kimage {
unsigned long start;
struct page *control_code_page;
struct page *swap_page;
unsigned long nr_segments;
struct kexec_segment segment[KEXEC_SEGMENT_MAX];
@@ -98,18 +99,20 @@ struct kimage {
unsigned int type : 1;
#define KEXEC_TYPE_DEFAULT 0
#define KEXEC_TYPE_CRASH 1
unsigned int preserve_context : 1;
};
/* kexec interface functions */
extern NORET_TYPE void machine_kexec(struct kimage *image) ATTRIB_NORET;
extern void machine_kexec(struct kimage *image);
extern int machine_kexec_prepare(struct kimage *image);
extern void machine_kexec_cleanup(struct kimage *image);
extern asmlinkage long sys_kexec_load(unsigned long entry,
unsigned long nr_segments,
struct kexec_segment __user *segments,
unsigned long flags);
extern int kernel_kexec(void);
#ifdef CONFIG_COMPAT
extern asmlinkage long compat_sys_kexec_load(unsigned long entry,
unsigned long nr_segments,
@@ -156,8 +159,9 @@ extern struct kimage *kexec_crash_image;
#define kexec_flush_icache_page(page)
#endif
#define KEXEC_ON_CRASH 0x00000001
#define KEXEC_ARCH_MASK 0xffff0000
#define KEXEC_ON_CRASH 0x00000001
#define KEXEC_PRESERVE_CONTEXT 0x00000002
#define KEXEC_ARCH_MASK 0xffff0000
/* These values match the ELF architecture values.
* Unless there is a good reason that should continue to be the case.
@@ -174,7 +178,12 @@ extern struct kimage *kexec_crash_image;
#define KEXEC_ARCH_MIPS_LE (10 << 16)
#define KEXEC_ARCH_MIPS ( 8 << 16)
#define KEXEC_FLAGS (KEXEC_ON_CRASH) /* List of defined/legal kexec flags */
/* List of defined/legal kexec flags */
#ifndef CONFIG_KEXEC_JUMP
#define KEXEC_FLAGS KEXEC_ON_CRASH
#else
#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT)
#endif
#define VMCOREINFO_BYTES (4096)
#define VMCOREINFO_NOTE_NAME "VMCOREINFO"

include/linux/mISDNdsp.h (new file, 37 lines)

@@ -0,0 +1,37 @@
#ifndef __mISDNdsp_H__
#define __mISDNdsp_H__
struct mISDN_dsp_element_arg {
char *name;
char *def;
char *desc;
};
struct mISDN_dsp_element {
char *name;
void *(*new)(const char *arg);
void (*free)(void *p);
void (*process_tx)(void *p, unsigned char *data, int len);
void (*process_rx)(void *p, unsigned char *data, int len);
int num_args;
struct mISDN_dsp_element_arg
*args;
};
extern int mISDN_dsp_element_register(struct mISDN_dsp_element *elem);
extern void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem);
struct dsp_features {
int hfc_id; /* unique id to identify the chip (or -1) */
int hfc_dtmf; /* set if HFCmulti card supports dtmf */
int hfc_loops; /* set if card supports tone loops */
int hfc_echocanhw; /* set if card supports echocancelation*/
int pcm_id; /* unique id to identify the pcm bus (or -1) */
int pcm_slots; /* number of slots on the pcm bus */
int pcm_banks; /* number of IO banks of pcm bus */
int unclocked; /* data is not clocked (has jitter/loss) */
int unordered; /* data is unordered (packets have index) */
};
#endif

include/linux/mISDNhw.h (new file, 193 lines)

@@ -0,0 +1,193 @@
/*
*
* Author Karsten Keil <kkeil@novell.com>
*
* Basic declarations for the mISDN HW channels
*
* Copyright 2008 by Karsten Keil <kkeil@novell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef MISDNHW_H
#define MISDNHW_H
#include <linux/mISDNif.h>
#include <linux/timer.h>
/*
* HW DEBUG 0xHHHHGGGG
* H - hardware driver specific bits
* G - for all drivers
*/
#define DEBUG_HW 0x00000001
#define DEBUG_HW_OPEN 0x00000002
#define DEBUG_HW_DCHANNEL 0x00000100
#define DEBUG_HW_DFIFO 0x00000200
#define DEBUG_HW_BCHANNEL 0x00001000
#define DEBUG_HW_BFIFO 0x00002000
#define MAX_DFRAME_LEN_L1 300
#define MAX_MON_FRAME 32
#define MAX_LOG_SPACE 2048
#define MISDN_COPY_SIZE 32
/* channel->Flags bit field */
#define FLG_TX_BUSY 0 /* tx_buf in use */
#define FLG_TX_NEXT 1 /* next_skb in use */
#define FLG_L1_BUSY 2 /* L1 is permanent busy */
#define FLG_L2_ACTIVATED 3 /* activated from L2 */
#define FLG_OPEN 5 /* channel is in use */
#define FLG_ACTIVE 6 /* channel is activated */
#define FLG_BUSY_TIMER 7
/* channel type */
#define FLG_DCHANNEL 8 /* channel is D-channel */
#define FLG_BCHANNEL 9 /* channel is B-channel */
#define FLG_ECHANNEL 10 /* channel is E-channel */
#define FLG_TRANSPARENT 12 /* channel use transparent data */
#define FLG_HDLC 13 /* channel use hdlc data */
#define FLG_L2DATA 14 /* channel use L2 DATA primitivs */
#define FLG_ORIGIN 15 /* channel is on origin site */
/* channel specific stuff */
/* arcofi specific */
#define FLG_ARCOFI_TIMER 16
#define FLG_ARCOFI_ERROR 17
/* isar specific */
#define FLG_INITIALIZED 16
#define FLG_DLEETX 17
#define FLG_LASTDLE 18
#define FLG_FIRST 19
#define FLG_LASTDATA 20
#define FLG_NMD_DATA 21
#define FLG_FTI_RUN 22
#define FLG_LL_OK 23
#define FLG_LL_CONN 24
#define FLG_DTMFSEND 25
/* workq events */
#define FLG_RECVQUEUE 30
#define FLG_PHCHANGE 31
#define schedule_event(s, ev) do { \
test_and_set_bit(ev, &((s)->Flags)); \
schedule_work(&((s)->workq)); \
} while (0)
struct dchannel {
struct mISDNdevice dev;
u_long Flags;
struct work_struct workq;
void (*phfunc) (struct dchannel *);
u_int state;
void *l1;
/* HW access */
u_char (*read_reg) (void *, u_char);
void (*write_reg) (void *, u_char, u_char);
void (*read_fifo) (void *, u_char *, int);
void (*write_fifo) (void *, u_char *, int);
void *hw;
int slot; /* multiport card channel slot */
struct timer_list timer;
/* receive data */
struct sk_buff *rx_skb;
int maxlen;
/* send data */
struct sk_buff_head squeue;
struct sk_buff_head rqueue;
struct sk_buff *tx_skb;
int tx_idx;
int debug;
/* statistics */
int err_crc;
int err_tx;
int err_rx;
};
typedef int (dchannel_l1callback)(struct dchannel *, u_int);
extern int create_l1(struct dchannel *, dchannel_l1callback *);
/* private L1 commands */
#define INFO0 0x8002
#define INFO1 0x8102
#define INFO2 0x8202
#define INFO3_P8 0x8302
#define INFO3_P10 0x8402
#define INFO4_P8 0x8502
#define INFO4_P10 0x8602
#define LOSTFRAMING 0x8702
#define ANYSIGNAL 0x8802
#define HW_POWERDOWN 0x8902
#define HW_RESET_REQ 0x8a02
#define HW_POWERUP_REQ 0x8b02
#define HW_DEACT_REQ 0x8c02
#define HW_ACTIVATE_REQ 0x8e02
#define HW_D_NOBLOCKED 0x8f02
#define HW_RESET_IND 0x9002
#define HW_POWERUP_IND 0x9102
#define HW_DEACT_IND 0x9202
#define HW_ACTIVATE_IND 0x9302
#define HW_DEACT_CNF 0x9402
#define HW_TESTLOOP 0x9502
#define HW_TESTRX_RAW 0x9602
#define HW_TESTRX_HDLC 0x9702
#define HW_TESTRX_OFF 0x9802
struct layer1;
extern int l1_event(struct layer1 *, u_int);
struct bchannel {
struct mISDNchannel ch;
int nr;
u_long Flags;
struct work_struct workq;
u_int state;
/* HW access */
u_char (*read_reg) (void *, u_char);
void (*write_reg) (void *, u_char, u_char);
void (*read_fifo) (void *, u_char *, int);
void (*write_fifo) (void *, u_char *, int);
void *hw;
int slot; /* multiport card channel slot */
struct timer_list timer;
/* receive data */
struct sk_buff *rx_skb;
int maxlen;
/* send data */
struct sk_buff *next_skb;
struct sk_buff *tx_skb;
struct sk_buff_head rqueue;
int rcount;
int tx_idx;
int debug;
/* statistics */
int err_crc;
int err_tx;
int err_rx;
};
extern int mISDN_initdchannel(struct dchannel *, int, void *);
extern int mISDN_initbchannel(struct bchannel *, int);
extern int mISDN_freedchannel(struct dchannel *);
extern int mISDN_freebchannel(struct bchannel *);
extern void queue_ch_frame(struct mISDNchannel *, u_int,
int, struct sk_buff *);
extern int dchannel_senddata(struct dchannel *, struct sk_buff *);
extern int bchannel_senddata(struct bchannel *, struct sk_buff *);
extern void recv_Dchannel(struct dchannel *);
extern void recv_Bchannel(struct bchannel *);
extern void recv_Dchannel_skb(struct dchannel *, struct sk_buff *);
extern void recv_Bchannel_skb(struct bchannel *, struct sk_buff *);
extern void confirm_Bsend(struct bchannel *bch);
extern int get_next_bframe(struct bchannel *);
extern int get_next_dframe(struct dchannel *);
#endif

include/linux/mISDNif.h (new file, 487 lines)

@@ -0,0 +1,487 @@
/*
*
* Author Karsten Keil <kkeil@novell.com>
*
* Copyright 2008 by Karsten Keil <kkeil@novell.com>
*
* This code is free software; you can redistribute it and/or modify
* it under the terms of the GNU LESSER GENERAL PUBLIC LICENSE
* version 2.1 as published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU LESSER GENERAL PUBLIC LICENSE for more details.
*
*/
#ifndef mISDNIF_H
#define mISDNIF_H
#include <stdarg.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/socket.h>
/*
* ABI Version 32 bit
*
* <8 bit> Major version
* - changed if any interface become backwards incompatible
*
* <8 bit> Minor version
* - changed if any interface is extended but backwards compatible
*
* <16 bit> Release number
* - should be incremented on every checkin
*/
#define MISDN_MAJOR_VERSION 1
#define MISDN_MINOR_VERSION 0
#define MISDN_RELEASE 18
/* primitives for information exchange
* generell format
* <16 bit 0 >
* <8 bit command>
* BIT 8 = 1 LAYER private
* BIT 7 = 1 answer
* BIT 6 = 1 DATA
* <8 bit target layer mask>
*
* Layer = 00 is reserved for general commands
Layer = 01 L2 -> HW
Layer = 02 HW -> L2
Layer = 04 L3 -> L2
Layer = 08 L2 -> L3
* Layer = FF is reserved for broadcast commands
*/
#define MISDN_CMDMASK 0xff00
#define MISDN_LAYERMASK 0x00ff
/* generell commands */
#define OPEN_CHANNEL 0x0100
#define CLOSE_CHANNEL 0x0200
#define CONTROL_CHANNEL 0x0300
#define CHECK_DATA 0x0400
/* layer 2 -> layer 1 */
#define PH_ACTIVATE_REQ 0x0101
#define PH_DEACTIVATE_REQ 0x0201
#define PH_DATA_REQ 0x2001
#define MPH_ACTIVATE_REQ 0x0501
#define MPH_DEACTIVATE_REQ 0x0601
#define MPH_INFORMATION_REQ 0x0701
#define PH_CONTROL_REQ 0x0801
/* layer 1 -> layer 2 */
#define PH_ACTIVATE_IND 0x0102
#define PH_ACTIVATE_CNF 0x4102
#define PH_DEACTIVATE_IND 0x0202
#define PH_DEACTIVATE_CNF 0x4202
#define PH_DATA_IND 0x2002
#define MPH_ACTIVATE_IND 0x0502
#define MPH_DEACTIVATE_IND 0x0602
#define MPH_INFORMATION_IND 0x0702
#define PH_DATA_CNF 0x6002
#define PH_CONTROL_IND 0x0802
#define PH_CONTROL_CNF 0x4802
/* layer 3 -> layer 2 */
#define DL_ESTABLISH_REQ 0x1004
#define DL_RELEASE_REQ 0x1104
#define DL_DATA_REQ 0x3004
#define DL_UNITDATA_REQ 0x3104
#define DL_INFORMATION_REQ 0x0004
/* layer 2 -> layer 3 */
#define DL_ESTABLISH_IND 0x1008
#define DL_ESTABLISH_CNF 0x5008
#define DL_RELEASE_IND 0x1108
#define DL_RELEASE_CNF 0x5108
#define DL_DATA_IND 0x3008
#define DL_UNITDATA_IND 0x3108
#define DL_INFORMATION_IND 0x0008
/* intern layer 2 managment */
#define MDL_ASSIGN_REQ 0x1804
#define MDL_ASSIGN_IND 0x1904
#define MDL_REMOVE_REQ 0x1A04
#define MDL_REMOVE_IND 0x1B04
#define MDL_STATUS_UP_IND 0x1C04
#define MDL_STATUS_DOWN_IND 0x1D04
#define MDL_STATUS_UI_IND 0x1E04
#define MDL_ERROR_IND 0x1F04
#define MDL_ERROR_RSP 0x5F04
/* DL_INFORMATION_IND types */
#define DL_INFO_L2_CONNECT 0x0001
#define DL_INFO_L2_REMOVED 0x0002
/* PH_CONTROL types */
/* TOUCH TONE IS 0x20XX XX "0"..."9", "A","B","C","D","*","#" */
#define DTMF_TONE_VAL 0x2000
#define DTMF_TONE_MASK 0x007F
#define DTMF_TONE_START 0x2100
#define DTMF_TONE_STOP 0x2200
#define DTMF_HFC_COEF 0x4000
#define DSP_CONF_JOIN 0x2403
#define DSP_CONF_SPLIT 0x2404
#define DSP_RECEIVE_OFF 0x2405
#define DSP_RECEIVE_ON 0x2406
#define DSP_ECHO_ON 0x2407
#define DSP_ECHO_OFF 0x2408
#define DSP_MIX_ON 0x2409
#define DSP_MIX_OFF 0x240a
#define DSP_DELAY 0x240b
#define DSP_JITTER 0x240c
#define DSP_TXDATA_ON 0x240d
#define DSP_TXDATA_OFF 0x240e
#define DSP_TX_DEJITTER 0x240f
#define DSP_TX_DEJ_OFF 0x2410
#define DSP_TONE_PATT_ON 0x2411
#define DSP_TONE_PATT_OFF 0x2412
#define DSP_VOL_CHANGE_TX 0x2413
#define DSP_VOL_CHANGE_RX 0x2414
#define DSP_BF_ENABLE_KEY 0x2415
#define DSP_BF_DISABLE 0x2416
#define DSP_BF_ACCEPT 0x2416
#define DSP_BF_REJECT 0x2417
#define DSP_PIPELINE_CFG 0x2418
#define HFC_VOL_CHANGE_TX 0x2601
#define HFC_VOL_CHANGE_RX 0x2602
#define HFC_SPL_LOOP_ON 0x2603
#define HFC_SPL_LOOP_OFF 0x2604
/* DSP_TONE_PATT_ON parameter */
#define TONE_OFF 0x0000
#define TONE_GERMAN_DIALTONE 0x0001
#define TONE_GERMAN_OLDDIALTONE 0x0002
#define TONE_AMERICAN_DIALTONE 0x0003
#define TONE_GERMAN_DIALPBX 0x0004
#define TONE_GERMAN_OLDDIALPBX 0x0005
#define TONE_AMERICAN_DIALPBX 0x0006
#define TONE_GERMAN_RINGING 0x0007
#define TONE_GERMAN_OLDRINGING 0x0008
#define TONE_AMERICAN_RINGPBX 0x000b
#define TONE_GERMAN_RINGPBX 0x000c
#define TONE_GERMAN_OLDRINGPBX 0x000d
#define TONE_AMERICAN_RINGING 0x000e
#define TONE_GERMAN_BUSY 0x000f
#define TONE_GERMAN_OLDBUSY 0x0010
#define TONE_AMERICAN_BUSY 0x0011
#define TONE_GERMAN_HANGUP 0x0012
#define TONE_GERMAN_OLDHANGUP 0x0013
#define TONE_AMERICAN_HANGUP 0x0014
#define TONE_SPECIAL_INFO 0x0015
#define TONE_GERMAN_GASSENBESETZT 0x0016
#define TONE_GERMAN_AUFSCHALTTON 0x0016
/* MPH_INFORMATION_IND */
#define L1_SIGNAL_LOS_OFF 0x0010
#define L1_SIGNAL_LOS_ON 0x0011
#define L1_SIGNAL_AIS_OFF 0x0012
#define L1_SIGNAL_AIS_ON 0x0013
#define L1_SIGNAL_RDI_OFF 0x0014
#define L1_SIGNAL_RDI_ON 0x0015
#define L1_SIGNAL_SLIP_RX 0x0020
#define L1_SIGNAL_SLIP_TX 0x0021
/*
* protocol ids
* D channel 1-31
* B channel 33 - 63
*/
#define ISDN_P_NONE 0
#define ISDN_P_BASE 0
#define ISDN_P_TE_S0 0x01
#define ISDN_P_NT_S0 0x02
#define ISDN_P_TE_E1 0x03
#define ISDN_P_NT_E1 0x04
#define ISDN_P_LAPD_TE 0x10
#define ISDN_P_LAPD_NT 0x11
#define ISDN_P_B_MASK 0x1f
#define ISDN_P_B_START 0x20
#define ISDN_P_B_RAW 0x21
#define ISDN_P_B_HDLC 0x22
#define ISDN_P_B_X75SLP 0x23
#define ISDN_P_B_L2DTMF 0x24
#define ISDN_P_B_L2DSP 0x25
#define ISDN_P_B_L2DSPHDLC 0x26
#define OPTION_L2_PMX 1
#define OPTION_L2_PTP 2
#define OPTION_L2_FIXEDTEI 3
#define OPTION_L2_CLEANUP 4
/* should be in sync with linux/kobject.h:KOBJ_NAME_LEN */
#define MISDN_MAX_IDLEN 20
struct mISDNhead {
unsigned int prim;
unsigned int id;
} __attribute__((packed));
#define MISDN_HEADER_LEN sizeof(struct mISDNhead)
#define MAX_DATA_SIZE 2048
#define MAX_DATA_MEM (MAX_DATA_SIZE + MISDN_HEADER_LEN)
#define MAX_DFRAME_LEN 260
#define MISDN_ID_ADDR_MASK 0xFFFF
#define MISDN_ID_TEI_MASK 0xFF00
#define MISDN_ID_SAPI_MASK 0x00FF
#define MISDN_ID_TEI_ANY 0x7F00
#define MISDN_ID_ANY 0xFFFF
#define MISDN_ID_NONE 0xFFFE
#define GROUP_TEI 127
#define TEI_SAPI 63
#define CTRL_SAPI 0
#define MISDN_CHMAP_SIZE 4
#define SOL_MISDN 0
struct sockaddr_mISDN {
sa_family_t family;
unsigned char dev;
unsigned char channel;
unsigned char sapi;
unsigned char tei;
};
/* timer device ioctl */
#define IMADDTIMER _IOR('I', 64, int)
#define IMDELTIMER _IOR('I', 65, int)
/* socket ioctls */
#define IMGETVERSION _IOR('I', 66, int)
#define IMGETCOUNT _IOR('I', 67, int)
#define IMGETDEVINFO _IOR('I', 68, int)
#define IMCTRLREQ _IOR('I', 69, int)
#define IMCLEAR_L2 _IOR('I', 70, int)
struct mISDNversion {
unsigned char major;
unsigned char minor;
unsigned short release;
};
struct mISDN_devinfo {
u_int id;
u_int Dprotocols;
u_int Bprotocols;
u_int protocol;
u_long channelmap[MISDN_CHMAP_SIZE];
u_int nrbchan;
char name[MISDN_MAX_IDLEN];
};
/* CONTROL_CHANNEL parameters */
#define MISDN_CTRL_GETOP 0x0000
#define MISDN_CTRL_LOOP 0x0001
#define MISDN_CTRL_CONNECT 0x0002
#define MISDN_CTRL_DISCONNECT 0x0004
#define MISDN_CTRL_PCMCONNECT 0x0010
#define MISDN_CTRL_PCMDISCONNECT 0x0020
#define MISDN_CTRL_SETPEER 0x0040
#define MISDN_CTRL_UNSETPEER 0x0080
#define MISDN_CTRL_RX_OFF 0x0100
#define MISDN_CTRL_HW_FEATURES_OP 0x2000
#define MISDN_CTRL_HW_FEATURES 0x2001
#define MISDN_CTRL_HFC_OP 0x4000
#define MISDN_CTRL_HFC_PCM_CONN 0x4001
#define MISDN_CTRL_HFC_PCM_DISC 0x4002
#define MISDN_CTRL_HFC_CONF_JOIN 0x4003
#define MISDN_CTRL_HFC_CONF_SPLIT 0x4004
#define MISDN_CTRL_HFC_RECEIVE_OFF 0x4005
#define MISDN_CTRL_HFC_RECEIVE_ON 0x4006
#define MISDN_CTRL_HFC_ECHOCAN_ON 0x4007
#define MISDN_CTRL_HFC_ECHOCAN_OFF 0x4008
/* socket options */
#define MISDN_TIME_STAMP 0x0001
struct mISDN_ctrl_req {
int op;
int channel;
int p1;
int p2;
};
/* muxer options */
#define MISDN_OPT_ALL 1
#define MISDN_OPT_TEIMGR 2
#ifdef __KERNEL__
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <net/sock.h>
#include <linux/completion.h>
#define DEBUG_CORE 0x000000ff
#define DEBUG_CORE_FUNC 0x00000002
#define DEBUG_SOCKET 0x00000004
#define DEBUG_MANAGER 0x00000008
#define DEBUG_SEND_ERR 0x00000010
#define DEBUG_MSG_THREAD 0x00000020
#define DEBUG_QUEUE_FUNC 0x00000040
#define DEBUG_L1 0x0000ff00
#define DEBUG_L1_FSM 0x00000200
#define DEBUG_L2 0x00ff0000
#define DEBUG_L2_FSM 0x00020000
#define DEBUG_L2_CTRL 0x00040000
#define DEBUG_L2_RECV 0x00080000
#define DEBUG_L2_TEI 0x00100000
#define DEBUG_L2_TEIFSM 0x00200000
#define DEBUG_TIMER 0x01000000
#define mISDN_HEAD_P(s) ((struct mISDNhead *)&s->cb[0])
#define mISDN_HEAD_PRIM(s) (((struct mISDNhead *)&s->cb[0])->prim)
#define mISDN_HEAD_ID(s) (((struct mISDNhead *)&s->cb[0])->id)
/* socket states */
#define MISDN_OPEN 1
#define MISDN_BOUND 2
#define MISDN_CLOSED 3
struct mISDNchannel;
struct mISDNdevice;
struct mISDNstack;
struct channel_req {
u_int protocol;
struct sockaddr_mISDN adr;
struct mISDNchannel *ch;
};
typedef int (ctrl_func_t)(struct mISDNchannel *, u_int, void *);
typedef int (send_func_t)(struct mISDNchannel *, struct sk_buff *);
typedef int (create_func_t)(struct channel_req *);
struct Bprotocol {
struct list_head list;
char *name;
u_int Bprotocols;
create_func_t *create;
};
struct mISDNchannel {
struct list_head list;
u_int protocol;
u_int nr;
u_long opt;
u_int addr;
struct mISDNstack *st;
struct mISDNchannel *peer;
send_func_t *send;
send_func_t *recv;
ctrl_func_t *ctrl;
};
struct mISDN_sock_list {
struct hlist_head head;
rwlock_t lock;
};
struct mISDN_sock {
struct sock sk;
struct mISDNchannel ch;
u_int cmask;
struct mISDNdevice *dev;
};
struct mISDNdevice {
struct mISDNchannel D;
u_int id;
char name[MISDN_MAX_IDLEN];
u_int Dprotocols;
u_int Bprotocols;
u_int nrbchan;
u_long channelmap[MISDN_CHMAP_SIZE];
struct list_head bchannels;
struct mISDNchannel *teimgr;
struct device dev;
};
struct mISDNstack {
u_long status;
struct mISDNdevice *dev;
struct task_struct *thread;
struct completion *notify;
wait_queue_head_t workq;
struct sk_buff_head msgq;
struct list_head layer2;
struct mISDNchannel *layer1;
struct mISDNchannel own;
struct mutex lmutex; /* protect lists */
struct mISDN_sock_list l1sock;
#ifdef MISDN_MSG_STATS
u_int msg_cnt;
u_int sleep_cnt;
u_int stopped_cnt;
#endif
};
/* global alloc/queue dunctions */
static inline struct sk_buff *
mI_alloc_skb(unsigned int len, gfp_t gfp_mask)
{
struct sk_buff *skb;
skb = alloc_skb(len + MISDN_HEADER_LEN, gfp_mask);
if (likely(skb))
skb_reserve(skb, MISDN_HEADER_LEN);
return skb;
}
static inline struct sk_buff *
_alloc_mISDN_skb(u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask)
{
struct sk_buff *skb = mI_alloc_skb(len, gfp_mask);
struct mISDNhead *hh;
if (!skb)
return NULL;
if (len)
memcpy(skb_put(skb, len), dp, len);
hh = mISDN_HEAD_P(skb);
hh->prim = prim;
hh->id = id;
return skb;
}
static inline void
_queue_data(struct mISDNchannel *ch, u_int prim,
u_int id, u_int len, void *dp, gfp_t gfp_mask)
{
struct sk_buff *skb;
if (!ch->peer)
return;
skb = _alloc_mISDN_skb(prim, id, len, dp, gfp_mask);
if (!skb)
return;
if (ch->recv(ch->peer, skb))
dev_kfree_skb(skb);
}
/* global register/unregister functions */
extern int mISDN_register_device(struct mISDNdevice *, char *name);
extern void mISDN_unregister_device(struct mISDNdevice *);
extern int mISDN_register_Bprotocol(struct Bprotocol *);
extern void mISDN_unregister_Bprotocol(struct Bprotocol *);
extern void set_channel_address(struct mISDNchannel *, u_int, u_int);
#endif /* __KERNEL__ */
#endif /* mISDNIF_H */
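A hedged sketch of how a driver might use the helpers above to push a received D-channel frame up the stack; the wrapper name and the id value are illustrative:

	static void deliver_dframe(struct mISDNchannel *ch, void *buf, int len)
	{
		/* builds an skb whose mISDNhead primitive is PH_DATA_IND and
		 * hands it to ch->peer via ch->recv() */
		_queue_data(ch, PH_DATA_IND, 0, len, buf, GFP_ATOMIC);
	}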


@@ -61,8 +61,6 @@ struct maple_device {
struct maple_driver {
unsigned long function;
int (*connect) (struct maple_device * dev);
void (*disconnect) (struct maple_device * dev);
struct device_driver drv;
};


@@ -263,6 +263,10 @@ struct memstick_dev {
/* Get next request from the media driver. */
int (*next_request)(struct memstick_dev *card,
struct memstick_request **mrq);
/* Tell the media driver to stop doing things */
void (*stop)(struct memstick_dev *card);
/* Allow the media driver to continue */
void (*start)(struct memstick_dev *card);
struct device dev;
};
@@ -284,7 +288,7 @@ struct memstick_host {
/* Notify the host that some requests are pending. */
void (*request)(struct memstick_host *host);
/* Set host IO parameters (power, clock, etc). */
void (*set_param)(struct memstick_host *host,
int (*set_param)(struct memstick_host *host,
enum memstick_param param,
int value);
unsigned long private[0] ____cacheline_aligned;


@@ -1,5 +1,3 @@
#ifndef MFD_CORE_H
#define MFD_CORE_H
/*
* drivers/mfd/mfd-core.h
*
@@ -13,6 +11,9 @@
*
*/
#ifndef MFD_CORE_H
#define MFD_CORE_H
#include <linux/platform_device.h>
/*
@@ -28,7 +29,13 @@ struct mfd_cell {
int (*suspend)(struct platform_device *dev);
int (*resume)(struct platform_device *dev);
void *driver_data; /* driver-specific data */
/* driver-specific data for MFD-aware "cell" drivers */
void *driver_data;
/* platform_data can be used to either pass data to "generic"
driver or as a hook to mfd_cell for the "cell" drivers */
void *platform_data;
size_t data_size;
/*
* This resources can be specified relatievly to the parent device.
@@ -38,18 +45,11 @@ struct mfd_cell {
const struct resource *resources;
};
static inline struct mfd_cell *
mfd_get_cell(struct platform_device *pdev)
{
return (struct mfd_cell *)pdev->dev.platform_data;
}
extern int mfd_add_devices(struct device *parent, int id,
const struct mfd_cell *cells, int n_devs,
struct resource *mem_base,
int irq_base);
extern int mfd_add_devices(
struct platform_device *parent,
const struct mfd_cell *cells, int n_devs,
struct resource *mem_base,
int irq_base);
extern void mfd_remove_devices(struct platform_device *parent);
extern void mfd_remove_devices(struct device *parent);
#endif


@@ -164,11 +164,13 @@ enum {
MLX4_WQE_CTRL_SOLICITED = 1 << 1,
MLX4_WQE_CTRL_IP_CSUM = 1 << 4,
MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5,
MLX4_WQE_CTRL_INS_VLAN = 1 << 6,
};
struct mlx4_wqe_ctrl_seg {
__be32 owner_opcode;
u8 reserved2[3];
__be16 vlan_tag;
u8 ins_vlan;
u8 fence_size;
/*
* High 24 bits are SRC remote buffer; low 8 bits are flags:


@@ -810,7 +810,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long);
extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned long offset);
@@ -833,6 +832,39 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
struct vm_area_struct **pprev, unsigned long start,
unsigned long end, unsigned long newflags);
#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST
/*
* get_user_pages_fast provides equivalent functionality to get_user_pages,
* operating on current and current->mm (force=0 and doesn't return any vmas).
*
* get_user_pages_fast may take mmap_sem and page tables, so no assumptions
* can be made about locking. get_user_pages_fast is to be implemented in a
* way that is advantageous (vs get_user_pages()) when the user memory area is
* already faulted in and present in ptes. However if the pages have to be
* faulted in, it may turn out to be slightly slower).
*/
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
#else
/*
* Should probably be moved to asm-generic, and architectures can include it if
* they don't implement their own get_user_pages_fast.
*/
#define get_user_pages_fast(start, nr_pages, write, pages) \
({ \
struct mm_struct *mm = current->mm; \
int ret; \
\
down_read(&mm->mmap_sem); \
ret = get_user_pages(current, mm, start, nr_pages, \
write, 0, pages, NULL); \
up_read(&mm->mmap_sem); \
\
ret; \
})
#endif
/*
* A callback you can register to apply pressure to ageable caches.
*
@@ -1072,6 +1104,9 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
unsigned long addr, unsigned long len, pgoff_t pgoff);
extern void exit_mmap(struct mm_struct *);
extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);
#ifdef CONFIG_PROC_FS
/* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
extern void added_exe_file_vma(struct mm_struct *mm);
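A hedged sketch of calling the helper declared above: pin a single writable user page without taking mmap_sem explicitly, then drop the reference (error handling kept minimal; the wrapper name is made up):

	static int pin_one_page(unsigned long addr, struct page **page)
	{
		int ret = get_user_pages_fast(addr, 1, 1, page);

		if (ret < 1)
			return ret < 0 ? ret : -EFAULT;
		/* ... access the pinned page ... */
		put_page(*page);
		return 0;
	}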


@@ -10,6 +10,7 @@
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -253,6 +254,9 @@ struct mm_struct {
struct file *exe_file;
unsigned long num_exe_file_vmas;
#endif
#ifdef CONFIG_MMU_NOTIFIER
struct mmu_notifier_mm *mmu_notifier_mm;
#endif
};
#endif /* _LINUX_MM_TYPES_H */


@@ -111,6 +111,8 @@ struct mmc_card {
unsigned num_info; /* number of info strings */
const char **info; /* info strings */
struct sdio_func_tuple *tuples; /* unknown common tuples */
struct dentry *debugfs_root;
};
#define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC)


@@ -157,6 +157,8 @@ struct mmc_host {
struct led_trigger *led; /* activity led */
#endif
struct dentry *debugfs_root;
unsigned long private[0] ____cacheline_aligned;
};


@@ -0,0 +1,279 @@
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
struct mmu_notifier;
struct mmu_notifier_ops;
#ifdef CONFIG_MMU_NOTIFIER
/*
* The mmu notifier_mm structure is allocated and installed in
* mm->mmu_notifier_mm inside the mm_take_all_locks() protected
* critical section and it's released only when mm_count reaches zero
* in mmdrop().
*/
struct mmu_notifier_mm {
/* all mmu notifiers registerd in this mm are queued in this list */
struct hlist_head list;
/* to serialize the list modifications and hlist_unhashed */
spinlock_t lock;
};
struct mmu_notifier_ops {
/*
* Called either by mmu_notifier_unregister or when the mm is
* being destroyed by exit_mmap, always before all pages are
* freed. This can run concurrently with other mmu notifier
* methods (the ones invoked outside the mm context) and it
* should tear down all secondary mmu mappings and freeze the
* secondary mmu. If this method isn't implemented you've to
* be sure that nothing could possibly write to the pages
* through the secondary mmu by the time the last thread with
* tsk->mm == mm exits.
*
* As side note: the pages freed after ->release returns could
* be immediately reallocated by the gart at an alias physical
* address with a different cache model, so if ->release isn't
* implemented because all _software_ driven memory accesses
* through the secondary mmu are terminated by the time the
* last thread of this mm quits, you've also to be sure that
* speculative _hardware_ operations can't allocate dirty
* cachelines in the cpu that could not be snooped and made
* coherent with the other read and write operations happening
* through the gart alias address, so leading to memory
* corruption.
*/
void (*release)(struct mmu_notifier *mn,
struct mm_struct *mm);
/*
* clear_flush_young is called after the VM is
* test-and-clearing the young/accessed bitflag in the
* pte. This way the VM will provide proper aging to the
* accesses to the page through the secondary MMUs and not
* only to the ones through the Linux pte.
*/
int (*clear_flush_young)(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address);
/*
* Before this is invoked any secondary MMU is still ok to
* read/write to the page previously pointed to by the Linux
* pte because the page hasn't been freed yet and it won't be
* freed until this returns. If required set_page_dirty has to
* be called internally to this method.
*/
void (*invalidate_page)(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address);
/*
* invalidate_range_start() and invalidate_range_end() must be
* paired and are called only when the mmap_sem and/or the
* locks protecting the reverse maps are held. The subsystem
* must guarantee that no additional references are taken to
* the pages in the range established between the call to
* invalidate_range_start() and the matching call to
* invalidate_range_end().
*
* Invalidation of multiple concurrent ranges may be
* optionally permitted by the driver. Either way the
* establishment of sptes is forbidden in the range passed to
* invalidate_range_begin/end for the whole duration of the
* invalidate_range_begin/end critical section.
*
* invalidate_range_start() is called when all pages in the
* range are still mapped and have at least a refcount of one.
*
* invalidate_range_end() is called when all pages in the
* range have been unmapped and the pages have been freed by
* the VM.
*
* The VM will remove the page table entries and potentially
* the page between invalidate_range_start() and
* invalidate_range_end(). If the page must not be freed
* because of pending I/O or other circumstances then the
* invalidate_range_start() callback (or the initial mapping
* by the driver) must make sure that the refcount is kept
* elevated.
*
* If the driver increases the refcount when the pages are
* initially mapped into an address space then either
* invalidate_range_start() or invalidate_range_end() may
* decrease the refcount. If the refcount is decreased on
* invalidate_range_start() then the VM can free pages as page
* table entries are removed. If the refcount is only
* droppped on invalidate_range_end() then the driver itself
* will drop the last refcount but it must take care to flush
* any secondary tlb before doing the final free on the
* page. Pages will no longer be referenced by the linux
* address space but may still be referenced by sptes until
* the last refcount is dropped.
*/
void (*invalidate_range_start)(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start, unsigned long end);
void (*invalidate_range_end)(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start, unsigned long end);
};
/*
* The notifier chains are protected by mmap_sem and/or the reverse map
* semaphores. Notifier chains are only changed when all reverse maps and
* the mmap_sem locks are taken.
*
* Therefore notifier chains can only be traversed when either
*
* 1. mmap_sem is held.
* 2. One of the reverse map locks is held (i_mmap_lock or anon_vma->lock).
* 3. No other concurrent thread can access the list (release)
*/
struct mmu_notifier {
struct hlist_node hlist;
const struct mmu_notifier_ops *ops;
};
static inline int mm_has_notifiers(struct mm_struct *mm)
{
return unlikely(mm->mmu_notifier_mm);
}
extern int mmu_notifier_register(struct mmu_notifier *mn,
struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *mn,
struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *mn,
struct mm_struct *mm);
extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
unsigned long address);
extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
unsigned long address);
extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end);
extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
unsigned long start, unsigned long end);
static inline void mmu_notifier_release(struct mm_struct *mm)
{
if (mm_has_notifiers(mm))
__mmu_notifier_release(mm);
}
static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
unsigned long address)
{
if (mm_has_notifiers(mm))
return __mmu_notifier_clear_flush_young(mm, address);
return 0;
}
static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
unsigned long address)
{
if (mm_has_notifiers(mm))
__mmu_notifier_invalidate_page(mm, address);
}
static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
if (mm_has_notifiers(mm))
__mmu_notifier_invalidate_range_start(mm, start, end);
}
static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
if (mm_has_notifiers(mm))
__mmu_notifier_invalidate_range_end(mm, start, end);
}
static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
mm->mmu_notifier_mm = NULL;
}
static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
if (mm_has_notifiers(mm))
__mmu_notifier_mm_destroy(mm);
}
/*
* These two macros will sometime replace ptep_clear_flush.
* ptep_clear_flush is impleemnted as macro itself, so this also is
* implemented as a macro until ptep_clear_flush will converted to an
* inline function, to diminish the risk of compilation failure. The
* invalidate_page method over time can be moved outside the PT lock
* and these two macros can be later removed.
*/
#define ptep_clear_flush_notify(__vma, __address, __ptep) \
({ \
pte_t __pte; \
struct vm_area_struct *___vma = __vma; \
unsigned long ___address = __address; \
__pte = ptep_clear_flush(___vma, ___address, __ptep); \
mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
__pte; \
})
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
({ \
int __young; \
struct vm_area_struct *___vma = __vma; \
unsigned long ___address = __address; \
__young = ptep_clear_flush_young(___vma, ___address, __ptep); \
__young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
___address); \
__young; \
})
#else /* CONFIG_MMU_NOTIFIER */
static inline void mmu_notifier_release(struct mm_struct *mm)
{
}
static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
unsigned long address)
{
return 0;
}
static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
unsigned long address)
{
}
static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
}
static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
}
static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}
static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
}
#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define ptep_clear_flush_notify ptep_clear_flush
#endif /* CONFIG_MMU_NOTIFIER */
#endif /* _LINUX_MMU_NOTIFIER_H */
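A hedged sketch of the smallest consumer of this API: register a notifier whose only hook is ->clear_flush_young(). Everything named demo_* is made up for the example; a real secondary MMU would also implement ->release() and the invalidation hooks described in the comments above:

	static int demo_clear_flush_young(struct mmu_notifier *mn,
					  struct mm_struct *mm,
					  unsigned long address)
	{
		return 0;	/* no secondary TLB state, so never "young" */
	}

	static const struct mmu_notifier_ops demo_ops = {
		.clear_flush_young	= demo_clear_flush_young,
	};

	static struct mmu_notifier demo_notifier = {
		.ops	= &demo_ops,
	};

	static int demo_attach(struct mm_struct *mm)
	{
		/* mmu_notifier_register takes mmap_sem itself; mm must be live */
		return mmu_notifier_register(&demo_notifier, mm);
	}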


@@ -47,7 +47,7 @@ struct vfsmount {
struct list_head mnt_child; /* and going through their mnt_child */
int mnt_flags;
/* 4 bytes hole on 64bits arches */
char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */
const char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */
struct list_head mnt_list;
struct list_head mnt_expire; /* link in fs-specific expiry list */
struct list_head mnt_share; /* circular list of shared mounts */


@@ -1,6 +1,4 @@
/*
* $Id: blktrans.h,v 1.6 2005/11/07 11:14:54 gleixner Exp $
*
* (C) 2003 David Woodhouse <dwmw2@infradead.org>
*
* Interface to Linux block layer for MTD 'translation layers'.


@@ -1,7 +1,6 @@
/* Common Flash Interface structures
* See http://support.intel.com/design/flash/technote/index.htm
* $Id: cfi.h,v 1.57 2005/11/15 23:28:17 tpoynor Exp $
*/
#ifndef __MTD_CFI_H__


@@ -1,8 +1,3 @@
/*
* $Id: cfi_endian.h,v 1.11 2002/01/30 23:20:48 awozniak Exp $
*
*/
#include <asm/byteorder.h>
#ifndef CONFIG_MTD_CFI_ADV_OPTIONS


@@ -4,8 +4,6 @@
* (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
*
* This code is GPL
*
* $Id: concat.h,v 1.1 2002/03/08 16:34:36 rkaiser Exp $
*/
#ifndef MTD_CONCAT_H


@@ -6,8 +6,6 @@
* Copyright (C) 2002-2003 Greg Ungerer <gerg@snapgear.com>
* Copyright (C) 2002-2003 SnapGear Inc
*
* $Id: doc2000.h,v 1.25 2005/11/07 11:14:54 gleixner Exp $
*
* Released under GPL
*/


@@ -5,9 +5,6 @@
* Contains information about the location and state of a given flash device
*
* (C) 2000 Red Hat. GPLd.
*
* $Id: flashchip.h,v 1.18 2005/11/07 11:14:54 gleixner Exp $
*
*/
#ifndef __MTD_FLASHCHIP_H__


@@ -1,6 +1,4 @@
/*
* $Id: ftl.h,v 1.7 2005/11/07 11:14:54 gleixner Exp $
*
* Derived from (and probably identical to):
* ftl.h 1.7 1999/10/25 20:23:17
*


@@ -1,7 +1,6 @@
/*
* (C) 2001, 2001 Red Hat, Inc.
* GPL'd
* $Id: gen_probe.h,v 1.4 2005/11/07 11:14:54 gleixner Exp $
*/
#ifndef __LINUX_MTD_GEN_PROBE_H__


@@ -2,8 +2,6 @@
* inftl.h -- defines to support the Inverse NAND Flash Translation Layer
*
* (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
*
* $Id: inftl.h,v 1.7 2005/06/13 13:08:45 sean Exp $
*/
#ifndef __MTD_INFTL_H__
@@ -52,8 +50,6 @@ struct INFTLrecord {
int INFTL_mount(struct INFTLrecord *s);
int INFTL_formatblock(struct INFTLrecord *s, int block);
extern char inftlmountrev[];
void INFTL_dumptables(struct INFTLrecord *s);
void INFTL_dumpVUchains(struct INFTLrecord *s);


@@ -1,6 +1,5 @@
/* Overhauled routines for dealing with different mmap regions of flash */
/* $Id: map.h,v 1.54 2005/11/07 11:14:54 gleixner Exp $ */
#ifndef __LINUX_MTD_MAP_H__
#define __LINUX_MTD_MAP_H__


@@ -1,6 +1,4 @@
/*
* $Id: mtd.h,v 1.61 2005/11/07 11:14:54 gleixner Exp $
*
* Copyright (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> et al.
*
* Released under GPL


@@ -5,8 +5,6 @@
* Steven J. Hill <sjhill@realitydiluted.com>
* Thomas Gleixner <tglx@linutronix.de>
*
* $Id: nand.h,v 1.74 2005/09/15 13:58:50 vwool Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -179,6 +177,7 @@ typedef enum {
#define NAND_MUST_PAD(chip) (!(chip->options & NAND_NO_PADDING))
#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
#define NAND_HAS_COPYBACK(chip) ((chip->options & NAND_COPYBACK))
#define NAND_SUBPAGE_READ(chip) ((chip->ecc.mode == NAND_ECC_SOFT))
/* Mask to zero out the chip options, which come from the id table */
#define NAND_CHIPOPTIONS_MSK (0x0000ffff & ~NAND_NO_AUTOINCR)
@@ -276,6 +275,10 @@ struct nand_ecc_ctrl {
int (*read_page)(struct mtd_info *mtd,
struct nand_chip *chip,
uint8_t *buf);
int (*read_subpage)(struct mtd_info *mtd,
struct nand_chip *chip,
uint32_t offs, uint32_t len,
uint8_t *buf);
void (*write_page)(struct mtd_info *mtd,
struct nand_chip *chip,
const uint8_t *buf);


@@ -3,8 +3,6 @@
*
* Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
*
* $Id: nand_ecc.h,v 1.4 2004/06/17 02:35:02 dbrown Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.


@@ -1,6 +1,4 @@
/*
* $Id: nftl.h,v 1.16 2004/06/30 14:49:00 dbrown Exp $
*
* (C) 1999-2003 David Woodhouse <dwmw2@infradead.org>
*/


@@ -4,8 +4,6 @@
* (C) 2000 Nicolas Pitre <nico@cam.org>
*
* This code is GPL
*
* $Id: partitions.h,v 1.17 2005/11/07 11:14:55 gleixner Exp $
*/
#ifndef MTD_PARTITIONS_H


@@ -2,8 +2,6 @@
* For boards with physically mapped flash and using
* drivers/mtd/maps/physmap.c mapping driver.
*
* $Id: physmap.h,v 1.4 2005/11/07 11:14:55 gleixner Exp $
*
* Copyright (C) 2003 MontaVista Software Inc.
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
*


@@ -6,8 +6,6 @@
*
* Generic platform device based RAM map
*
* $Id: plat-ram.h,v 1.2 2005/01/24 00:37:40 bjd Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.


@@ -1,6 +1,4 @@
/*
* $Id: pmc551.h,v 1.6 2005/11/07 11:14:55 gleixner Exp $
*
* PMC551 PCI Mezzanine Ram Device
*
* Author:
@@ -17,7 +15,7 @@
#include <linux/mtd/mtd.h>
#define PMC551_VERSION "$Id: pmc551.h,v 1.6 2005/11/07 11:14:55 gleixner Exp $\n"\
#define PMC551_VERSION \
"Ramix PMC551 PCI Mezzanine Ram Driver. (C) 1999,2000 Nortel Networks.\n"
/*


@@ -11,8 +11,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* $Id: xip.h,v 1.5 2005/11/07 11:14:55 gleixner Exp $
*/
#ifndef __LINUX_MTD_XIP_H__


@@ -47,27 +47,24 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
#define LOOKUP_DIRECTORY 2
#define LOOKUP_CONTINUE 4
#define LOOKUP_PARENT 16
#define LOOKUP_NOALT 32
#define LOOKUP_REVAL 64
/*
* Intent data
*/
#define LOOKUP_OPEN (0x0100)
#define LOOKUP_CREATE (0x0200)
#define LOOKUP_ACCESS (0x0400)
#define LOOKUP_CHDIR (0x0800)
extern int __user_walk(const char __user *, unsigned, struct nameidata *);
extern int __user_walk_fd(int dfd, const char __user *, unsigned, struct nameidata *);
#define user_path_walk(name,nd) \
__user_walk_fd(AT_FDCWD, name, LOOKUP_FOLLOW, nd)
#define user_path_walk_link(name,nd) \
__user_walk_fd(AT_FDCWD, name, 0, nd)
extern int user_path_at(int, const char __user *, unsigned, struct path *);
#define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path)
#define user_lpath(name, path) user_path_at(AT_FDCWD, name, 0, path)
#define user_path_dir(name, path) \
user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path)
extern int path_lookup(const char *, unsigned, struct nameidata *);
extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
const char *, unsigned int, struct nameidata *);
extern int __user_path_lookup_open(const char __user *, unsigned lookup_flags, struct nameidata *nd, int open_flags);
extern int path_lookup_open(int dfd, const char *name, unsigned lookup_flags, struct nameidata *, int open_flags);
extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
int (*open)(struct inode *, struct file *));
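The replacement pattern for the removed user_path_walk() is user_path(), which resolves straight to a struct path instead of filling a whole nameidata. A hedged sketch of the new calling convention (the wrapper name is illustrative):

	static int with_user_path(const char __user *name)
	{
		struct path path;
		int error = user_path(name, &path);

		if (error)
			return error;
		/* ... work with path.dentry / path.mnt ... */
		path_put(&path);
		return 0;
	}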


@@ -42,7 +42,6 @@
#include <linux/in.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
@@ -332,7 +331,7 @@ extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
extern int nfs_permission(struct inode *, int, struct nameidata *);
extern int nfs_permission(struct inode *, int);
extern int nfs_open(struct inode *, struct file *);
extern int nfs_release(struct inode *, struct file *);
extern int nfs_attribute_timeout(struct inode *inode);


@@ -70,5 +70,6 @@ extern int of_n_addr_cells(struct device_node *np);
extern int of_n_size_cells(struct device_node *np);
extern const struct of_device_id *of_match_node(
const struct of_device_id *matches, const struct device_node *node);
extern int of_modalias_node(struct device_node *node, char *modalias, int len);
#endif /* _LINUX_OF_H */

include/linux/of_spi.h (new file, 18 lines)

@@ -0,0 +1,18 @@
/*
* OpenFirmware SPI support routines
* Copyright (C) 2008 Secret Lab Technologies Ltd.
*
* Support routines for deriving SPI device attachments from the device
* tree.
*/
#ifndef __LINUX_OF_SPI_H
#define __LINUX_OF_SPI_H
#include <linux/of.h>
#include <linux/spi/spi.h>
extern void of_register_spi_devices(struct spi_master *master,
struct device_node *np);
#endif /* __LINUX_OF_SPI */

View File

@@ -12,6 +12,7 @@
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
/*
* Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
@@ -19,6 +20,7 @@
*/
#define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */
#define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */
#define AS_MM_ALL_LOCKS (__GFP_BITS_SHIFT + 2) /* under mm_take_all_locks() */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
@@ -62,6 +64,98 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
#define page_cache_release(page) put_page(page)
void release_pages(struct page **pages, int nr, int cold);
/*
* speculatively take a reference to a page.
* If the page is free (_count == 0), then _count is untouched, and 0
* is returned. Otherwise, _count is incremented by 1 and 1 is returned.
*
* This function must be called inside the same rcu_read_lock() section as has
* been used to lookup the page in the pagecache radix-tree (or page table):
* this allows allocators to use a synchronize_rcu() to stabilize _count.
*
* Unless an RCU grace period has passed, the count of all pages coming out
* of the allocator must be considered unstable. page_count may return higher
* than expected, and put_page must be able to do the right thing when the
* page has been finished with, no matter what it is subsequently allocated
* for (because put_page is what is used here to drop an invalid speculative
* reference).
*
* This is the interesting part of the lockless pagecache (and lockless
* get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
* has the following pattern:
* 1. find page in radix tree
* 2. conditionally increment refcount
* 3. check the page is still in pagecache (if no, goto 1)
*
* Remove-side that cares about stability of _count (eg. reclaim) has the
* following (with tree_lock held for write):
* A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
* B. remove page from pagecache
* C. free the page
*
* There are 2 critical interleavings that matter:
* - 2 runs before A: in this case, A sees elevated refcount and bails out
* - A runs before 2: in this case, 2 sees zero refcount and retries;
* subsequently, B will complete and 1 will find no page, causing the
* lookup to return NULL.
*
* It is possible that between 1 and 2, the page is removed then the exact same
* page is inserted into the same position in pagecache. That's OK: the
* old find_get_page using tree_lock could equally have run before or after
* such a re-insertion, depending on order that locks are granted.
*
* Lookups racing against pagecache insertion isn't a big problem: either 1
* will find the page or it will not. Likewise, the old find_get_page could run
* either before the insertion or afterwards, depending on timing.
*/
static inline int page_cache_get_speculative(struct page *page)
{
VM_BUG_ON(in_interrupt());
#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
# ifdef CONFIG_PREEMPT
VM_BUG_ON(!in_atomic());
# endif
/*
* Preempt must be disabled here - we rely on rcu_read_lock doing
* this for us.
*
* Pagecache won't be truncated from interrupt context, so if we have
* found a page in the radix tree here, we have pinned its refcount by
* disabling preempt, and hence no need for the "speculative get" that
* SMP requires.
*/
VM_BUG_ON(page_count(page) == 0);
atomic_inc(&page->_count);
#else
if (unlikely(!get_page_unless_zero(page))) {
/*
* Either the page has been freed, or will be freed.
* In either case, retry here and the caller should
* do the right thing (see comments above).
*/
return 0;
}
#endif
VM_BUG_ON(PageTail(page));
return 1;
}
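/*
 * [Editorial sketch, not part of this patch] The lookup-side pattern
 * (steps 1-3 above) as a caller might tie it together. The radix-tree
 * calls are the real API from <linux/radix-tree.h>; the loop itself is
 * an illustrative assumption, not the kernel's find_get_page().
 */
static inline struct page *example_lockless_lookup(struct address_space *mapping,
						   pgoff_t offset)
{
	struct page *page;
	void **slot;

	rcu_read_lock();
repeat:
	page = NULL;
	slot = radix_tree_lookup_slot(&mapping->page_tree, offset);	/* 1 */
	if (slot) {
		page = radix_tree_deref_slot(slot);
		if (page && !page_cache_get_speculative(page))		/* 2 */
			goto repeat;
		/* 3: check the page is still in pagecache */
		if (page && unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}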
static inline int page_freeze_refs(struct page *page, int count)
{
return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}
static inline void page_unfreeze_refs(struct page *page, int count)
{
VM_BUG_ON(page_count(page) != 0);
VM_BUG_ON(count == 0);
atomic_set(&page->_count, count);
}
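/*
 * [Editorial sketch, not part of this patch] The remove side (steps A-C
 * above) as reclaim-style code might use the freeze helpers. The caller
 * is assumed to hold mapping->tree_lock for write and to own one page
 * reference of its own, hence the expected count of 2.
 */
static inline int example_remove_page(struct page *page)
{
	if (!page_freeze_refs(page, 2))	/* A: pagecache ref + our ref */
		return 0;		/* a speculative get won the race */
	__remove_from_page_cache(page);	/* B */
	return 1;			/* C: caller now frees the page */
}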
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
@@ -133,13 +227,29 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
return read_cache_page(mapping, index, filler, data);
}
int add_to_page_cache(struct page *page, struct address_space *mapping,
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);
/*
* Like add_to_page_cache_locked, but used to add newly allocated pages:
* the page is new, so we can just run SetPageLocked() against it.
*/
static inline int add_to_page_cache(struct page *page,
struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
int error;
SetPageLocked(page);
error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
if (unlikely(error))
ClearPageLocked(page);
return error;
}
/*
* Return byte-offset into filesystem object for page.
*/

View File

@@ -560,5 +560,8 @@ extern int parport_device_proc_unregister(struct pardevice *device);
#endif /* !CONFIG_PARPORT_NOT_PC */
extern unsigned long parport_default_timeslice;
extern int parport_default_spintime;
#endif /* __KERNEL__ */
#endif /* _PARPORT_H_ */

View File

@@ -27,6 +27,7 @@ extern void pcie_aspm_init_link_state(struct pci_dev *pdev);
extern void pcie_aspm_exit_link_state(struct pci_dev *pdev);
extern void pcie_aspm_pm_state_change(struct pci_dev *pdev);
extern void pci_disable_link_state(struct pci_dev *pdev, int state);
extern void pcie_no_aspm(void);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev)
{
@@ -40,6 +41,10 @@ static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev)
static inline void pci_disable_link_state(struct pci_dev *pdev, int state)
{
}
static inline void pcie_no_aspm(void)
{
}
#endif
#ifdef CONFIG_PCIEASPM_DEBUG /* this depends on CONFIG_PCIEASPM */

View File

@@ -124,6 +124,8 @@ enum pci_dev_flags {
* generation too.
*/
PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) 1,
/* Device configuration is irrevocably lost if disabled into D3 */
PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
};
typedef unsigned short __bitwise pci_bus_flags_t;

View File

@@ -748,6 +748,7 @@
#define PCI_VENDOR_ID_TI 0x104c
#define PCI_DEVICE_ID_TI_TVP4020 0x3d07
#define PCI_DEVICE_ID_TI_4450 0x8011
#define PCI_DEVICE_ID_TI_TSB43AB22 0x8023
#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031
#define PCI_DEVICE_ID_TI_XX21_XX11_FM 0x8033
#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034
@@ -1832,7 +1833,13 @@
#define PCI_DEVICE_ID_MOXA_C320 0x3200
#define PCI_VENDOR_ID_CCD 0x1397
#define PCI_DEVICE_ID_CCD_HFC4S 0x08B4
#define PCI_SUBDEVICE_ID_CCD_PMX2S 0x1234
#define PCI_DEVICE_ID_CCD_HFC8S 0x16B8
#define PCI_DEVICE_ID_CCD_2BD0 0x2bd0
#define PCI_DEVICE_ID_CCD_HFCE1 0x30B1
#define PCI_SUBDEVICE_ID_CCD_SPD4S 0x3136
#define PCI_SUBDEVICE_ID_CCD_SPDE1 0x3137
#define PCI_DEVICE_ID_CCD_B000 0xb000
#define PCI_DEVICE_ID_CCD_B006 0xb006
#define PCI_DEVICE_ID_CCD_B007 0xb007
@@ -1842,8 +1849,32 @@
#define PCI_DEVICE_ID_CCD_B00B 0xb00b
#define PCI_DEVICE_ID_CCD_B00C 0xb00c
#define PCI_DEVICE_ID_CCD_B100 0xb100
#define PCI_SUBDEVICE_ID_CCD_IOB4ST 0xB520
#define PCI_SUBDEVICE_ID_CCD_IOB8STR 0xB521
#define PCI_SUBDEVICE_ID_CCD_IOB8ST 0xB522
#define PCI_SUBDEVICE_ID_CCD_IOB1E1 0xB523
#define PCI_SUBDEVICE_ID_CCD_SWYX4S 0xB540
#define PCI_SUBDEVICE_ID_CCD_JH4S20 0xB550
#define PCI_SUBDEVICE_ID_CCD_IOB8ST_1 0xB552
#define PCI_SUBDEVICE_ID_CCD_BN4S 0xB560
#define PCI_SUBDEVICE_ID_CCD_BN8S 0xB562
#define PCI_SUBDEVICE_ID_CCD_BNE1 0xB563
#define PCI_SUBDEVICE_ID_CCD_BNE1D 0xB564
#define PCI_SUBDEVICE_ID_CCD_BNE1DP 0xB565
#define PCI_SUBDEVICE_ID_CCD_BN2S 0xB566
#define PCI_SUBDEVICE_ID_CCD_BN1SM 0xB567
#define PCI_SUBDEVICE_ID_CCD_BN4SM 0xB568
#define PCI_SUBDEVICE_ID_CCD_BN2SM 0xB569
#define PCI_SUBDEVICE_ID_CCD_BNE1M 0xB56A
#define PCI_SUBDEVICE_ID_CCD_BN8SP 0xB56B
#define PCI_SUBDEVICE_ID_CCD_HFC4S 0xB620
#define PCI_SUBDEVICE_ID_CCD_HFC8S 0xB622
#define PCI_DEVICE_ID_CCD_B700 0xb700
#define PCI_DEVICE_ID_CCD_B701 0xb701
#define PCI_SUBDEVICE_ID_CCD_HFCE1 0xC523
#define PCI_SUBDEVICE_ID_CCD_OV2S 0xE884
#define PCI_SUBDEVICE_ID_CCD_OV4S 0xE888
#define PCI_SUBDEVICE_ID_CCD_OV8S 0xE998
#define PCI_VENDOR_ID_EXAR 0x13a8
#define PCI_DEVICE_ID_EXAR_XR17C152 0x0152
@@ -2523,6 +2554,9 @@
#define PCI_VENDOR_ID_3COM_2 0xa727
#define PCI_VENDOR_ID_DIGIUM 0xd161
#define PCI_DEVICE_ID_DIGIUM_HFC4S 0xb410
#define PCI_SUBVENDOR_ID_EXSYS 0xd84d
#define PCI_SUBDEVICE_ID_EXSYS_4014 0x4014
#define PCI_SUBDEVICE_ID_EXSYS_4055 0x4055

View File

@@ -374,6 +374,7 @@
#define PCI_EXP_DEVCAP_ATN_BUT 0x1000 /* Attention Button Present */
#define PCI_EXP_DEVCAP_ATN_IND 0x2000 /* Attention Indicator Present */
#define PCI_EXP_DEVCAP_PWR_IND 0x4000 /* Power Indicator Present */
#define PCI_EXP_DEVCAP_RBER 0x8000 /* Role-Based Error Reporting */
#define PCI_EXP_DEVCAP_PWR_VAL 0x3fc0000 /* Slot Power Limit Value */
#define PCI_EXP_DEVCAP_PWR_SCL 0xc000000 /* Slot Power Limit Scale */
#define PCI_EXP_DEVCTL 8 /* Device Control */

View File

@@ -74,11 +74,6 @@ struct percpu_data {
(__typeof__(ptr))__p->ptrs[(cpu)]; \
})
extern void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu);
extern void percpu_depopulate(void *__pdata, int cpu);
extern int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
cpumask_t *mask);
extern void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask);
extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
extern void percpu_free(void *__pdata);
@@ -86,26 +81,6 @@ extern void percpu_free(void *__pdata);
#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
static inline void percpu_depopulate(void *__pdata, int cpu)
{
}
static inline void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
{
}
static inline void *percpu_populate(void *__pdata, size_t size, gfp_t gfp,
int cpu)
{
return percpu_ptr(__pdata, cpu);
}
static inline int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
cpumask_t *mask)
{
return 0;
}
static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
{
return kzalloc(size, gfp);
@@ -118,10 +93,6 @@ static inline void percpu_free(void *__pdata)
#endif /* CONFIG_SMP */
#define percpu_populate_mask(__pdata, size, gfp, mask) \
__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
#define percpu_depopulate_mask(__pdata, mask) \
__percpu_depopulate_mask((__pdata), &(mask))
#define percpu_alloc_mask(size, gfp, mask) \
__percpu_alloc_mask((size), (gfp), &(mask))

View File

@@ -282,11 +282,16 @@ union proc_op {
struct task_struct *task);
};
struct ctl_table_header;
struct ctl_table;
struct proc_inode {
struct pid *pid;
int fd;
union proc_op op;
struct proc_dir_entry *pde;
struct ctl_table_header *sysctl;
struct ctl_table *sysctl_entry;
struct inode vfs_inode;
};

View File

@@ -121,6 +121,74 @@ static inline void ptrace_unlink(struct task_struct *child)
int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data);
int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data);
/**
* task_ptrace - return %PT_* flags that apply to a task
* @task: pointer to &task_struct in question
*
* Returns the %PT_* flags that apply to @task.
*/
static inline int task_ptrace(struct task_struct *task)
{
return task->ptrace;
}
/**
* ptrace_event - possibly stop for a ptrace event notification
* @mask: %PT_* bit to check in @current->ptrace
* @event: %PTRACE_EVENT_* value to report if @mask is set
* @message: value for %PTRACE_GETEVENTMSG to return
*
* This checks the @mask bit to see if ptrace wants stops for this event.
* If so we stop, reporting @event and @message to the ptrace parent.
*
* Returns nonzero if we did a ptrace notification, zero if not.
*
* Called without locks.
*/
static inline int ptrace_event(int mask, int event, unsigned long message)
{
if (mask && likely(!(current->ptrace & mask)))
return 0;
current->ptrace_message = message;
ptrace_notify((event << 8) | SIGTRAP);
return 1;
}
/**
* ptrace_init_task - initialize ptrace state for a new child
* @child: new child task
* @ptrace: true if child should be ptrace'd by parent's tracer
*
* This is called immediately after adding @child to its parent's children
* list. @ptrace is false in the normal case, and true to ptrace @child.
*
* Called with current's siglock and write_lock_irq(&tasklist_lock) held.
*/
static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
INIT_LIST_HEAD(&child->ptrace_entry);
INIT_LIST_HEAD(&child->ptraced);
child->parent = child->real_parent;
child->ptrace = 0;
if (unlikely(ptrace)) {
child->ptrace = current->ptrace;
__ptrace_link(child, current->parent);
}
}
/**
* ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
* @task: task in %EXIT_DEAD state
*
* Called with write_lock(&tasklist_lock) held.
*/
static inline void ptrace_release_task(struct task_struct *task)
{
BUG_ON(!list_empty(&task->ptraced));
ptrace_unlink(task);
BUG_ON(!list_empty(&task->ptrace_entry));
}
#ifndef force_successful_syscall_return
/*
* System call handlers that, upon successful completion, need to return a
@@ -246,6 +314,10 @@ static inline void user_enable_block_step(struct task_struct *task)
#define arch_ptrace_stop(code, info) do { } while (0)
#endif
extern int task_current_syscall(struct task_struct *target, long *callno,
unsigned long args[6], unsigned int maxargs,
unsigned long *sp, unsigned long *pc);
#endif
#endif

View File

@@ -99,12 +99,15 @@ do { \
*
* The notable exceptions to this rule are the following functions:
* radix_tree_lookup
* radix_tree_lookup_slot
* radix_tree_tag_get
* radix_tree_gang_lookup
* radix_tree_gang_lookup_slot
* radix_tree_gang_lookup_tag
* radix_tree_gang_lookup_tag_slot
* radix_tree_tagged
*
* The first 4 functions are able to be called locklessly, using RCU. The
* The first 7 functions are able to be called locklessly, using RCU. The
* caller must ensure calls to these functions are made within rcu_read_lock()
* regions. Other readers (lock-free or otherwise) and modifications may be
* running concurrently.
@@ -159,6 +162,9 @@ void *radix_tree_delete(struct radix_tree_root *, unsigned long);
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
unsigned long first_index, unsigned int max_items);
unsigned int
radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
unsigned long first_index, unsigned int max_items);
unsigned long radix_tree_next_hole(struct radix_tree_root *root,
unsigned long index, unsigned long max_scan);
int radix_tree_preload(gfp_t gfp_mask);
@@ -173,6 +179,10 @@ unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
unsigned long first_index, unsigned int max_items,
unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
unsigned long first_index, unsigned int max_items,
unsigned int tag);
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
static inline void radix_tree_preload_end(void)
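/*
 * [Editorial sketch, not part of this patch] Using one of the lockless
 * entry points under the RCU rules described above. Results are only
 * stable while rcu_read_lock() is held, unless the caller takes its own
 * references (e.g. page_cache_get_speculative() for pagecache pages).
 */
static inline unsigned int example_gang_lookup(struct radix_tree_root *root,
					       void **results,
					       unsigned long first,
					       unsigned int max_items)
{
	unsigned int found;

	rcu_read_lock();
	found = radix_tree_gang_lookup(root, results, first, max_items);
	rcu_read_unlock();
	return found;
}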

View File

@@ -97,6 +97,34 @@ static inline void list_del_rcu(struct list_head *entry)
entry->prev = LIST_POISON2;
}
/**
* hlist_del_init_rcu - deletes entry from hash list with re-initialization
* @n: the element to delete from the hash list.
*
* Note: list_unhashed() on the node returns true after this. It is
* useful for RCU-based lock-free read traversal if the writer side
* must know if the list entry is still hashed or already unhashed.
*
* In particular, it means that we can not poison the forward pointers
* that may still be used for walking the hash list and we can only
* zero the pprev pointer so list_unhashed() will return true after
* this.
*
* The caller must take whatever precautions are necessary (such as
* holding appropriate locks) to avoid racing with another
* list-mutation primitive, such as hlist_add_head_rcu() or
* hlist_del_rcu(), running on this same list. However, it is
* perfectly legal to run concurrently with the _rcu list-traversal
* primitives, such as hlist_for_each_entry_rcu().
*/
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
__hlist_del(n);
n->pprev = NULL;
}
}
/**
* list_replace_rcu - replace old entry by new one
* @old : the element to be replaced

View File

@@ -55,7 +55,7 @@ int reiserfs_removexattr(struct dentry *dentry, const char *name);
int reiserfs_delete_xattrs(struct inode *inode);
int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs);
int reiserfs_xattr_init(struct super_block *sb, int mount_flags);
int reiserfs_permission(struct inode *inode, int mask, struct nameidata *nd);
int reiserfs_permission(struct inode *inode, int mask);
int reiserfs_xattr_del(struct inode *, const char *);
int reiserfs_xattr_get(const struct inode *, const char *, void *, size_t);

View File

@@ -48,6 +48,7 @@ struct rchan_buf
size_t *padding; /* padding counts per sub-buffer */
size_t prev_padding; /* temporary variable */
size_t bytes_consumed; /* bytes consumed in cur read subbuf */
size_t early_bytes; /* bytes consumed before VFS inited */
unsigned int cpu; /* this buf's cpu */
} ____cacheline_aligned;
@@ -68,6 +69,7 @@ struct rchan
int is_global; /* One global buffer ? */
struct list_head list; /* for channel list */
struct dentry *parent; /* parent dentry passed to open */
int has_base_filename; /* has a filename associated? */
char base_filename[NAME_MAX]; /* saved base filename */
};
@@ -169,6 +171,9 @@ struct rchan *relay_open(const char *base_filename,
size_t n_subbufs,
struct rchan_callbacks *cb,
void *private_data);
extern int relay_late_setup_files(struct rchan *chan,
const char *base_filename,
struct dentry *parent);
extern void relay_close(struct rchan *chan);
extern void relay_flush(struct rchan *chan);
extern void relay_subbufs_consumed(struct rchan *chan,

View File

@@ -26,6 +26,14 @@
*/
struct anon_vma {
spinlock_t lock; /* Serialize access to vma list */
/*
* NOTE: the LSB of the head.next is set by
* mm_take_all_locks() _after_ taking the above lock. So the
* head must only be read/written after taking the above lock
* to be sure to see a valid next pointer. The LSB bit itself
* is serialized by a system wide lock only visible to
* mm_take_all_locks() (mm_all_locks_mutex).
*/
struct list_head head; /* List of private "related" vmas */
};

View File

@@ -225,8 +225,6 @@ typedef struct rtc_task {
int rtc_register(rtc_task_t *task);
int rtc_unregister(rtc_task_t *task);
int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
void rtc_get_rtc_time(struct rtc_time *rtc_tm);
irqreturn_t rtc_interrupt(int irq, void *dev_id);
#endif /* __KERNEL__ */

View File

@@ -755,13 +755,6 @@ extern void __rtnl_unlock(void);
} \
} while(0)
#define BUG_TRAP(x) do { \
if (unlikely(!(x))) { \
printk(KERN_ERR "KERNEL: assertion (%s) failed at %s (%d)\n", \
#x, __FILE__ , __LINE__); \
} \
} while(0)
static inline u32 rtm_get_table(struct rtattr **rta, u8 table)
{
return RTA_GET_U32(rta[RTA_TABLE-1]);

View File

@@ -292,7 +292,6 @@ extern void sched_show_task(struct task_struct *p);
#ifdef CONFIG_DETECT_SOFTLOCKUP
extern void softlockup_tick(void);
extern void spawn_softlockup_task(void);
extern void touch_softlockup_watchdog(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int softlockup_panic;
@@ -506,9 +505,6 @@ struct signal_struct {
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
unsigned long inblock, oublock, cinblock, coublock;
#ifdef CONFIG_TASK_XACCT
u64 rchar, wchar, syscr, syscw;
#endif
struct task_io_accounting ioac;
/*
@@ -1257,10 +1253,6 @@ struct task_struct {
unsigned long ptrace_message;
siginfo_t *last_siginfo; /* For ptrace use. */
#ifdef CONFIG_TASK_XACCT
/* i/o counters (bytes read/written, #syscalls) */
u64 rchar, wchar, syscr, syscw;
#endif
struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
u64 acct_rss_mem1; /* accumulated rss usage */
@@ -1797,7 +1789,7 @@ extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern void do_notify_parent(struct task_struct *, int);
extern int do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
@@ -1883,9 +1875,13 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
extern char *get_task_comm(char *to, struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void wait_task_inactive(struct task_struct * p);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
#define wait_task_inactive(p) do { } while (0)
static inline unsigned long wait_task_inactive(struct task_struct *p,
long match_state)
{
return 1;
}
#endif
#define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
@@ -2139,16 +2135,7 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
extern void arch_pick_mmap_layout(struct mm_struct *mm);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm)
{
mm->mmap_base = TASK_UNMAPPED_BASE;
mm->get_unmapped_area = arch_get_unmapped_area;
mm->unmap_area = arch_unmap_area;
}
#endif
#ifdef CONFIG_TRACING
extern void
@@ -2196,22 +2183,22 @@ extern long sched_group_rt_period(struct task_group *tg);
#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
tsk->rchar += amt;
tsk->ioac.rchar += amt;
}
static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
tsk->wchar += amt;
tsk->ioac.wchar += amt;
}
static inline void inc_syscr(struct task_struct *tsk)
{
tsk->syscr++;
tsk->ioac.syscr++;
}
static inline void inc_syscw(struct task_struct *tsk)
{
tsk->syscw++;
tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
@@ -2231,14 +2218,6 @@ static inline void inc_syscw(struct task_struct *tsk)
}
#endif
#ifdef CONFIG_SMP
void migration_init(void);
#else
static inline void migration_init(void)
{
}
#endif
#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif

View File

@@ -1362,7 +1362,7 @@ struct security_operations {
struct inode *new_dir, struct dentry *new_dentry);
int (*inode_readlink) (struct dentry *dentry);
int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
int (*inode_permission) (struct inode *inode, int mask, struct nameidata *nd);
int (*inode_permission) (struct inode *inode, int mask);
int (*inode_setattr) (struct dentry *dentry, struct iattr *attr);
int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry);
void (*inode_delete) (struct inode *inode);
@@ -1628,7 +1628,7 @@ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry);
int security_inode_readlink(struct dentry *dentry);
int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd);
int security_inode_permission(struct inode *inode, int mask, struct nameidata *nd);
int security_inode_permission(struct inode *inode, int mask);
int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry);
void security_inode_delete(struct inode *inode);
@@ -2021,8 +2021,7 @@ static inline int security_inode_follow_link(struct dentry *dentry,
return 0;
}
static inline int security_inode_permission(struct inode *inode, int mask,
struct nameidata *nd)
static inline int security_inode_permission(struct inode *inode, int mask)
{
return 0;
}

View File

@@ -87,11 +87,10 @@ void serio_unregister_port(struct serio *serio);
void serio_unregister_child_port(struct serio *serio);
int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name);
static inline int serio_register_driver(struct serio_driver *drv)
static inline int __must_check serio_register_driver(struct serio_driver *drv)
{
return __serio_register_driver(drv, THIS_MODULE, KBUILD_MODNAME);
}
int serio_register_driver(struct serio_driver *drv);
void serio_unregister_driver(struct serio_driver *drv);
static inline int serio_write(struct serio *serio, unsigned char data)

View File

@@ -43,7 +43,7 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
}
#ifdef CONFIG_TMPFS_POSIX_ACL
int shmem_permission(struct inode *, int, struct nameidata *);
int shmem_permission(struct inode *, int);
int shmem_acl_init(struct inode *, struct inode *);
void shmem_acl_destroy_inode(struct inode *);

View File

@@ -58,7 +58,7 @@ int slab_is_available(void);
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
unsigned long,
void (*)(struct kmem_cache *, void *));
void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
@@ -96,6 +96,7 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
/*
* Common kmalloc functions provided by all allocators
*/
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
size_t ksize(const void *);

View File

@@ -85,7 +85,7 @@ struct kmem_cache {
struct kmem_cache_order_objects min;
gfp_t allocflags; /* gfp flags to use on each alloc */
int refcount; /* Refcount for slab cache destroy */
void (*ctor)(struct kmem_cache *, void *);
void (*ctor)(void *);
int inuse; /* Offset to metadata */
int align; /* Alignment */
const char *name; /* Name (only for display!) */

View File

@@ -74,15 +74,10 @@ void __smp_call_function_single(int cpuid, struct call_single_data *data);
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
void generic_smp_call_function_single_interrupt(void);
void generic_smp_call_function_interrupt(void);
void init_call_single_data(void);
void ipi_call_lock(void);
void ipi_call_unlock(void);
void ipi_call_lock_irq(void);
void ipi_call_unlock_irq(void);
#else
static inline void init_call_single_data(void)
{
}
#endif
/*

View File

@@ -189,7 +189,8 @@ struct ucred {
#define AF_BLUETOOTH 31 /* Bluetooth sockets */
#define AF_IUCV 32 /* IUCV sockets */
#define AF_RXRPC 33 /* RxRPC sockets */
#define AF_MAX 34 /* For now.. */
#define AF_ISDN 34 /* mISDN sockets */
#define AF_MAX 35 /* For now.. */
/* Protocol families, same as address families. */
#define PF_UNSPEC AF_UNSPEC
@@ -225,6 +226,7 @@ struct ucred {
#define PF_BLUETOOTH AF_BLUETOOTH
#define PF_IUCV AF_IUCV
#define PF_RXRPC AF_RXRPC
#define PF_ISDN AF_ISDN
#define PF_MAX AF_MAX
/* Maximum queue length specifiable by listen. */

View File

@@ -778,7 +778,19 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
* use spi_new_device() to describe each device. You can also call
* spi_unregister_device() to start making that device vanish, but
* normally that would be handled by spi_unregister_master().
*
* You can also use spi_alloc_device() and spi_add_device() to use a two
* stage registration sequence for each spi_device. This gives the caller
* some more control over the spi_device structure before it is registered,
* but requires that caller to initialize fields that would otherwise
* be defined using the board info.
*/
extern struct spi_device *
spi_alloc_device(struct spi_master *master);
extern int
spi_add_device(struct spi_device *spi);
extern struct spi_device *
spi_new_device(struct spi_master *, struct spi_board_info *);
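/*
 * [Editorial sketch, not part of this patch] The two-stage sequence
 * described above. The spi_device field names are the real ones; the
 * values are invented for illustration.
 */
static inline int example_attach_device(struct spi_master *master)
{
	struct spi_device *spi;
	int status;

	spi = spi_alloc_device(master);		/* stage 1: allocate */
	if (!spi)
		return -ENOMEM;
	spi->chip_select = 0;
	spi->max_speed_hz = 1000000;
	strlcpy(spi->modalias, "example", sizeof(spi->modalias));
	status = spi_add_device(spi);		/* stage 2: register */
	if (status < 0)
		spi_dev_put(spi);		/* undo the allocation */
	return status;
}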

View File

@@ -427,9 +427,9 @@ static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr)
{
switch (dev->bus->bustype) {
case SSB_BUSTYPE_PCI:
return pci_dma_mapping_error(addr);
return pci_dma_mapping_error(dev->bus->host_pci, addr);
case SSB_BUSTYPE_SSB:
return dma_mapping_error(addr);
return dma_mapping_error(dev->dev, addr);
default:
__ssb_dma_not_implemented(dev);
}

View File

@@ -5,41 +5,43 @@
(and more). So the "read" side to such a lock is anything which
disables preempt. */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <asm/system.h>
#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
/* Deprecated, but useful for transition. */
#define ALL_CPUS ~0U
/**
* stop_machine_run: freeze the machine on all CPUs and run this function
* stop_machine: freeze the machine on all CPUs and run this function
* @fn: the function to run
* @data: the data ptr for the @fn()
* @cpu: the cpu to run @fn() on (or any, if @cpu == NR_CPUS).
* @cpus: the cpus to run the @fn() on (NULL = any online cpu)
*
* Description: This causes a thread to be scheduled on every other cpu,
* each of which disables interrupts, and finally interrupts are disabled
* on the current CPU. The result is that no one is holding a spinlock
* or inside any other preempt-disabled region when @fn() runs.
* Description: This causes a thread to be scheduled on every cpu,
* each of which disables interrupts. The result is that no one is
* holding a spinlock or inside any other preempt-disabled region when
* @fn() runs.
*
* This can be thought of as a very heavy write lock, equivalent to
* grabbing every spinlock in the kernel. */
int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu);
int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus);
/**
* __stop_machine_run: freeze the machine on all CPUs and run this function
* __stop_machine: freeze the machine on all CPUs and run this function
* @fn: the function to run
* @data: the data ptr for the @fn
* @cpu: the cpu to run @fn on (or any, if @cpu == NR_CPUS).
* @cpus: the cpus to run the @fn() on (NULL = any online cpu)
*
* Description: This is a special version of the above, which returns the
* thread which has run @fn(): kthread_stop will return the return value
* of @fn(). Used by hotplug cpu.
* Description: This is a special version of the above, which assumes cpus
* won't come or go while it's being called. Used by hotplug cpu.
*/
struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
unsigned int cpu);
int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus);
#else
static inline int stop_machine_run(int (*fn)(void *), void *data,
unsigned int cpu)
static inline int stop_machine(int (*fn)(void *), void *data,
const cpumask_t *cpus)
{
int ret;
local_irq_disable();
@@ -48,4 +50,18 @@ static inline int stop_machine_run(int (*fn)(void *), void *data,
return ret;
}
#endif /* CONFIG_SMP */
static inline int __deprecated stop_machine_run(int (*fn)(void *), void *data,
unsigned int cpu)
{
/* If they don't care which cpu fn runs on, just pick one. */
if (cpu == NR_CPUS)
return stop_machine(fn, data, NULL);
else if (cpu == ~0U)
return stop_machine(fn, data, &cpu_possible_map);
else {
cpumask_t cpus = cpumask_of_cpu(cpu);
return stop_machine(fn, data, &cpus);
}
}
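/*
 * [Editorial sketch, not part of this patch] Calling the new interface
 * directly to run fn on one particular cpu, mirroring the shim above.
 */
static inline int example_run_on_cpu(int (*fn)(void *), void *data,
				     unsigned int cpu)
{
	cpumask_t cpus = cpumask_of_cpu(cpu);

	/* every other cpu spins with interrupts off while fn runs */
	return stop_machine(fn, data, &cpus);
}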
#endif /* _LINUX_STOP_MACHINE */

View File

@@ -278,4 +278,6 @@ static inline void register_nosave_region_late(unsigned long b, unsigned long e)
}
#endif
extern struct mutex pm_mutex;
#endif /* _LINUX_SUSPEND_H */

View File

@@ -237,7 +237,6 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t,
/* linux/mm/swapfile.c */
extern long total_swap_pages;
extern unsigned int nr_swapfiles;
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
@@ -254,8 +253,6 @@ extern int can_share_swap_page(struct page *);
extern int remove_exclusive_swap_page(struct page *);
struct backing_dev_info;
extern spinlock_t swap_lock;
/* linux/mm/thrash.c */
extern struct mm_struct * swap_token_mm;
extern void grab_swap_token(void);

View File

@@ -947,6 +947,22 @@ struct ctl_table;
struct nsproxy;
struct ctl_table_root;
struct ctl_table_set {
struct list_head list;
struct ctl_table_set *parent;
int (*is_seen)(struct ctl_table_set *);
};
extern void setup_sysctl_set(struct ctl_table_set *p,
struct ctl_table_set *parent,
int (*is_seen)(struct ctl_table_set *));
struct ctl_table_header;
extern void sysctl_head_get(struct ctl_table_header *);
extern void sysctl_head_put(struct ctl_table_header *);
extern int sysctl_is_seen(struct ctl_table_header *);
extern struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *);
extern struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev);
extern struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces,
struct ctl_table_header *prev);
@@ -1049,8 +1065,8 @@ struct ctl_table
struct ctl_table_root {
struct list_head root_list;
struct list_head header_list;
struct list_head *(*lookup)(struct ctl_table_root *root,
struct ctl_table_set default_set;
struct ctl_table_set *(*lookup)(struct ctl_table_root *root,
struct nsproxy *namespaces);
int (*permissions)(struct ctl_table_root *root,
struct nsproxy *namespaces, struct ctl_table *table);
@@ -1063,9 +1079,14 @@ struct ctl_table_header
struct ctl_table *ctl_table;
struct list_head ctl_entry;
int used;
int count;
struct completion *unregistering;
struct ctl_table *ctl_table_arg;
struct ctl_table_root *root;
struct ctl_table_set *set;
struct ctl_table *attached_by;
struct ctl_table *attached_to;
struct ctl_table_header *parent;
};
/* struct ctl_path describes where in the hierarchy a table is added */

View File

@@ -8,8 +8,19 @@
* Blame akpm@osdl.org for all this.
*/
#ifdef CONFIG_TASK_IO_ACCOUNTING
struct task_io_accounting {
#ifdef CONFIG_TASK_XACCT
/* bytes read */
u64 rchar;
/* bytes written */
u64 wchar;
/* # of read syscalls */
u64 syscr;
/* # of write syscalls */
u64 syscw;
#endif /* CONFIG_TASK_XACCT */
#ifdef CONFIG_TASK_IO_ACCOUNTING
/*
* The number of bytes which this task has caused to be read from
* storage.
@@ -30,8 +41,5 @@ struct task_io_accounting {
* information loss in doing that.
*/
u64 cancelled_write_bytes;
#endif /* CONFIG_TASK_IO_ACCOUNTING */
};
#else
struct task_io_accounting {
};
#endif

View File

@@ -40,9 +40,17 @@ static inline void task_io_account_cancelled_write(size_t bytes)
current->ioac.cancelled_write_bytes += bytes;
}
static inline void task_io_accounting_init(struct task_struct *tsk)
static inline void task_io_accounting_init(struct task_io_accounting *ioac)
{
memset(&tsk->ioac, 0, sizeof(tsk->ioac));
memset(ioac, 0, sizeof(*ioac));
}
static inline void task_blk_io_accounting_add(struct task_io_accounting *dst,
struct task_io_accounting *src)
{
dst->read_bytes += src->read_bytes;
dst->write_bytes += src->write_bytes;
dst->cancelled_write_bytes += src->cancelled_write_bytes;
}
#else
@@ -69,9 +77,37 @@ static inline void task_io_account_cancelled_write(size_t bytes)
{
}
static inline void task_io_accounting_init(struct task_struct *tsk)
static inline void task_io_accounting_init(struct task_io_accounting *ioac)
{
}
#endif /* CONFIG_TASK_IO_ACCOUNTING */
#endif /* __TASK_IO_ACCOUNTING_OPS_INCLUDED */
static inline void task_blk_io_accounting_add(struct task_io_accounting *dst,
struct task_io_accounting *src)
{
}
#endif /* CONFIG_TASK_IO_ACCOUNTING */
#ifdef CONFIG_TASK_XACCT
static inline void task_chr_io_accounting_add(struct task_io_accounting *dst,
struct task_io_accounting *src)
{
dst->rchar += src->rchar;
dst->wchar += src->wchar;
dst->syscr += src->syscr;
dst->syscw += src->syscw;
}
#else
static inline void task_chr_io_accounting_add(struct task_io_accounting *dst,
struct task_io_accounting *src)
{
}
#endif /* CONFIG_TASK_XACCT */
static inline void task_io_accounting_add(struct task_io_accounting *dst,
struct task_io_accounting *src)
{
task_chr_io_accounting_add(dst, src);
task_blk_io_accounting_add(dst, src);
}
#endif /* __TASK_IO_ACCOUNTING_OPS_INCLUDED */

include/linux/tracehook.h (new file, 576 lines)
View File

@@ -0,0 +1,576 @@
/*
* Tracing hooks
*
* Copyright (C) 2008 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License v.2.
*
* This file defines hook entry points called by core code where
* user tracing/debugging support might need to do something. These
* entry points are called tracehook_*(). Each hook declared below
* has a detailed kerneldoc comment giving the context (locking et
* al) from which it is called, and the meaning of its return value.
*
* Each function here typically has only one call site, so it is ok
* to have some nontrivial tracehook_*() inlines. In all cases, the
* fast path when no tracing is enabled should be very short.
*
* The purpose of this file and the tracehook_* layer is to consolidate
* the interface that the kernel core and arch code uses to enable any
* user debugging or tracing facility (such as ptrace). The interfaces
* here are carefully documented so that maintainers of core and arch
* code do not need to think about the implementation details of the
* tracing facilities. Likewise, maintainers of the tracing code do not
* need to understand all the calling core or arch code in detail, just
* documented circumstances of each call, such as locking conditions.
*
* If the calling core code changes so that locking is different, then
* it is ok to change the interface documented here. The maintainer of
* core code changing should notify the maintainers of the tracing code
* that they need to work out the change.
*
* Some tracehook_*() inlines take arguments that the current tracing
* implementations might not necessarily use. These function signatures
* are chosen to pass in all the information that is on hand in the
* caller and might conceivably be relevant to a tracer, so that the
* core code won't have to be updated when tracing adds more features.
* If a call site changes so that some of those parameters are no longer
* already on hand without extra work, then the tracehook_* interface
* can change so there is no make-work burden on the core code. The
* maintainer of core code changing should notify the maintainers of the
* tracing code that they need to work out the change.
*/
#ifndef _LINUX_TRACEHOOK_H
#define _LINUX_TRACEHOOK_H 1
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/security.h>
struct linux_binprm;
/**
* tracehook_expect_breakpoints - guess if task memory might be touched
* @task: current task, making a new mapping
*
* Return nonzero if @task is expected to want breakpoint insertion in
* its memory at some point. A zero return is no guarantee it won't
* be done, but this is a hint that it's known to be likely.
*
* May be called with @task->mm->mmap_sem held for writing.
*/
static inline int tracehook_expect_breakpoints(struct task_struct *task)
{
return (task_ptrace(task) & PT_PTRACED) != 0;
}
/*
* ptrace report for syscall entry and exit looks identical.
*/
static inline void ptrace_report_syscall(struct pt_regs *regs)
{
int ptrace = task_ptrace(current);
if (!(ptrace & PT_PTRACED))
return;
ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
* stopping signal is not SIGTRAP. -brl
*/
if (current->exit_code) {
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}
}
/**
* tracehook_report_syscall_entry - task is about to attempt a system call
* @regs: user register state of current task
*
* This will be called if %TIF_SYSCALL_TRACE has been set, when the
* current task has just entered the kernel for a system call.
* Full user register state is available here. Changing the values
* in @regs can affect the system call number and arguments to be tried.
* It is safe to block here, preventing the system call from beginning.
*
* Returns zero normally, or nonzero if the calling arch code should abort
* the system call. That must prevent normal entry so no system call is
* made. If @task ever returns to user mode after this, its register state
* is unspecified, but should be something harmless like an %ENOSYS error
* return. It should preserve enough information so that syscall_rollback()
* can work (see asm-generic/syscall.h).
*
* Called without locks, just after entering kernel mode.
*/
static inline __must_check int tracehook_report_syscall_entry(
struct pt_regs *regs)
{
ptrace_report_syscall(regs);
return 0;
}
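/*
 * [Editorial sketch, not part of this patch] How an arch entry path
 * might consume the return value documented above; the -1 convention
 * for aborting is arch-specific and assumed here.
 */
static inline long example_syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		return -1L;	/* abort: arch skips the system call */
	return 0;		/* proceed with the system call */
}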
/**
* tracehook_report_syscall_exit - task has just finished a system call
* @regs: user register state of current task
* @step: nonzero if simulating single-step or block-step
*
* This will be called if %TIF_SYSCALL_TRACE has been set, when the
* current task has just finished an attempted system call. Full
* user register state is available here. It is safe to block here,
* preventing signals from being processed.
*
* If @step is nonzero, this report is also in lieu of the normal
* trap that would follow the system call instruction because
* user_enable_block_step() or user_enable_single_step() was used.
* In this case, %TIF_SYSCALL_TRACE might not be set.
*
* Called without locks, just before checking for pending signals.
*/
static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
{
ptrace_report_syscall(regs);
}
/**
* tracehook_unsafe_exec - check for exec declared unsafe due to tracing
* @task: current task doing exec
*
* Return %LSM_UNSAFE_* bits applied to an exec because of tracing.
*
* Called with task_lock() held on @task.
*/
static inline int tracehook_unsafe_exec(struct task_struct *task)
{
int unsafe = 0;
int ptrace = task_ptrace(task);
if (ptrace & PT_PTRACED) {
if (ptrace & PT_PTRACE_CAP)
unsafe |= LSM_UNSAFE_PTRACE_CAP;
else
unsafe |= LSM_UNSAFE_PTRACE;
}
return unsafe;
}
/**
* tracehook_tracer_task - return the task that is tracing the given task
* @tsk: task to consider
*
* Returns NULL if no one is tracing @task, or the &struct task_struct
* pointer to its tracer.
*
* Must be called under rcu_read_lock(). The pointer returned might be kept
* live only by RCU. During exec, this may be called with task_lock()
* held on @task, still held from when tracehook_unsafe_exec() was called.
*/
static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk)
{
if (task_ptrace(tsk) & PT_PTRACED)
return rcu_dereference(tsk->parent);
return NULL;
}
/**
* tracehook_report_exec - a successful exec was completed
* @fmt: &struct linux_binfmt that performed the exec
* @bprm: &struct linux_binprm containing exec details
* @regs: user-mode register state
*
* An exec just completed, we are shortly going to return to user mode.
* The freshly initialized register state can be seen and changed in @regs.
* The name, file and other pointers in @bprm are still on hand to be
* inspected, but will be freed as soon as this returns.
*
* Called with no locks, but with some kernel resources held live
* and a reference on @fmt->module.
*/
static inline void tracehook_report_exec(struct linux_binfmt *fmt,
struct linux_binprm *bprm,
struct pt_regs *regs)
{
if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) &&
unlikely(task_ptrace(current) & PT_PTRACED))
send_sig(SIGTRAP, current, 0);
}
/**
* tracehook_report_exit - task has begun to exit
* @exit_code: pointer to value destined for @current->exit_code
*
* @exit_code points to the value passed to do_exit(), which tracing
* might change here. This is almost the first thing in do_exit(),
* before freeing any resources or setting the %PF_EXITING flag.
*
* Called with no locks held.
*/
static inline void tracehook_report_exit(long *exit_code)
{
ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code);
}
/**
* tracehook_prepare_clone - prepare for new child to be cloned
* @clone_flags: %CLONE_* flags from clone/fork/vfork system call
*
* This is called before a new user task is to be cloned.
* Its return value will be passed to tracehook_finish_clone().
*
* Called with no locks held.
*/
static inline int tracehook_prepare_clone(unsigned clone_flags)
{
if (clone_flags & CLONE_UNTRACED)
return 0;
if (clone_flags & CLONE_VFORK) {
if (current->ptrace & PT_TRACE_VFORK)
return PTRACE_EVENT_VFORK;
} else if ((clone_flags & CSIGNAL) != SIGCHLD) {
if (current->ptrace & PT_TRACE_CLONE)
return PTRACE_EVENT_CLONE;
} else if (current->ptrace & PT_TRACE_FORK)
return PTRACE_EVENT_FORK;
return 0;
}
/**
* tracehook_finish_clone - new child created and being attached
* @child: new child task
* @clone_flags: %CLONE_* flags from clone/fork/vfork system call
* @trace: return value from tracehook_prepare_clone()
*
* This is called immediately after adding @child to its parent's children list.
* The @trace value is that returned by tracehook_prepare_clone().
*
* Called with current's siglock and write_lock_irq(&tasklist_lock) held.
*/
static inline void tracehook_finish_clone(struct task_struct *child,
unsigned long clone_flags, int trace)
{
ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace);
}
/**
* tracehook_report_clone - in parent, new child is about to start running
* @trace: return value from tracehook_prepare_clone()
* @regs: parent's user register state
* @clone_flags: flags from parent's system call
* @pid: new child's PID in the parent's namespace
* @child: new child task
*
* Called after a child is set up, but before it has been started
* running. @trace is the value returned by tracehook_prepare_clone().
* This is not a good place to block, because the child has not started
* yet. Suspend the child here if desired, and then block in
* tracehook_report_clone_complete(). This must prevent the child from
* self-reaping if tracehook_report_clone_complete() uses the @child
* pointer; otherwise it might have died and been released by the time
* tracehook_report_clone_complete() is called.
*
* Called with no locks held, but the child cannot run until this returns.
*/
static inline void tracehook_report_clone(int trace, struct pt_regs *regs,
unsigned long clone_flags,
pid_t pid, struct task_struct *child)
{
if (unlikely(trace)) {
/*
* The child starts up with an immediate SIGSTOP.
*/
sigaddset(&child->pending.signal, SIGSTOP);
set_tsk_thread_flag(child, TIF_SIGPENDING);
}
}
/**
* tracehook_report_clone_complete - new child is running
* @trace: return value from tracehook_prepare_clone()
* @regs: parent's user register state
* @clone_flags: flags from parent's system call
* @pid: new child's PID in the parent's namespace
* @child: child task, already running
*
* This is called just after the child has started running. This is
* just before the clone/fork syscall returns, or blocks for vfork
* child completion if @clone_flags has the %CLONE_VFORK bit set.
* The @child pointer may be invalid if a self-reaping child died and
* tracehook_report_clone() took no action to prevent it from self-reaping.
*
* Called with no locks held.
*/
static inline void tracehook_report_clone_complete(int trace,
struct pt_regs *regs,
unsigned long clone_flags,
pid_t pid,
struct task_struct *child)
{
if (unlikely(trace))
ptrace_event(0, trace, pid);
}
/**
* tracehook_report_vfork_done - vfork parent's child has exited or exec'd
* @child: child task, already running
* @pid: new child's PID in the parent's namespace
*
* Called after a %CLONE_VFORK parent has waited for the child to complete.
* The clone/vfork system call will return immediately after this.
* The @child pointer may be invalid if a self-reaping child died and
* tracehook_report_clone() took no action to prevent it from self-reaping.
*
* Called with no locks held.
*/
static inline void tracehook_report_vfork_done(struct task_struct *child,
pid_t pid)
{
ptrace_event(PT_TRACE_VFORK_DONE, PTRACE_EVENT_VFORK_DONE, pid);
}
/**
* tracehook_prepare_release_task - task is being reaped, clean up tracing
* @task: task in %EXIT_DEAD state
*
* This is called in release_task() just before @task gets finally reaped
* and freed. This would be the ideal place to remove and clean up any
* tracing-related state for @task.
*
* Called with no locks held.
*/
static inline void tracehook_prepare_release_task(struct task_struct *task)
{
}
/**
* tracehook_finish_release_task - final tracing clean-up
* @task: task in %EXIT_DEAD state
*
* This is called in release_task() when @task is being in the middle of
* being reaped. After this, there must be no tracing entanglements.
*
* Called with write_lock_irq(&tasklist_lock) held.
*/
static inline void tracehook_finish_release_task(struct task_struct *task)
{
ptrace_release_task(task);
}
/**
* tracehook_signal_handler - signal handler setup is complete
* @sig: number of signal being delivered
* @info: siginfo_t of signal being delivered
* @ka: sigaction setting that chose the handler
* @regs: user register state
* @stepping: nonzero if debugger single-step or block-step in use
*
* Called by the arch code after a signal handler has been set up.
* Register and stack state reflects the user handler about to run.
* Signal mask changes have already been made.
*
* Called without locks, shortly before returning to user mode
* (or handling more signals).
*/
static inline void tracehook_signal_handler(int sig, siginfo_t *info,
const struct k_sigaction *ka,
struct pt_regs *regs, int stepping)
{
if (stepping)
ptrace_notify(SIGTRAP);
}
/**
* tracehook_consider_ignored_signal - suppress short-circuit of ignored signal
* @task: task receiving the signal
* @sig: signal number being sent
* @handler: %SIG_IGN or %SIG_DFL
*
* Return zero iff tracing doesn't care to examine this ignored signal,
* so it can short-circuit normal delivery and never even get queued.
* Either @handler is %SIG_DFL and @sig's default is ignore, or it's %SIG_IGN.
*
* Called with @task->sighand->siglock held.
*/
static inline int tracehook_consider_ignored_signal(struct task_struct *task,
int sig,
void __user *handler)
{
return (task_ptrace(task) & PT_PTRACED) != 0;
}
/**
* tracehook_consider_fatal_signal - suppress special handling of fatal signal
* @task: task receiving the signal
* @sig: signal number being sent
* @handler: %SIG_DFL or %SIG_IGN
*
* Return nonzero to prevent special handling of this termination signal.
* Normally @handler is %SIG_DFL. It can be %SIG_IGN if @sig is ignored,
* in which case force_sig() is about to reset it to %SIG_DFL.
* When this returns zero, this signal might cause a quick termination
* that does not give the debugger a chance to intercept the signal.
*
* Called with or without @task->sighand->siglock held.
*/
static inline int tracehook_consider_fatal_signal(struct task_struct *task,
int sig,
void __user *handler)
{
return (task_ptrace(task) & PT_PTRACED) != 0;
}
/**
* tracehook_force_sigpending - let tracing force signal_pending(current) on
*
* Called when recomputing our signal_pending() flag. Return nonzero
* to force the signal_pending() flag on, so that tracehook_get_signal()
* will be called before the next return to user mode.
*
* Called with @current->sighand->siglock held.
*/
static inline int tracehook_force_sigpending(void)
{
return 0;
}
/**
* tracehook_get_signal - deliver synthetic signal to traced task
* @task: @current
* @regs: task_pt_regs(@current)
* @info: details of synthetic signal
* @return_ka: sigaction for synthetic signal
*
* Return zero to check for a real pending signal normally.
* Return -1 after releasing the siglock to repeat the check.
* Return a signal number to induce an artificial signal delivery,
* setting *@info and *@return_ka to specify its details and behavior.
*
* The @return_ka->sa_handler value controls the disposition of the
* signal, no matter the signal number. For %SIG_DFL, the return value
* is a representative signal to indicate the behavior (e.g. %SIGTERM
* for death, %SIGQUIT for core dump, %SIGSTOP for job control stop,
* %SIGTSTP for stop unless in an orphaned pgrp), but the signal number
* reported will be @info->si_signo instead.
*
* Called with @task->sighand->siglock held, before dequeuing pending signals.
*/
static inline int tracehook_get_signal(struct task_struct *task,
struct pt_regs *regs,
siginfo_t *info,
struct k_sigaction *return_ka)
{
return 0;
}
/**
* tracehook_notify_jctl - report about job control stop/continue
* @notify: nonzero if this is the last thread in the group to stop
* @why: %CLD_STOPPED or %CLD_CONTINUED
*
* This is called when we might call do_notify_parent_cldstop().
* It's called when about to stop for job control; we are already in
* %TASK_STOPPED state, about to call schedule(). It's also called when
* a delayed %CLD_STOPPED or %CLD_CONTINUED report is ready to be made.
*
* Return nonzero to generate a %SIGCHLD with @why, which is
* normal if @notify is nonzero.
*
* Called with no locks held.
*/
static inline int tracehook_notify_jctl(int notify, int why)
{
return notify || (current->ptrace & PT_PTRACED);
}
/**
* tracehook_notify_death - task is dead, ready to notify parent
* @task: @current task now exiting
* @death_cookie: value to pass to tracehook_report_death()
* @group_dead: nonzero if this was the last thread in the group to die
*
* Return the signal number to send our parent with do_notify_parent(), or
* zero to send no signal and leave a zombie, or -1 to self-reap right now.
*
* Called with write_lock_irq(&tasklist_lock) held.
*/
static inline int tracehook_notify_death(struct task_struct *task,
void **death_cookie, int group_dead)
{
if (task->exit_signal == -1)
return task->ptrace ? SIGCHLD : -1;
/*
* If something other than our normal parent is ptracing us, then
* send it a SIGCHLD instead of honoring exit_signal. exit_signal
* only has special meaning to our real parent.
*/
if (thread_group_empty(task) && !ptrace_reparented(task))
return task->exit_signal;
return task->ptrace ? SIGCHLD : 0;
}
/**
* tracehook_report_death - task is dead and ready to be reaped
* @task: @current task now exiting
* @signal: signal number sent to parent, or 0 or -1
* @death_cookie: value passed back from tracehook_notify_death()
* @group_dead: nonzero if this was the last thread in the group to die
*
* Thread has just become a zombie or is about to self-reap. If positive,
* @signal is the signal number just sent to the parent (usually %SIGCHLD).
* If @signal is -1, this thread will self-reap. If @signal is 0, this is
* a delayed_group_leader() zombie. The @death_cookie was passed back by
* tracehook_notify_death().
*
* If normal reaping is not inhibited, @task->exit_state might be changing
* in parallel.
*
* Called without locks.
*/
static inline void tracehook_report_death(struct task_struct *task,
int signal, void *death_cookie,
int group_dead)
{
}
#ifdef TIF_NOTIFY_RESUME
/**
* set_notify_resume - cause tracehook_notify_resume() to be called
* @task: task that will call tracehook_notify_resume()
*
* Calling this arranges that @task will call tracehook_notify_resume()
* before returning to user mode. If it's already running in user mode,
* it will enter the kernel and call tracehook_notify_resume() soon.
* If it's blocked, it will not be woken.
*/
static inline void set_notify_resume(struct task_struct *task)
{
if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME))
kick_process(task);
}
/**
* tracehook_notify_resume - report when about to return to user mode
* @regs: user-mode registers of @current task
*
* This is called when %TIF_NOTIFY_RESUME has been set. Now we are
* about to return to user mode, and the user state in @regs can be
* inspected or adjusted. The caller in arch code has cleared
* %TIF_NOTIFY_RESUME before the call. If the flag gets set again
* asynchronously, this will be called again before we return to
* user mode.
*
* Called without locks.
*/
static inline void tracehook_notify_resume(struct pt_regs *regs)
{
}
#endif /* TIF_NOTIFY_RESUME */
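/*
 * [Editorial sketch, not part of this patch] The arch return-to-user
 * path that pairs with set_notify_resume(); the flag handling mirrors
 * the usual do_notify_resume() shape but is an assumption here.
 */
static inline void example_do_notify_resume(struct pt_regs *regs,
					    unsigned long ti_flags)
{
	if (ti_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}
}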
#endif /* <linux/tracehook.h> */

View File

@@ -17,6 +17,21 @@
#if defined(CONFIG_VIDEO_V4L1_COMPAT) || !defined (__KERNEL__)
#define VID_TYPE_CAPTURE 1 /* Can capture */
#define VID_TYPE_TUNER 2 /* Can tune */
#define VID_TYPE_TELETEXT 4 /* Does teletext */
#define VID_TYPE_OVERLAY 8 /* Overlay onto frame buffer */
#define VID_TYPE_CHROMAKEY 16 /* Overlay by chromakey */
#define VID_TYPE_CLIPPING 32 /* Can clip */
#define VID_TYPE_FRAMERAM 64 /* Uses the frame buffer memory */
#define VID_TYPE_SCALES 128 /* Scalable */
#define VID_TYPE_MONOCHROME 256 /* Monochrome only */
#define VID_TYPE_SUBCAPTURE 512 /* Can capture subareas of the image */
#define VID_TYPE_MPEG_DECODER 1024 /* Can decode MPEG streams */
#define VID_TYPE_MPEG_ENCODER 2048 /* Can encode MPEG streams */
#define VID_TYPE_MJPEG_DECODER 4096 /* Can decode MJPEG streams */
#define VID_TYPE_MJPEG_ENCODER 8192 /* Can encode MJPEG streams */
struct video_capability
{
char name[32];

View File

@@ -71,6 +71,11 @@
*/
#define VIDEO_MAX_FRAME 32
#ifndef __KERNEL__
/* These defines are V4L1 specific and should not be used with the V4L2 API!
They will be removed from this header in the future. */
#define VID_TYPE_CAPTURE 1 /* Can capture */
#define VID_TYPE_TUNER 2 /* Can tune */
#define VID_TYPE_TELETEXT 4 /* Does teletext */
@@ -85,14 +90,15 @@
#define VID_TYPE_MPEG_ENCODER 2048 /* Can encode MPEG streams */
#define VID_TYPE_MJPEG_DECODER 4096 /* Can decode MJPEG streams */
#define VID_TYPE_MJPEG_ENCODER 8192 /* Can encode MJPEG streams */
#endif
/*
* M I S C E L L A N E O U S
*/
/* Four-character-code (FOURCC) */
#define v4l2_fourcc(a,b,c,d)\
(((__u32)(a)<<0)|((__u32)(b)<<8)|((__u32)(c)<<16)|((__u32)(d)<<24))
#define v4l2_fourcc(a, b, c, d)\
((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24))
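For example, with this spelling the first character lands in the least significant byte of the resulting code:

/*
 * Worked example:
 *   v4l2_fourcc('Y', 'U', 'Y', 'V')
 * = 'Y' | ('U' << 8) | ('Y' << 16) | ('V' << 24)
 * = 0x59 | 0x5500 | 0x590000 | 0x56000000
 * = 0x56595559
 */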
/*
* E N U M S
@@ -226,8 +232,7 @@ struct v4l2_fract {
/*
* D R I V E R C A P A B I L I T I E S
*/
struct v4l2_capability
{
struct v4l2_capability {
__u8 driver[16]; /* i.e. "bttv" */
__u8 card[32]; /* i.e. "Hauppauge WinTV" */
__u8 bus_info[32]; /* "PCI:" + pci_name(pci_dev) */
@@ -259,8 +264,7 @@ struct v4l2_capability
/*
* V I D E O I M A G E F O R M A T
*/
struct v4l2_pix_format
{
struct v4l2_pix_format {
__u32 width;
__u32 height;
__u32 pixelformat;
@@ -272,68 +276,69 @@ struct v4l2_pix_format
};
/* Pixel format FOURCC depth Description */
#define V4L2_PIX_FMT_RGB332 v4l2_fourcc('R','G','B','1') /* 8 RGB-3-3-2 */
#define V4L2_PIX_FMT_RGB444 v4l2_fourcc('R','4','4','4') /* 16 xxxxrrrr ggggbbbb */
#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R','G','B','O') /* 16 RGB-5-5-5 */
#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R','G','B','P') /* 16 RGB-5-6-5 */
#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R','G','B','Q') /* 16 RGB-5-5-5 BE */
#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R','G','B','R') /* 16 RGB-5-6-5 BE */
#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B','G','R','3') /* 24 BGR-8-8-8 */
#define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R','G','B','3') /* 24 RGB-8-8-8 */
#define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B','G','R','4') /* 32 BGR-8-8-8-8 */
#define V4L2_PIX_FMT_RGB32 v4l2_fourcc('R','G','B','4') /* 32 RGB-8-8-8-8 */
#define V4L2_PIX_FMT_GREY v4l2_fourcc('G','R','E','Y') /* 8 Greyscale */
#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y','1','6',' ') /* 16 Greyscale */
#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P','A','L','8') /* 8 8-bit palette */
#define V4L2_PIX_FMT_YVU410 v4l2_fourcc('Y','V','U','9') /* 9 YVU 4:1:0 */
#define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y','V','1','2') /* 12 YVU 4:2:0 */
#define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y','U','Y','V') /* 16 YUV 4:2:2 */
#define V4L2_PIX_FMT_UYVY v4l2_fourcc('U','Y','V','Y') /* 16 YUV 4:2:2 */
#define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4','2','2','P') /* 16 YVU422 planar */
#define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4','1','1','P') /* 16 YVU411 planar */
#define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y','4','1','P') /* 12 YUV 4:1:1 */
#define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y','4','4','4') /* 16 xxxxyyyy uuuuvvvv */
#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y','U','V','O') /* 16 YUV-5-5-5 */
#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y','U','V','P') /* 16 YUV-5-6-5 */
#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y','U','V','4') /* 32 YUV-8-8-8-8 */
#define V4L2_PIX_FMT_RGB332 v4l2_fourcc('R', 'G', 'B', '1') /* 8 RGB-3-3-2 */
#define V4L2_PIX_FMT_RGB444 v4l2_fourcc('R', '4', '4', '4') /* 16 xxxxrrrr ggggbbbb */
#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R', 'G', 'B', 'O') /* 16 RGB-5-5-5 */
#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */
#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */
#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */
#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */
#define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R', 'G', 'B', '3') /* 24 RGB-8-8-8 */
#define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B', 'G', 'R', '4') /* 32 BGR-8-8-8-8 */
#define V4L2_PIX_FMT_RGB32 v4l2_fourcc('R', 'G', 'B', '4') /* 32 RGB-8-8-8-8 */
#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */
#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */
#define V4L2_PIX_FMT_YVU410 v4l2_fourcc('Y', 'V', 'U', '9') /* 9 YVU 4:1:0 */
#define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y', 'V', '1', '2') /* 12 YVU 4:2:0 */
#define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y', 'U', 'Y', 'V') /* 16 YUV 4:2:2 */
#define V4L2_PIX_FMT_UYVY v4l2_fourcc('U', 'Y', 'V', 'Y') /* 16 YUV 4:2:2 */
#define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4', '2', '2', 'P') /* 16 YVU422 planar */
#define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4', '1', '1', 'P') /* 16 YVU411 planar */
#define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y', '4', '1', 'P') /* 12 YUV 4:1:1 */
#define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y', '4', '4', '4') /* 16 xxxxyyyy uuuuvvvv */
#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y', 'U', 'V', 'O') /* 16 YUV-5-5-5 */
#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y', 'U', 'V', 'P') /* 16 YUV-5-6-5 */
#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y', 'U', 'V', '4') /* 32 YUV-8-8-8-8 */
/* two planes -- one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N','V','1','2') /* 12 Y/CbCr 4:2:0 */
#define V4L2_PIX_FMT_NV21 v4l2_fourcc('N','V','2','1') /* 12 Y/CrCb 4:2:0 */
#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */
#define V4L2_PIX_FMT_NV21 v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */
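As an aside on the layout these two codes describe, here is a sketch (a hypothetical helper, not part of this header) of why NV12/NV21 average 12 bits per pixel: a full-resolution Y plane is followed by one interleaved chroma plane subsampled 2x2.

static inline __u32 example_nv12_size(__u32 width, __u32 height)
{
	__u32 y_plane = width * height;		/* 8 bits per pixel */
	__u32 c_plane = width * height / 2;	/* Cb+Cr, 2x2 subsampled */
	return y_plane + c_plane;		/* 12 bits/pixel average */
}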
/* The following formats are not defined in the V4L2 specification */
#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y','U','V','9') /* 9 YUV 4:1:0 */
#define V4L2_PIX_FMT_YUV420 v4l2_fourcc('Y','U','1','2') /* 12 YUV 4:2:0 */
#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y','Y','U','V') /* 16 YUV 4:2:2 */
#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H','I','2','4') /* 8 8-bit color */
#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H','M','1','2') /* 8 YUV 4:2:0 16x16 macroblocks */
#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */
#define V4L2_PIX_FMT_YUV420 v4l2_fourcc('Y', 'U', '1', '2') /* 12 YUV 4:2:0 */
#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y', 'Y', 'U', 'V') /* 16 YUV 4:2:2 */
#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* 8 8-bit color */
#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H', 'M', '1', '2') /* 8 YUV 4:2:0 16x16 macroblocks */
/* see http://www.siliconimaging.com/RGB%20Bayer.htm */
#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B','A','8','1') /* 8 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G','B','R','G') /* 8 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B','Y','R','2') /* 16 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. */
/* compressed formats */
#define V4L2_PIX_FMT_MJPEG v4l2_fourcc('M','J','P','G') /* Motion-JPEG */
#define V4L2_PIX_FMT_JPEG v4l2_fourcc('J','P','E','G') /* JFIF JPEG */
#define V4L2_PIX_FMT_DV v4l2_fourcc('d','v','s','d') /* 1394 */
#define V4L2_PIX_FMT_MPEG v4l2_fourcc('M','P','E','G') /* MPEG-1/2/4 */
#define V4L2_PIX_FMT_MJPEG v4l2_fourcc('M', 'J', 'P', 'G') /* Motion-JPEG */
#define V4L2_PIX_FMT_JPEG v4l2_fourcc('J', 'P', 'E', 'G') /* JFIF JPEG */
#define V4L2_PIX_FMT_DV v4l2_fourcc('d', 'v', 's', 'd') /* 1394 */
#define V4L2_PIX_FMT_MPEG v4l2_fourcc('M', 'P', 'E', 'G') /* MPEG-1/2/4 */
/* Vendor-specific formats */
#define V4L2_PIX_FMT_WNVA v4l2_fourcc('W','N','V','A') /* Winnov hw compress */
#define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S','9','1','0') /* SN9C10x compression */
#define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P','W','C','1') /* pwc older webcam */
#define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P','W','C','2') /* pwc newer webcam */
#define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E','6','2','5') /* ET61X251 compression */
#define V4L2_PIX_FMT_SPCA501 v4l2_fourcc('S','5','0','1') /* YUYV per line */
#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S','5','6','1') /* compressed GBRG bayer */
#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P','2','0','7') /* compressed BGGR bayer */
#define V4L2_PIX_FMT_WNVA v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */
#define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S', '9', '1', '0') /* SN9C10x compression */
#define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P', 'W', 'C', '1') /* pwc older webcam */
#define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P', 'W', 'C', '2') /* pwc newer webcam */
#define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E', '6', '2', '5') /* ET61X251 compression */
#define V4L2_PIX_FMT_SPCA501 v4l2_fourcc('S', '5', '0', '1') /* YUYV per line */
#define V4L2_PIX_FMT_SPCA505 v4l2_fourcc('S', '5', '0', '5') /* YYUV per line */
#define V4L2_PIX_FMT_SPCA508 v4l2_fourcc('S', '5', '0', '8') /* YUVY per line */
#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */
#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */
/*
* F O R M A T E N U M E R A T I O N
*/
struct v4l2_fmtdesc
{
struct v4l2_fmtdesc {
__u32 index; /* Format number */
enum v4l2_buf_type type; /* buffer type */
__u32 flags;
@@ -349,21 +354,18 @@ struct v4l2_fmtdesc
/*
* F R A M E S I Z E E N U M E R A T I O N
*/
enum v4l2_frmsizetypes
{
enum v4l2_frmsizetypes {
V4L2_FRMSIZE_TYPE_DISCRETE = 1,
V4L2_FRMSIZE_TYPE_CONTINUOUS = 2,
V4L2_FRMSIZE_TYPE_STEPWISE = 3,
};
struct v4l2_frmsize_discrete
{
struct v4l2_frmsize_discrete {
__u32 width; /* Frame width [pixel] */
__u32 height; /* Frame height [pixel] */
};
struct v4l2_frmsize_stepwise
{
struct v4l2_frmsize_stepwise {
__u32 min_width; /* Minimum frame width [pixel] */
__u32 max_width; /* Maximum frame width [pixel] */
__u32 step_width; /* Frame width step size [pixel] */
@@ -372,8 +374,7 @@ struct v4l2_frmsize_stepwise
__u32 step_height; /* Frame height step size [pixel] */
};
struct v4l2_frmsizeenum
{
struct v4l2_frmsizeenum {
__u32 index; /* Frame size number */
__u32 pixel_format; /* Pixel format */
__u32 type; /* Frame size type the device supports. */
@@ -389,22 +390,19 @@ struct v4l2_frmsizeenum
/*
* F R A M E R A T E E N U M E R A T I O N
*/
enum v4l2_frmivaltypes
{
enum v4l2_frmivaltypes {
V4L2_FRMIVAL_TYPE_DISCRETE = 1,
V4L2_FRMIVAL_TYPE_CONTINUOUS = 2,
V4L2_FRMIVAL_TYPE_STEPWISE = 3,
};
struct v4l2_frmival_stepwise
{
struct v4l2_frmival_stepwise {
struct v4l2_fract min; /* Minimum frame interval [s] */
struct v4l2_fract max; /* Maximum frame interval [s] */
struct v4l2_fract step; /* Frame interval step size [s] */
};
struct v4l2_frmivalenum
{
struct v4l2_frmivalenum {
__u32 index; /* Frame format index */
__u32 pixel_format; /* Pixel format */
__u32 width; /* Frame width */
@@ -423,8 +421,7 @@ struct v4l2_frmivalenum
/*
* T I M E C O D E
*/
struct v4l2_timecode
{
struct v4l2_timecode {
__u32 type;
__u32 flags;
__u8 frames;
@@ -449,8 +446,7 @@ struct v4l2_timecode
#define V4L2_TC_USERBITS_8BITCHARS 0x0008
/* The above is based on SMPTE timecodes */
struct v4l2_jpegcompression
{
struct v4l2_jpegcompression {
int quality;
int APPn; /* Number of APP segment to be written,
@@ -482,16 +478,14 @@ struct v4l2_jpegcompression
/*
* M E M O R Y - M A P P I N G B U F F E R S
*/
struct v4l2_requestbuffers
{
struct v4l2_requestbuffers {
__u32 count;
enum v4l2_buf_type type;
enum v4l2_memory memory;
__u32 reserved[2];
};
struct v4l2_buffer
{
struct v4l2_buffer {
__u32 index;
enum v4l2_buf_type type;
__u32 bytesused;
@@ -525,13 +519,12 @@ struct v4l2_buffer
/*
* O V E R L A Y P R E V I E W
*/
struct v4l2_framebuffer
{
struct v4l2_framebuffer {
__u32 capability;
__u32 flags;
/* FIXME: in theory we should pass something like PCI device + memory
* region + offset instead of some physical address */
void* base;
void *base;
struct v4l2_pix_format fmt;
};
/* Flags for the 'capability' field. Read only */
@@ -550,14 +543,12 @@ struct v4l2_framebuffer
#define V4L2_FBUF_FLAG_GLOBAL_ALPHA 0x0010
#define V4L2_FBUF_FLAG_LOCAL_INV_ALPHA 0x0020
struct v4l2_clip
{
struct v4l2_clip {
struct v4l2_rect c;
struct v4l2_clip __user *next;
};
struct v4l2_window
{
struct v4l2_window {
struct v4l2_rect w;
enum v4l2_field field;
__u32 chromakey;
@@ -570,8 +561,7 @@ struct v4l2_window
/*
* C A P T U R E P A R A M E T E R S
*/
struct v4l2_captureparm
{
struct v4l2_captureparm {
__u32 capability; /* Supported modes */
__u32 capturemode; /* Current mode */
struct v4l2_fract timeperframe; /* Time per frame in .1us units */
@@ -584,8 +574,7 @@ struct v4l2_captureparm
#define V4L2_MODE_HIGHQUALITY 0x0001 /* High quality imaging mode */
#define V4L2_CAP_TIMEPERFRAME 0x1000 /* timeperframe field is supported */
struct v4l2_outputparm
{
struct v4l2_outputparm {
__u32 capability; /* Supported modes */
__u32 outputmode; /* Current mode */
struct v4l2_fract timeperframe; /* Time per frame in seconds */
@@ -702,8 +691,7 @@ typedef __u64 v4l2_std_id;
#define V4L2_STD_ALL (V4L2_STD_525_60 |\
V4L2_STD_625_50)
struct v4l2_standard
{
struct v4l2_standard {
__u32 index;
v4l2_std_id id;
__u8 name[24];
@@ -715,8 +703,7 @@ struct v4l2_standard
/*
* V I D E O I N P U T S
*/
struct v4l2_input
{
struct v4l2_input {
__u32 index; /* Which input */
__u8 name[32]; /* Label */
__u32 type; /* Type of input */
@@ -753,8 +740,7 @@ struct v4l2_input
/*
* V I D E O O U T P U T S
*/
struct v4l2_output
{
struct v4l2_output {
__u32 index; /* Which output */
__u8 name[32]; /* Label */
__u32 type; /* Type of output */
@@ -771,14 +757,12 @@ struct v4l2_output
/*
* C O N T R O L S
*/
struct v4l2_control
{
struct v4l2_control {
__u32 id;
__s32 value;
};
struct v4l2_ext_control
{
struct v4l2_ext_control {
__u32 id;
__u32 reserved2[2];
union {
@@ -788,8 +772,7 @@ struct v4l2_ext_control
};
} __attribute__ ((packed));
struct v4l2_ext_controls
{
struct v4l2_ext_controls {
__u32 ctrl_class;
__u32 count;
__u32 error_idx;
@@ -807,8 +790,7 @@ struct v4l2_ext_controls
#define V4L2_CTRL_DRIVER_PRIV(id) (((id) & 0xffff) >= 0x1000)
/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
struct v4l2_queryctrl
{
struct v4l2_queryctrl {
__u32 id;
enum v4l2_ctrl_type type;
__u8 name[32]; /* Whatever */
@@ -821,8 +803,7 @@ struct v4l2_queryctrl
};
/* Used in the VIDIOC_QUERYMENU ioctl for querying menu items */
struct v4l2_querymenu
{
struct v4l2_querymenu {
__u32 id;
__u32 index;
__u8 name[32]; /* Whatever */
@@ -1104,8 +1085,7 @@ enum v4l2_exposure_auto_type {
/*
* T U N I N G
*/
struct v4l2_tuner
{
struct v4l2_tuner {
__u32 index;
__u8 name[32];
enum v4l2_tuner_type type;
@@ -1119,8 +1099,7 @@ struct v4l2_tuner
__u32 reserved[4];
};
struct v4l2_modulator
{
struct v4l2_modulator {
__u32 index;
__u8 name[32];
__u32 capability;
@@ -1153,8 +1132,7 @@ struct v4l2_modulator
#define V4L2_TUNER_MODE_LANG1 0x0003
#define V4L2_TUNER_MODE_LANG1_LANG2 0x0004
struct v4l2_frequency
{
struct v4l2_frequency {
__u32 tuner;
enum v4l2_tuner_type type;
__u32 frequency;
@@ -1172,8 +1150,7 @@ struct v4l2_hw_freq_seek {
/*
* A U D I O
*/
struct v4l2_audio
{
struct v4l2_audio {
__u32 index;
__u8 name[32];
__u32 capability;
@@ -1188,8 +1165,7 @@ struct v4l2_audio
/* Flags for the 'mode' field */
#define V4L2_AUDMODE_AVL 0x00001
struct v4l2_audioout
{
struct v4l2_audioout {
__u32 index;
__u8 name[32];
__u32 capability;
@@ -1253,8 +1229,7 @@ struct v4l2_encoder_cmd {
*/
/* Raw VBI */
struct v4l2_vbi_format
{
struct v4l2_vbi_format {
__u32 sampling_rate; /* in 1 Hz */
__u32 offset;
__u32 samples_per_line;
@@ -1266,8 +1241,8 @@ struct v4l2_vbi_format
};
/* VBI flags */
#define V4L2_VBI_UNSYNC (1<< 0)
#define V4L2_VBI_INTERLACED (1<< 1)
#define V4L2_VBI_UNSYNC (1 << 0)
#define V4L2_VBI_INTERLACED (1 << 1)
/* Sliced VBI
*
@@ -1276,8 +1251,7 @@ struct v4l2_vbi_format
* notice in the definitive implementation.
*/
struct v4l2_sliced_vbi_format
{
struct v4l2_sliced_vbi_format {
__u16 service_set;
/* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field
service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field
@@ -1301,8 +1275,7 @@ struct v4l2_sliced_vbi_format
#define V4L2_SLICED_VBI_525 (V4L2_SLICED_CAPTION_525)
#define V4L2_SLICED_VBI_625 (V4L2_SLICED_TELETEXT_B | V4L2_SLICED_VPS | V4L2_SLICED_WSS_625)
struct v4l2_sliced_vbi_cap
{
struct v4l2_sliced_vbi_cap {
__u16 service_set;
/* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field
service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field
@@ -1313,8 +1286,7 @@ struct v4l2_sliced_vbi_cap
__u32 reserved[3]; /* must be 0 */
};
struct v4l2_sliced_vbi_data
{
struct v4l2_sliced_vbi_data {
__u32 id;
__u32 field; /* 0: first field, 1: second field */
__u32 line; /* 1-23 */
@@ -1328,27 +1300,23 @@ struct v4l2_sliced_vbi_data
/* Stream data format
*/
struct v4l2_format
{
struct v4l2_format {
enum v4l2_buf_type type;
union
{
struct v4l2_pix_format pix; // V4L2_BUF_TYPE_VIDEO_CAPTURE
struct v4l2_window win; // V4L2_BUF_TYPE_VIDEO_OVERLAY
struct v4l2_vbi_format vbi; // V4L2_BUF_TYPE_VBI_CAPTURE
struct v4l2_sliced_vbi_format sliced; // V4L2_BUF_TYPE_SLICED_VBI_CAPTURE
__u8 raw_data[200]; // user-defined
union {
struct v4l2_pix_format pix; /* V4L2_BUF_TYPE_VIDEO_CAPTURE */
struct v4l2_window win; /* V4L2_BUF_TYPE_VIDEO_OVERLAY */
struct v4l2_vbi_format vbi; /* V4L2_BUF_TYPE_VBI_CAPTURE */
struct v4l2_sliced_vbi_format sliced; /* V4L2_BUF_TYPE_SLICED_VBI_CAPTURE */
__u8 raw_data[200]; /* user-defined */
} fmt;
};
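A usage sketch of this tagged union from user space (assuming the usual <string.h>/<sys/ioctl.h> includes and an already-open fd): the 'type' member selects which union member is valid, so a capture format is negotiated through fmt.fmt.pix.

static int example_set_capture_format(int fd)
{
	struct v4l2_format fmt;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	/* selects fmt.pix */
	fmt.fmt.pix.width = 640;
	fmt.fmt.pix.height = 480;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
	fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
	return ioctl(fd, VIDIOC_S_FMT, &fmt);
}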
/* Stream type-dependent parameters
*/
struct v4l2_streamparm
{
struct v4l2_streamparm {
enum v4l2_buf_type type;
union
{
union {
struct v4l2_captureparm capture;
struct v4l2_outputparm output;
__u8 raw_data[200]; /* user-defined */
@@ -1386,92 +1354,86 @@ struct v4l2_chip_ident {
* I O C T L C O D E S F O R V I D E O D E V I C E S
*
*/
#define VIDIOC_QUERYCAP _IOR ('V', 0, struct v4l2_capability)
#define VIDIOC_RESERVED _IO ('V', 1)
#define VIDIOC_ENUM_FMT _IOWR ('V', 2, struct v4l2_fmtdesc)
#define VIDIOC_G_FMT _IOWR ('V', 4, struct v4l2_format)
#define VIDIOC_S_FMT _IOWR ('V', 5, struct v4l2_format)
#define VIDIOC_REQBUFS _IOWR ('V', 8, struct v4l2_requestbuffers)
#define VIDIOC_QUERYBUF _IOWR ('V', 9, struct v4l2_buffer)
#define VIDIOC_G_FBUF _IOR ('V', 10, struct v4l2_framebuffer)
#define VIDIOC_S_FBUF _IOW ('V', 11, struct v4l2_framebuffer)
#define VIDIOC_OVERLAY _IOW ('V', 14, int)
#define VIDIOC_QBUF _IOWR ('V', 15, struct v4l2_buffer)
#define VIDIOC_DQBUF _IOWR ('V', 17, struct v4l2_buffer)
#define VIDIOC_STREAMON _IOW ('V', 18, int)
#define VIDIOC_STREAMOFF _IOW ('V', 19, int)
#define VIDIOC_G_PARM _IOWR ('V', 21, struct v4l2_streamparm)
#define VIDIOC_S_PARM _IOWR ('V', 22, struct v4l2_streamparm)
#define VIDIOC_G_STD _IOR ('V', 23, v4l2_std_id)
#define VIDIOC_S_STD _IOW ('V', 24, v4l2_std_id)
#define VIDIOC_ENUMSTD _IOWR ('V', 25, struct v4l2_standard)
#define VIDIOC_ENUMINPUT _IOWR ('V', 26, struct v4l2_input)
#define VIDIOC_G_CTRL _IOWR ('V', 27, struct v4l2_control)
#define VIDIOC_S_CTRL _IOWR ('V', 28, struct v4l2_control)
#define VIDIOC_G_TUNER _IOWR ('V', 29, struct v4l2_tuner)
#define VIDIOC_S_TUNER _IOW ('V', 30, struct v4l2_tuner)
#define VIDIOC_G_AUDIO _IOR ('V', 33, struct v4l2_audio)
#define VIDIOC_S_AUDIO _IOW ('V', 34, struct v4l2_audio)
#define VIDIOC_QUERYCTRL _IOWR ('V', 36, struct v4l2_queryctrl)
#define VIDIOC_QUERYMENU _IOWR ('V', 37, struct v4l2_querymenu)
#define VIDIOC_G_INPUT _IOR ('V', 38, int)
#define VIDIOC_S_INPUT _IOWR ('V', 39, int)
#define VIDIOC_G_OUTPUT _IOR ('V', 46, int)
#define VIDIOC_S_OUTPUT _IOWR ('V', 47, int)
#define VIDIOC_ENUMOUTPUT _IOWR ('V', 48, struct v4l2_output)
#define VIDIOC_G_AUDOUT _IOR ('V', 49, struct v4l2_audioout)
#define VIDIOC_S_AUDOUT _IOW ('V', 50, struct v4l2_audioout)
#define VIDIOC_G_MODULATOR _IOWR ('V', 54, struct v4l2_modulator)
#define VIDIOC_S_MODULATOR _IOW ('V', 55, struct v4l2_modulator)
#define VIDIOC_G_FREQUENCY _IOWR ('V', 56, struct v4l2_frequency)
#define VIDIOC_S_FREQUENCY _IOW ('V', 57, struct v4l2_frequency)
#define VIDIOC_CROPCAP _IOWR ('V', 58, struct v4l2_cropcap)
#define VIDIOC_G_CROP _IOWR ('V', 59, struct v4l2_crop)
#define VIDIOC_S_CROP _IOW ('V', 60, struct v4l2_crop)
#define VIDIOC_G_JPEGCOMP _IOR ('V', 61, struct v4l2_jpegcompression)
#define VIDIOC_S_JPEGCOMP _IOW ('V', 62, struct v4l2_jpegcompression)
#define VIDIOC_QUERYSTD _IOR ('V', 63, v4l2_std_id)
#define VIDIOC_TRY_FMT _IOWR ('V', 64, struct v4l2_format)
#define VIDIOC_ENUMAUDIO _IOWR ('V', 65, struct v4l2_audio)
#define VIDIOC_ENUMAUDOUT _IOWR ('V', 66, struct v4l2_audioout)
#define VIDIOC_G_PRIORITY _IOR ('V', 67, enum v4l2_priority)
#define VIDIOC_S_PRIORITY _IOW ('V', 68, enum v4l2_priority)
#define VIDIOC_G_SLICED_VBI_CAP _IOWR ('V', 69, struct v4l2_sliced_vbi_cap)
#define VIDIOC_LOG_STATUS _IO ('V', 70)
#define VIDIOC_G_EXT_CTRLS _IOWR ('V', 71, struct v4l2_ext_controls)
#define VIDIOC_S_EXT_CTRLS _IOWR ('V', 72, struct v4l2_ext_controls)
#define VIDIOC_TRY_EXT_CTRLS _IOWR ('V', 73, struct v4l2_ext_controls)
#define VIDIOC_QUERYCAP _IOR('V', 0, struct v4l2_capability)
#define VIDIOC_RESERVED _IO('V', 1)
#define VIDIOC_ENUM_FMT _IOWR('V', 2, struct v4l2_fmtdesc)
#define VIDIOC_G_FMT _IOWR('V', 4, struct v4l2_format)
#define VIDIOC_S_FMT _IOWR('V', 5, struct v4l2_format)
#define VIDIOC_REQBUFS _IOWR('V', 8, struct v4l2_requestbuffers)
#define VIDIOC_QUERYBUF _IOWR('V', 9, struct v4l2_buffer)
#define VIDIOC_G_FBUF _IOR('V', 10, struct v4l2_framebuffer)
#define VIDIOC_S_FBUF _IOW('V', 11, struct v4l2_framebuffer)
#define VIDIOC_OVERLAY _IOW('V', 14, int)
#define VIDIOC_QBUF _IOWR('V', 15, struct v4l2_buffer)
#define VIDIOC_DQBUF _IOWR('V', 17, struct v4l2_buffer)
#define VIDIOC_STREAMON _IOW('V', 18, int)
#define VIDIOC_STREAMOFF _IOW('V', 19, int)
#define VIDIOC_G_PARM _IOWR('V', 21, struct v4l2_streamparm)
#define VIDIOC_S_PARM _IOWR('V', 22, struct v4l2_streamparm)
#define VIDIOC_G_STD _IOR('V', 23, v4l2_std_id)
#define VIDIOC_S_STD _IOW('V', 24, v4l2_std_id)
#define VIDIOC_ENUMSTD _IOWR('V', 25, struct v4l2_standard)
#define VIDIOC_ENUMINPUT _IOWR('V', 26, struct v4l2_input)
#define VIDIOC_G_CTRL _IOWR('V', 27, struct v4l2_control)
#define VIDIOC_S_CTRL _IOWR('V', 28, struct v4l2_control)
#define VIDIOC_G_TUNER _IOWR('V', 29, struct v4l2_tuner)
#define VIDIOC_S_TUNER _IOW('V', 30, struct v4l2_tuner)
#define VIDIOC_G_AUDIO _IOR('V', 33, struct v4l2_audio)
#define VIDIOC_S_AUDIO _IOW('V', 34, struct v4l2_audio)
#define VIDIOC_QUERYCTRL _IOWR('V', 36, struct v4l2_queryctrl)
#define VIDIOC_QUERYMENU _IOWR('V', 37, struct v4l2_querymenu)
#define VIDIOC_G_INPUT _IOR('V', 38, int)
#define VIDIOC_S_INPUT _IOWR('V', 39, int)
#define VIDIOC_G_OUTPUT _IOR('V', 46, int)
#define VIDIOC_S_OUTPUT _IOWR('V', 47, int)
#define VIDIOC_ENUMOUTPUT _IOWR('V', 48, struct v4l2_output)
#define VIDIOC_G_AUDOUT _IOR('V', 49, struct v4l2_audioout)
#define VIDIOC_S_AUDOUT _IOW('V', 50, struct v4l2_audioout)
#define VIDIOC_G_MODULATOR _IOWR('V', 54, struct v4l2_modulator)
#define VIDIOC_S_MODULATOR _IOW('V', 55, struct v4l2_modulator)
#define VIDIOC_G_FREQUENCY _IOWR('V', 56, struct v4l2_frequency)
#define VIDIOC_S_FREQUENCY _IOW('V', 57, struct v4l2_frequency)
#define VIDIOC_CROPCAP _IOWR('V', 58, struct v4l2_cropcap)
#define VIDIOC_G_CROP _IOWR('V', 59, struct v4l2_crop)
#define VIDIOC_S_CROP _IOW('V', 60, struct v4l2_crop)
#define VIDIOC_G_JPEGCOMP _IOR('V', 61, struct v4l2_jpegcompression)
#define VIDIOC_S_JPEGCOMP _IOW('V', 62, struct v4l2_jpegcompression)
#define VIDIOC_QUERYSTD _IOR('V', 63, v4l2_std_id)
#define VIDIOC_TRY_FMT _IOWR('V', 64, struct v4l2_format)
#define VIDIOC_ENUMAUDIO _IOWR('V', 65, struct v4l2_audio)
#define VIDIOC_ENUMAUDOUT _IOWR('V', 66, struct v4l2_audioout)
#define VIDIOC_G_PRIORITY _IOR('V', 67, enum v4l2_priority)
#define VIDIOC_S_PRIORITY _IOW('V', 68, enum v4l2_priority)
#define VIDIOC_G_SLICED_VBI_CAP _IOWR('V', 69, struct v4l2_sliced_vbi_cap)
#define VIDIOC_LOG_STATUS _IO('V', 70)
#define VIDIOC_G_EXT_CTRLS _IOWR('V', 71, struct v4l2_ext_controls)
#define VIDIOC_S_EXT_CTRLS _IOWR('V', 72, struct v4l2_ext_controls)
#define VIDIOC_TRY_EXT_CTRLS _IOWR('V', 73, struct v4l2_ext_controls)
#if 1
#define VIDIOC_ENUM_FRAMESIZES _IOWR ('V', 74, struct v4l2_frmsizeenum)
#define VIDIOC_ENUM_FRAMEINTERVALS _IOWR ('V', 75, struct v4l2_frmivalenum)
#define VIDIOC_G_ENC_INDEX _IOR ('V', 76, struct v4l2_enc_idx)
#define VIDIOC_ENCODER_CMD _IOWR ('V', 77, struct v4l2_encoder_cmd)
#define VIDIOC_TRY_ENCODER_CMD _IOWR ('V', 78, struct v4l2_encoder_cmd)
#define VIDIOC_ENUM_FRAMESIZES _IOWR('V', 74, struct v4l2_frmsizeenum)
#define VIDIOC_ENUM_FRAMEINTERVALS _IOWR('V', 75, struct v4l2_frmivalenum)
#define VIDIOC_G_ENC_INDEX _IOR('V', 76, struct v4l2_enc_idx)
#define VIDIOC_ENCODER_CMD _IOWR('V', 77, struct v4l2_encoder_cmd)
#define VIDIOC_TRY_ENCODER_CMD _IOWR('V', 78, struct v4l2_encoder_cmd)
/* Experimental, only implemented if CONFIG_VIDEO_ADV_DEBUG is defined */
#define VIDIOC_DBG_S_REGISTER _IOW ('V', 79, struct v4l2_register)
#define VIDIOC_DBG_G_REGISTER _IOWR ('V', 80, struct v4l2_register)
#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_register)
#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_register)
#define VIDIOC_G_CHIP_IDENT _IOWR ('V', 81, struct v4l2_chip_ident)
#define VIDIOC_G_CHIP_IDENT _IOWR('V', 81, struct v4l2_chip_ident)
#endif
#define VIDIOC_S_HW_FREQ_SEEK _IOW ('V', 82, struct v4l2_hw_freq_seek)
#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek)
#ifdef __OLD_VIDIOC_
/* for compatibility, will go away some day */
#define VIDIOC_OVERLAY_OLD _IOWR ('V', 14, int)
#define VIDIOC_S_PARM_OLD _IOW ('V', 22, struct v4l2_streamparm)
#define VIDIOC_S_CTRL_OLD _IOW ('V', 28, struct v4l2_control)
#define VIDIOC_G_AUDIO_OLD _IOWR ('V', 33, struct v4l2_audio)
#define VIDIOC_G_AUDOUT_OLD _IOWR ('V', 49, struct v4l2_audioout)
#define VIDIOC_CROPCAP_OLD _IOR ('V', 58, struct v4l2_cropcap)
#define VIDIOC_OVERLAY_OLD _IOWR('V', 14, int)
#define VIDIOC_S_PARM_OLD _IOW('V', 22, struct v4l2_streamparm)
#define VIDIOC_S_CTRL_OLD _IOW('V', 28, struct v4l2_control)
#define VIDIOC_G_AUDIO_OLD _IOWR('V', 33, struct v4l2_audio)
#define VIDIOC_G_AUDOUT_OLD _IOWR('V', 49, struct v4l2_audioout)
#define VIDIOC_CROPCAP_OLD _IOR('V', 58, struct v4l2_cropcap)
#endif
#define BASE_VIDIOC_PRIVATE 192 /* 192-255 are private */
#endif /* __LINUX_VIDEODEV2_H */
/*
* Local variables:
* c-basic-offset: 8
* End:
*/
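A minimal, self-contained user-space sketch exercising one of the ioctl numbers above (the device path /dev/video0 is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap;
	int fd = open("/dev/video0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
		printf("driver %s, card %s\n",
		       (char *)cap.driver, (char *)cap.card);
	close(fd);
	return 0;
}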

View File

@@ -45,10 +45,10 @@
#define VTXIOCCLRCACHE_OLD 0x710b /* clear cache on VTX-interface (if avail.) */
#define VTXIOCSETVIRT_OLD 0x710c /* turn on virtual mode (this disables TV-display) */
/*
* Definitions for VTXIOCGETINFO
*/
#define SAA5243 0
#define SAA5246 1
#define SAA5249 2
@@ -57,10 +57,10 @@
typedef struct {
int version_major, version_minor; /* version of driver; if version_major changes, driver */
/* is not backward compatible!!! CHECK THIS!!! */
int numpages; /* number of page-buffers of vtx-chipset */
int cct_type; /* type of vtx-chipset (SAA5243, SAA5246, SAA5248 or
* SAA5249) */
}
vtx_info_t;
@@ -81,7 +81,7 @@ vtx_info_t;
#define PGMASK_HOUR (HR_TEN | HR_UNIT)
#define PGMASK_MINUTE (MIN_TEN | MIN_UNIT)
typedef struct
{
int page; /* number of requested page (hexadecimal) */
int hour; /* requested hour (hexadecimal) */
@@ -98,11 +98,11 @@ vtx_pagereq_t;
/*
* Definitions for VTXIOC{GETSTAT,PUTSTAT}
*/
#define VTX_PAGESIZE (40 * 24)
#define VTX_VIRTUALSIZE (40 * 49)
typedef struct
{
int pagenum; /* number of page (hexadecimal) */
int hour; /* hour (hexadecimal) */
@@ -121,5 +121,5 @@ typedef struct
unsigned hamming : 1; /* hamming-error occurred */
}
vtx_pageinfo_t;
#endif /* _VTX_H */