Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
	arch/x86/kernel/io_apic.c
include/linux/Kbuild
@@ -313,6 +313,7 @@ unifdef-y += ptrace.h
 unifdef-y += qnx4_fs.h
 unifdef-y += quota.h
 unifdef-y += random.h
+unifdef-y += irqnr.h
 unifdef-y += reboot.h
 unifdef-y += reiserfs_fs.h
 unifdef-y += reiserfs_xattr.h
include/linux/aio.h
@@ -5,6 +5,7 @@
 #include <linux/workqueue.h>
 #include <linux/aio_abi.h>
 #include <linux/uio.h>
+#include <linux/rcupdate.h>
 
 #include <asm/atomic.h>
 
@@ -183,7 +184,7 @@ struct kioctx {
 
 	/* This needs improving */
 	unsigned long user_id;
-	struct kioctx *next;
+	struct hlist_node list;
 
 	wait_queue_head_t wait;
 
@@ -199,6 +200,8 @@ struct kioctx {
 	struct aio_ring_info ring_info;
 
 	struct delayed_work wq;
+
+	struct rcu_head rcu_head;
 };
 
 /* prototypes */
include/linux/bio.h
@@ -90,10 +90,11 @@ struct bio {
 
 	unsigned int bi_comp_cpu;	/* completion CPU */
 
+	atomic_t bi_cnt;		/* pin count */
+
 	struct bio_vec *bi_io_vec;	/* the actual vec list */
 
 	bio_end_io_t *bi_end_io;
-	atomic_t bi_cnt;		/* pin count */
 
 	void *bi_private;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -101,6 +102,13 @@ struct bio {
 #endif
 
 	bio_destructor_t *bi_destructor;	/* destructor */
+
+	/*
+	 * We can inline a number of vecs at the end of the bio, to avoid
+	 * double allocations for a small number of bio_vecs. This member
+	 * MUST obviously be kept at the very end of the bio.
+	 */
+	struct bio_vec bi_inline_vecs[0];
 };
 
 /*
@@ -117,6 +125,7 @@ struct bio {
 #define BIO_CPU_AFFINE	8	/* complete bio on same CPU as submitted */
 #define BIO_NULL_MAPPED	9	/* contains invalid user pages */
 #define BIO_FS_INTEGRITY 10	/* fs owns integrity data, not block layer */
+#define BIO_QUIET	11	/* Make BIO Quiet */
 #define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))
 
 /*
@@ -211,6 +220,11 @@ static inline void *bio_data(struct bio *bio)
 	return NULL;
 }
 
+static inline int bio_has_allocated_vec(struct bio *bio)
+{
+	return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs;
+}
+
 /*
  * will die
  */
@@ -332,7 +346,7 @@ struct bio_pair {
 extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
 extern void bio_pair_release(struct bio_pair *dbio);
 
-extern struct bio_set *bioset_create(int, int);
+extern struct bio_set *bioset_create(unsigned int, unsigned int);
 extern void bioset_free(struct bio_set *);
 
 extern struct bio *bio_alloc(gfp_t, int);
@@ -377,6 +391,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio(struct bio *bio);
 extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
+extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
 extern unsigned int bvec_nr_vecs(unsigned short idx);
 
 /*
@@ -395,13 +410,17 @@ static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu)
  */
 #define BIO_POOL_SIZE 2
 #define BIOVEC_NR_POOLS 6
+#define BIOVEC_MAX_IDX	(BIOVEC_NR_POOLS - 1)
 
 struct bio_set {
+	struct kmem_cache *bio_slab;
+	unsigned int front_pad;
+
 	mempool_t *bio_pool;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	mempool_t *bio_integrity_pool;
 #endif
-	mempool_t *bvec_pools[BIOVEC_NR_POOLS];
+	mempool_t *bvec_pool;
 };
 
 struct biovec_slab {
@@ -411,6 +430,7 @@ struct biovec_slab {
 };
 
 extern struct bio_set *fs_bio_set;
+extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly;
 
 /*
 * a small number of entries is fine, not going to be performance critical.
include/linux/blkdev.h
@@ -26,7 +26,6 @@ struct scsi_ioctl_command;
 
 struct request_queue;
 struct elevator_queue;
-typedef struct elevator_queue elevator_t;
 struct request_pm_state;
 struct blk_trace;
 struct request;
@@ -313,7 +312,7 @@ struct request_queue
 	 */
 	struct list_head queue_head;
 	struct request *last_merge;
-	elevator_t *elevator;
+	struct elevator_queue *elevator;
 
 	/*
 	 * the queue request freelist, one for reads and one for writes
@@ -449,6 +448,7 @@ struct request_queue
 #define QUEUE_FLAG_FAIL_IO	12	/* fake timeout */
 #define QUEUE_FLAG_STACKABLE	13	/* supports request stacking */
 #define QUEUE_FLAG_NONROT	14	/* non-rotational device (SSD) */
+#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
 
 static inline int queue_is_locked(struct request_queue *q)
 {
@@ -522,22 +522,32 @@ enum {
	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
	 */
-	QUEUE_ORDERED_NONE	= 0x00,
-	QUEUE_ORDERED_DRAIN	= 0x01,
-	QUEUE_ORDERED_TAG	= 0x02,
+	QUEUE_ORDERED_BY_DRAIN		= 0x01,
+	QUEUE_ORDERED_BY_TAG		= 0x02,
+	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
+	QUEUE_ORDERED_DO_BAR		= 0x20,
+	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
+	QUEUE_ORDERED_DO_FUA		= 0x80,
 
-	QUEUE_ORDERED_PREFLUSH	= 0x10,
-	QUEUE_ORDERED_POSTFLUSH	= 0x20,
-	QUEUE_ORDERED_FUA	= 0x40,
+	QUEUE_ORDERED_NONE		= 0x00,
 
-	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
-			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
-	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
-			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
-	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
-			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
-	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
-			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN |
+					  QUEUE_ORDERED_DO_BAR,
+	QUEUE_ORDERED_DRAIN_FLUSH	= QUEUE_ORDERED_DRAIN |
+					  QUEUE_ORDERED_DO_PREFLUSH |
+					  QUEUE_ORDERED_DO_POSTFLUSH,
+	QUEUE_ORDERED_DRAIN_FUA		= QUEUE_ORDERED_DRAIN |
+					  QUEUE_ORDERED_DO_PREFLUSH |
+					  QUEUE_ORDERED_DO_FUA,
+
+	QUEUE_ORDERED_TAG		= QUEUE_ORDERED_BY_TAG |
+					  QUEUE_ORDERED_DO_BAR,
+	QUEUE_ORDERED_TAG_FLUSH		= QUEUE_ORDERED_TAG |
+					  QUEUE_ORDERED_DO_PREFLUSH |
+					  QUEUE_ORDERED_DO_POSTFLUSH,
+	QUEUE_ORDERED_TAG_FUA		= QUEUE_ORDERED_TAG |
+					  QUEUE_ORDERED_DO_PREFLUSH |
+					  QUEUE_ORDERED_DO_FUA,
 
	/*
	 * Ordered operation sequence
@@ -585,7 +595,6 @@ enum {
 #define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
 #define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
-#define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
 /* rq->queuelist of dequeued request must be list_empty() */
 #define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))
 
@@ -855,10 +864,10 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
-extern int blk_do_ordered(struct request_queue *, struct request **);
+extern bool blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
-extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
+extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
@@ -977,7 +986,6 @@ static inline void put_dev_sector(Sector p)
 
 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-void kblockd_flush_work(struct work_struct *work);
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
include/linux/bottom_half.h
@@ -2,7 +2,6 @@
 #define _LINUX_BH_H
 
 extern void local_bh_disable(void);
-extern void __local_bh_enable(void);
 extern void _local_bh_enable(void);
 extern void local_bh_enable(void);
 extern void local_bh_enable_ip(unsigned long ip);
include/linux/buffer_head.h
@@ -35,6 +35,7 @@ enum bh_state_bits {
	BH_Ordered,	/* ordered write */
	BH_Eopnotsupp,	/* operation not supported (barrier) */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
+	BH_Quiet,	/* Buffer Error Prinks to be quiet */
 
	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
include/linux/console.h
@@ -153,4 +153,8 @@ void vcs_remove_sysfs(struct tty_struct *tty);
 #define VESA_HSYNC_SUSPEND	2
 #define VESA_POWERDOWN		3
 
+#ifdef CONFIG_VGA_CONSOLE
+extern bool vgacon_text_force(void);
+#endif
+
 #endif /* _LINUX_CONSOLE_H */
include/linux/debug_locks.h
@@ -17,7 +17,7 @@ extern int debug_locks_off(void);
 ({								\
	int __ret = 0;						\
								\
-	if (unlikely(c)) {					\
+	if (!oops_in_progress && unlikely(c)) {			\
		if (debug_locks_off() && !debug_locks_silent)	\
			WARN_ON(1);				\
		__ret = 1;					\
include/linux/dmi.h
@@ -47,6 +47,7 @@ extern int dmi_name_in_vendors(const char *str);
 extern int dmi_name_in_serial(const char *str);
 extern int dmi_available;
 extern int dmi_walk(void (*decode)(const struct dmi_header *));
+extern bool dmi_match(enum dmi_field f, const char *str);
 
 #else
 
@@ -61,6 +62,8 @@ static inline int dmi_name_in_serial(const char *s) { return 0; }
 #define dmi_available 0
 static inline int dmi_walk(void (*decode)(const struct dmi_header *))
	{ return -1; }
+static inline bool dmi_match(enum dmi_field f, const char *str)
+	{ return false; }
 
 #endif
 
include/linux/elevator.h
@@ -28,7 +28,7 @@ typedef void (elevator_activate_req_fn) (struct request_queue *, struct request
 typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
 
 typedef void *(elevator_init_fn) (struct request_queue *);
-typedef void (elevator_exit_fn) (elevator_t *);
+typedef void (elevator_exit_fn) (struct elevator_queue *);
 
 struct elevator_ops
 {
@@ -62,8 +62,8 @@ struct elevator_ops
 
 struct elv_fs_entry {
	struct attribute attr;
-	ssize_t (*show)(elevator_t *, char *);
-	ssize_t (*store)(elevator_t *, const char *, size_t);
+	ssize_t (*show)(struct elevator_queue *, char *);
+	ssize_t (*store)(struct elevator_queue *, const char *, size_t);
 };
 
 /*
@@ -130,7 +130,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *);
 extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
 
 extern int elevator_init(struct request_queue *, char *);
-extern void elevator_exit(elevator_t *);
+extern void elevator_exit(struct elevator_queue *);
 extern int elv_rq_merge_ok(struct request *, struct bio *);
 
 /*
include/linux/fault-inject.h
@@ -81,4 +81,13 @@ static inline void cleanup_fault_attr_dentries(struct fault_attr *attr)
 
 #endif /* CONFIG_FAULT_INJECTION */
 
+#ifdef CONFIG_FAILSLAB
+extern bool should_failslab(size_t size, gfp_t gfpflags);
+#else
+static inline bool should_failslab(size_t size, gfp_t gfpflags)
+{
+	return false;
+}
+#endif /* CONFIG_FAILSLAB */
+
 #endif /* _LINUX_FAULT_INJECT_H */
include/linux/fs.h
@@ -82,6 +82,14 @@ extern int dir_notify_enable;
    (specialy hack for floppy.c) */
 #define FMODE_WRITE_IOCTL	((__force fmode_t)128)
 
+/*
+ * Don't update ctime and mtime.
+ *
+ * Currently a special hack for the XFS open_by_handle ioctl, but we'll
+ * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon.
+ */
+#define FMODE_NOCMTIME		((__force fmode_t)2048)
+
 #define RW_MASK		1
 #define RWA_MASK	2
 #define READ 0
@@ -1877,7 +1885,9 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
 
 extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
 
+extern struct inode * inode_init_always(struct super_block *, struct inode *);
 extern void inode_init_once(struct inode *);
+extern void inode_add_to_lists(struct super_block *, struct inode *);
 extern void iput(struct inode *);
 extern struct inode * igrab(struct inode *);
 extern ino_t iunique(struct super_block *, ino_t);
include/linux/futex.h
@@ -25,7 +25,8 @@ union ktime;
 #define FUTEX_WAKE_BITSET	10
 
 #define FUTEX_PRIVATE_FLAG	128
-#define FUTEX_CMD_MASK		~FUTEX_PRIVATE_FLAG
+#define FUTEX_CLOCK_REALTIME	256
+#define FUTEX_CMD_MASK		~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
 
 #define FUTEX_WAIT_PRIVATE	(FUTEX_WAIT | FUTEX_PRIVATE_FLAG)
 #define FUTEX_WAKE_PRIVATE	(FUTEX_WAKE | FUTEX_PRIVATE_FLAG)
@@ -164,6 +165,8 @@ union futex_key {
	} both;
 };
 
+#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
+
 #ifdef CONFIG_FUTEX
 extern void exit_robust_list(struct task_struct *curr);
 extern void exit_pi_state_list(struct task_struct *curr);
include/linux/genhd.h
@@ -126,6 +126,7 @@ struct blk_scsi_cmd_filter {
 struct disk_part_tbl {
	struct rcu_head rcu_head;
	int len;
+	struct hd_struct *last_lookup;
	struct hd_struct *part[];
 };
 
include/linux/hardirq.h
@@ -119,13 +119,17 @@ static inline void account_system_vtime(struct task_struct *tsk)
 }
 #endif
 
-#if defined(CONFIG_PREEMPT_RCU) && defined(CONFIG_NO_HZ)
+#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU)
 extern void rcu_irq_enter(void);
 extern void rcu_irq_exit(void);
+extern void rcu_nmi_enter(void);
+extern void rcu_nmi_exit(void);
 #else
 # define rcu_irq_enter() do { } while (0)
 # define rcu_irq_exit() do { } while (0)
-#endif /* CONFIG_PREEMPT_RCU */
+# define rcu_nmi_enter() do { } while (0)
+# define rcu_nmi_exit() do { } while (0)
+#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */
 
 /*
  * It is safe to do non-atomic ops on ->hardirq_context,
@@ -135,7 +139,6 @@ extern void rcu_irq_exit(void);
  */
 #define __irq_enter()					\
	do {						\
-		rcu_irq_enter();			\
		account_system_vtime(current);		\
		add_preempt_count(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
@@ -154,7 +157,6 @@ extern void irq_enter(void);
		trace_hardirq_exit();			\
		account_system_vtime(current);		\
		sub_preempt_count(HARDIRQ_OFFSET);	\
-		rcu_irq_exit();				\
	} while (0)
 
 /*
@@ -166,11 +168,14 @@ extern void irq_exit(void);
	do {						\
		ftrace_nmi_enter();			\
		lockdep_off();				\
+		rcu_nmi_enter();			\
		__irq_enter();				\
	} while (0)
 
 #define nmi_exit()					\
	do {						\
		__irq_exit();				\
+		rcu_nmi_exit();				\
		lockdep_on();				\
		ftrace_nmi_exit();			\
	} while (0)
 
include/linux/hrtimer.h
@@ -42,26 +42,6 @@ enum hrtimer_restart {
	HRTIMER_RESTART,	/* Timer must be restarted */
 };
 
-/*
- * hrtimer callback modes:
- *
- *	HRTIMER_CB_SOFTIRQ:		Callback must run in softirq context
- *	HRTIMER_CB_IRQSAFE_PERCPU:	Callback must run in hardirq context
- *					Special mode for tick emulation and
- *					scheduler timer. Such timers are per
- *					cpu and not allowed to be migrated on
- *					cpu unplug.
- *	HRTIMER_CB_IRQSAFE_UNLOCKED:	Callback should run in hardirq context
- *					with timer->base lock unlocked
- *					used for timers which call wakeup to
- *					avoid lock order problems with rq->lock
- */
-enum hrtimer_cb_mode {
-	HRTIMER_CB_SOFTIRQ,
-	HRTIMER_CB_IRQSAFE_PERCPU,
-	HRTIMER_CB_IRQSAFE_UNLOCKED,
-};
-
 /*
  * Values to track state of the timer
  *
@@ -70,7 +50,6 @@ enum hrtimer_cb_mode {
  * 0x00		inactive
  * 0x01		enqueued into rbtree
  * 0x02		callback function running
- * 0x04		callback pending (high resolution mode)
  *
  * Special cases:
  * 0x03		callback function running and enqueued
@@ -92,8 +71,7 @@ enum hrtimer_cb_mode {
 #define HRTIMER_STATE_INACTIVE	0x00
 #define HRTIMER_STATE_ENQUEUED	0x01
 #define HRTIMER_STATE_CALLBACK	0x02
-#define HRTIMER_STATE_PENDING	0x04
-#define HRTIMER_STATE_MIGRATE	0x08
+#define HRTIMER_STATE_MIGRATE	0x04
 
 /**
  * struct hrtimer - the basic hrtimer structure
@@ -109,8 +87,6 @@ enum hrtimer_cb_mode {
  * @function:	timer expiry callback function
  * @base:	pointer to the timer base (per cpu and per clock)
  * @state:	state information (See bit values above)
- * @cb_mode:	high resolution timer feature to select the callback execution
- *		 mode
  * @cb_entry:	list head to enqueue an expired timer into the callback list
  * @start_site:	timer statistics field to store the site where the timer
  *		was started
@@ -129,7 +105,6 @@ struct hrtimer {
	struct hrtimer_clock_base *base;
	unsigned long state;
	struct list_head cb_entry;
-	enum hrtimer_cb_mode cb_mode;
 #ifdef CONFIG_TIMER_STATS
	int start_pid;
	void *start_site;
@@ -188,15 +163,11 @@ struct hrtimer_clock_base {
  * @check_clocks:	Indictator, when set evaluate time source and clock
  *			event devices whether high resolution mode can be
  *			activated.
- * @cb_pending:		Expired timers are moved from the rbtree to this
- *			list in the timer interrupt. The list is processed
- *			in the softirq.
  * @nr_events:		Total number of timer interrupt events
  */
 struct hrtimer_cpu_base {
	spinlock_t lock;
	struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
-	struct list_head cb_pending;
 #ifdef CONFIG_HIGH_RES_TIMERS
	ktime_t expires_next;
	int hres_active;
@@ -404,8 +375,7 @@ static inline int hrtimer_active(const struct hrtimer *timer)
  */
 static inline int hrtimer_is_queued(struct hrtimer *timer)
 {
-	return timer->state &
-		(HRTIMER_STATE_ENQUEUED | HRTIMER_STATE_PENDING);
+	return timer->state & HRTIMER_STATE_ENQUEUED;
 }
 
 /*
include/linux/ide.h
@@ -122,8 +122,6 @@ struct ide_io_ports {
 #define MAX_DRIVES	2	/* per interface; 2 assumed by lots of code */
 #define SECTOR_SIZE	512
 
-#define IDE_LARGE_SEEK(b1,b2,t)	(((b1) > (b2) + (t)) || ((b2) > (b1) + (t)))
-
 /*
  * Timeouts for various operations:
  */
@@ -172,9 +170,7 @@ typedef int (ide_ack_intr_t)(struct hwif_s *);
 enum {	ide_unknown,	ide_generic,	ide_pci,
	ide_cmd640,	ide_dtc2278,	ide_ali14xx,
	ide_qd65xx,	ide_umc8672,	ide_ht6560b,
-	ide_rz1000,	ide_trm290,
-	ide_cmd646,	ide_cy82c693,	ide_4drives,
-	ide_pmac,	ide_acorn,
+	ide_4drives,	ide_pmac,	ide_acorn,
	ide_au1xxx,	ide_palm3710
 };
 
@@ -496,8 +492,6 @@ enum {
	 * when more than one interrupt is needed.
	 */
	IDE_AFLAG_LIMIT_NFRAMES		= (1 << 7),
-	/* Seeking in progress. */
-	IDE_AFLAG_SEEKING		= (1 << 8),
	/* Saved TOC information is current. */
	IDE_AFLAG_TOC_VALID		= (1 << 9),
	/* We think that the drive door is locked. */
@@ -845,8 +839,6 @@ typedef struct hwif_s {
	unsigned extra_ports;	/* number of extra dma ports */
 
	unsigned present    : 1;	/* this interface exists */
-	unsigned serialized : 1;	/* serialized all channel operation */
-	unsigned sharing_irq: 1;	/* 1 = sharing irq with another hwif */
	unsigned sg_mapped  : 1;	/* sg_table and sg_nents are ready */
 
	struct device gendev;
@@ -909,6 +901,8 @@ typedef struct hwgroup_s {
 
	int req_gen;
	int req_gen_timer;
+
+	spinlock_t lock;
 } ide_hwgroup_t;
 
 typedef struct ide_driver_s ide_driver_t;
@@ -1122,6 +1116,14 @@ enum {
	IDE_PM_COMPLETED,
 };
 
+int generic_ide_suspend(struct device *, pm_message_t);
+int generic_ide_resume(struct device *);
+
+void ide_complete_power_step(ide_drive_t *, struct request *);
+ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
+void ide_complete_pm_request(ide_drive_t *, struct request *);
+void ide_check_pm_state(ide_drive_t *, struct request *);
+
 /*
  * Subdrivers support.
  *
@@ -1376,8 +1378,8 @@ enum {
	IDE_HFLAG_LEGACY_IRQS		= (1 << 21),
	/* force use of legacy IRQs */
	IDE_HFLAG_FORCE_LEGACY_IRQS	= (1 << 22),
-	/* limit LBA48 requests to 256 sectors */
-	IDE_HFLAG_RQSIZE_256		= (1 << 23),
+	/* host is TRM290 */
+	IDE_HFLAG_TRM290		= (1 << 23),
	/* use 32-bit I/O ops */
	IDE_HFLAG_IO_32BIT		= (1 << 24),
	/* unmask IRQs */
@@ -1415,6 +1417,9 @@ struct ide_port_info {
 
	ide_pci_enablebit_t enablebits[2];
	hwif_chipset_t chipset;
+
+	u16 max_sectors;	/* if < than the default one */
+
	u32 host_flags;
	u8 pio_mask;
	u8 swdma_mask;
@@ -1610,13 +1615,13 @@ extern struct mutex ide_cfg_mtx;
 /*
  * Structure locking:
  *
- * ide_cfg_mtx and ide_lock together protect changes to
- * ide_hwif_t->{next,hwgroup}
+ * ide_cfg_mtx and hwgroup->lock together protect changes to
+ * ide_hwif_t->next
  * ide_drive_t->next
  *
- * ide_hwgroup_t->busy: ide_lock
- * ide_hwgroup_t->hwif: ide_lock
- * ide_hwif_t->mate: constant, no locking
+ * ide_hwgroup_t->busy: hwgroup->lock
+ * ide_hwgroup_t->hwif: hwgroup->lock
+ * ide_hwif_t->{hwgroup,mate}: constant, no locking
  * ide_drive_t->hwif: constant, no locking
  */
 
include/linux/interrupt.h
@@ -14,6 +14,8 @@
 #include <linux/irqflags.h>
 #include <linux/smp.h>
 #include <linux/percpu.h>
+#include <linux/irqnr.h>
+
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
@@ -251,9 +253,6 @@ enum
	BLOCK_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
-#ifdef CONFIG_HIGH_RES_TIMERS
-	HRTIMER_SOFTIRQ,
-#endif
	RCU_SOFTIRQ,	/* Preferable RCU should always be the last softirq */
 
	NR_SOFTIRQS
include/linux/irq.h
@@ -130,9 +130,14 @@ struct irq_chip {
	const char *typename;
 };
 
+struct timer_rand_state;
+struct irq_2_iommu;
 /**
  * struct irq_desc - interrupt descriptor
  * @irq:		interrupt number for this descriptor
+ * @timer_rand_state:	pointer to timer rand state struct
+ * @kstat_irqs:		irq stats per cpu
+ * @irq_2_iommu:	iommu with this irq
  * @handle_irq:		highlevel irq-events handler [if NULL, __do_IRQ()]
  * @chip:		low level interrupt hardware access
  * @msi_desc:		MSI descriptor
@@ -144,8 +149,8 @@ struct irq_chip {
  * @depth:		disable-depth, for nested irq_disable() calls
  * @wake_depth:		enable depth, for multiple set_irq_wake() callers
  * @irq_count:		stats field to detect stalled irqs
- * @irqs_unhandled:	stats field for spurious unhandled interrupts
  * @last_unhandled:	aging timer for unhandled count
+ * @irqs_unhandled:	stats field for spurious unhandled interrupts
  * @lock:		locking for SMP
  * @affinity:		IRQ affinity on SMP
  * @cpu:		cpu index useful for balancing
@@ -155,6 +160,13 @@ struct irq_chip {
  */
 struct irq_desc {
	unsigned int irq;
+#ifdef CONFIG_SPARSE_IRQ
+	struct timer_rand_state *timer_rand_state;
+	unsigned int *kstat_irqs;
+# ifdef CONFIG_INTR_REMAP
+	struct irq_2_iommu *irq_2_iommu;
+# endif
+#endif
	irq_flow_handler_t handle_irq;
	struct irq_chip *chip;
	struct msi_desc *msi_desc;
@@ -166,8 +178,8 @@ struct irq_desc {
	unsigned int depth;		/* nested irq disables */
	unsigned int wake_depth;	/* nested wake enables */
	unsigned int irq_count;		/* For detecting broken IRQs */
-	unsigned int irqs_unhandled;
	unsigned long last_unhandled;	/* Aging timer for unhandled count */
+	unsigned int irqs_unhandled;
	spinlock_t lock;
 #ifdef CONFIG_SMP
	cpumask_t affinity;
@@ -182,12 +194,51 @@ struct irq_desc {
	const char *name;
 } ____cacheline_internodealigned_in_smp;
 
+extern void early_irq_init(void);
+extern void arch_early_irq_init(void);
+extern void arch_init_chip_data(struct irq_desc *desc, int cpu);
+extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
+				 struct irq_desc *desc, int cpu);
+extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
+
+#ifndef CONFIG_SPARSE_IRQ
 extern struct irq_desc irq_desc[NR_IRQS];
 
 static inline struct irq_desc *irq_to_desc(unsigned int irq)
 {
-	return (irq < nr_irqs) ? irq_desc + irq : NULL;
+	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
 }
+static inline struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+{
+	return irq_to_desc(irq);
+}
+
+#else
+
+extern struct irq_desc *irq_to_desc(unsigned int irq);
+extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);
+extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu);
+
+# define for_each_irq_desc(irq, desc)		\
+	for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; irq++, desc = irq_to_desc(irq))
+# define for_each_irq_desc_reverse(irq, desc)	\
+	for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; irq--, desc = irq_to_desc(irq))
+
+#define kstat_irqs_this_cpu(DESC) \
+	((DESC)->kstat_irqs[smp_processor_id()])
+#define kstat_incr_irqs_this_cpu(irqno, DESC) \
+	((DESC)->kstat_irqs[smp_processor_id()]++)
+
+#endif
+
+static inline struct irq_desc *
+irq_remap_to_desc(unsigned int irq, struct irq_desc *desc)
+{
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+	return irq_to_desc(irq);
+#else
+	return desc;
+#endif
+}
 
 /*
@@ -381,6 +432,11 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 #define get_irq_data(irq)	(irq_to_desc(irq)->handler_data)
 #define get_irq_msi(irq)	(irq_to_desc(irq)->msi_desc)
 
+#define get_irq_desc_chip(desc)		((desc)->chip)
+#define get_irq_desc_chip_data(desc)	((desc)->chip_data)
+#define get_irq_desc_data(desc)		((desc)->handler_data)
+#define get_irq_desc_msi(desc)		((desc)->msi_desc)
+
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
 #endif /* !CONFIG_S390 */
include/linux/irqnr.h
@@ -1,24 +1,38 @@
 #ifndef _LINUX_IRQNR_H
 #define _LINUX_IRQNR_H
 
+/*
+ * Generic irq_desc iterators:
+ */
+#ifdef __KERNEL__
+
 #ifndef CONFIG_GENERIC_HARDIRQS
 #include <asm/irq.h>
 # define nr_irqs		NR_IRQS
 
 # define for_each_irq_desc(irq, desc)		\
	for (irq = 0; irq < nr_irqs; irq++)
+
+# define for_each_irq_desc_reverse(irq, desc)	\
+	for (irq = nr_irqs - 1; irq >= 0; irq--)
 #else
+
 extern int nr_irqs;
 
+#ifndef CONFIG_SPARSE_IRQ
+
+struct irq_desc;
 # define for_each_irq_desc(irq, desc)		\
	for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++)
-
-# define for_each_irq_desc_reverse(irq, desc)			\
-	for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1);	\
-	     irq >= 0; irq--, desc--)
+# define for_each_irq_desc_reverse(irq, desc)			\
+	for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1);	\
+	     irq >= 0; irq--, desc--)
+#endif
 #endif
 
-#define for_each_irq_nr(irq)			\
-	for (irq = 0; irq < nr_irqs; irq++)
+#define for_each_irq_nr(irq)                   \
+       for (irq = 0; irq < nr_irqs; irq++)
+
+#endif /* __KERNEL__ */
 
 #endif
include/linux/jiffies.h
@@ -115,10 +115,20 @@ static inline u64 get_jiffies_64(void)
	 ((long)(a) - (long)(b) >= 0))
 #define time_before_eq(a,b)	time_after_eq(b,a)
 
+/*
+ * Calculate whether a is in the range of [b, c].
+ */
 #define time_in_range(a,b,c) \
	(time_after_eq(a,b) && \
	 time_before_eq(a,c))
 
+/*
+ * Calculate whether a is in the range of [b, c).
+ */
+#define time_in_range_open(a,b,c) \
+	(time_after_eq(a,b) && \
+	 time_before(a,c))
+
 /* Same as above, but does so with platform independent 64bit types.
  * These must be used when utilizing jiffies_64 (i.e. return value of
  * get_jiffies_64() */
include/linux/kernel.h
@@ -141,6 +141,15 @@ extern int _cond_resched(void);
		(__x < 0) ? -__x : __x;		\
	})
 
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void);
+#else
+static inline void might_fault(void)
+{
+	might_sleep();
+}
+#endif
+
 extern struct atomic_notifier_head panic_notifier_list;
 extern long (*panic_blink)(long time);
 NORET_TYPE void panic(const char * fmt, ...)
@@ -188,6 +197,8 @@ extern unsigned long long memparse(const char *ptr, char **retptr);
 extern int core_kernel_text(unsigned long addr);
 extern int __kernel_text_address(unsigned long addr);
 extern int kernel_text_address(unsigned long addr);
+extern int func_ptr_is_kernel_text(void *ptr);
+
 struct pid;
 extern struct pid *session_of_pgrp(struct pid *pgrp);
 
include/linux/kernel_stat.h
@@ -28,7 +28,9 @@ struct cpu_usage_stat {
 
 struct kernel_stat {
	struct cpu_usage_stat cpustat;
-	unsigned int irqs[NR_IRQS];
+#ifndef CONFIG_SPARSE_IRQ
+	unsigned int irqs[NR_IRQS];
+#endif
 };
 
 DECLARE_PER_CPU(struct kernel_stat, kstat);
@@ -39,6 +41,10 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
 
 extern unsigned long long nr_context_switches(void);
 
+#ifndef CONFIG_SPARSE_IRQ
+#define kstat_irqs_this_cpu(irq) \
+	(kstat_this_cpu.irqs[irq])
+
 struct irq_desc;
 
 static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
@@ -46,11 +52,17 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
 {
	kstat_this_cpu.irqs[irq]++;
 }
+#endif
 
+
+#ifndef CONFIG_SPARSE_IRQ
 static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
	return kstat_cpu(cpu).irqs[irq];
 }
+#else
+extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
+#endif
 
 /*
  * Number of interrupts per specific IRQ source, since bootup
include/linux/lguest_launcher.h
@@ -54,9 +54,13 @@ struct lguest_vqconfig {
 /* Write command first word is a request. */
 enum lguest_req
 {
-	LHREQ_INITIALIZE, /* + base, pfnlimit, pgdir, start */
+	LHREQ_INITIALIZE, /* + base, pfnlimit, start */
	LHREQ_GETDMA, /* No longer used */
	LHREQ_IRQ, /* + irq */
	LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */
 };
+
+/* The alignment to use between consumer and producer parts of vring.
+ * x86 pagesize for historical reasons. */
+#define LGUEST_VRING_ALIGN	4096
 #endif /* _LINUX_LGUEST_LAUNCHER */
include/linux/libata.h
@@ -213,10 +213,11 @@ enum {
	ATA_PFLAG_FROZEN	= (1 << 2), /* port is frozen */
	ATA_PFLAG_RECOVERED	= (1 << 3), /* recovery action performed */
	ATA_PFLAG_LOADING	= (1 << 4), /* boot/loading probe */
-	ATA_PFLAG_UNLOADING	= (1 << 5), /* module is unloading */
	ATA_PFLAG_SCSI_HOTPLUG	= (1 << 6), /* SCSI hotplug scheduled */
	ATA_PFLAG_INITIALIZING	= (1 << 7), /* being initialized, don't touch */
	ATA_PFLAG_RESETTING	= (1 << 8), /* reset in progress */
+	ATA_PFLAG_UNLOADING	= (1 << 9), /* driver is being unloaded */
+	ATA_PFLAG_UNLOADED	= (1 << 10), /* driver is unloaded */
 
	ATA_PFLAG_SUSPENDED	= (1 << 17), /* port is suspended (power) */
	ATA_PFLAG_PM_PENDING	= (1 << 18), /* PM operation pending */
@@ -1285,26 +1286,62 @@ static inline int ata_link_active(struct ata_link *link)
	return ata_tag_valid(link->active_tag) || link->sactive;
 }
 
-extern struct ata_link *__ata_port_next_link(struct ata_port *ap,
-					     struct ata_link *link,
-					     bool dev_only);
+/*
+ * Iterators
+ *
+ * ATA_LITER_* constants are used to select link iteration mode and
+ * ATA_DITER_* device iteration mode.
+ *
+ * For a custom iteration directly using ata_{link|dev}_next(), if
+ * @link or @dev, respectively, is NULL, the first element is
+ * returned.  @dev and @link can be any valid device or link and the
+ * next element according to the iteration mode will be returned.
+ * After the last element, NULL is returned.
+ */
+enum ata_link_iter_mode {
+	ATA_LITER_EDGE,		/* if present, PMP links only; otherwise,
+				 * host link.  no slave link */
+	ATA_LITER_HOST_FIRST,	/* host link followed by PMP or slave links */
+	ATA_LITER_PMP_FIRST,	/* PMP links followed by host link,
+				 * slave link still comes after host link */
+};
 
-#define __ata_port_for_each_link(link, ap) \
-	for ((link) = __ata_port_next_link((ap), NULL, false); (link); \
-	     (link) = __ata_port_next_link((ap), (link), false))
+enum ata_dev_iter_mode {
+	ATA_DITER_ENABLED,
+	ATA_DITER_ENABLED_REVERSE,
+	ATA_DITER_ALL,
+	ATA_DITER_ALL_REVERSE,
+};
 
-#define ata_port_for_each_link(link, ap) \
-	for ((link) = __ata_port_next_link((ap), NULL, true); (link); \
-	     (link) = __ata_port_next_link((ap), (link), true))
+extern struct ata_link *ata_link_next(struct ata_link *link,
+				      struct ata_port *ap,
+				      enum ata_link_iter_mode mode);
 
-#define ata_link_for_each_dev(dev, link) \
-	for ((dev) = (link)->device; \
-	     (dev) < (link)->device + ata_link_max_devices(link) || ((dev) = NULL); \
-	     (dev)++)
+extern struct ata_device *ata_dev_next(struct ata_device *dev,
+				       struct ata_link *link,
+				       enum ata_dev_iter_mode mode);
 
-#define ata_link_for_each_dev_reverse(dev, link) \
-	for ((dev) = (link)->device + ata_link_max_devices(link) - 1; \
-	     (dev) >= (link)->device || ((dev) = NULL); (dev)--)
+/*
+ * Shortcut notation for iterations
+ *
+ * ata_for_each_link() iterates over each link of @ap according to
+ * @mode.  @link points to the current link in the loop.  @link is
+ * NULL after loop termination.  ata_for_each_dev() works the same way
+ * except that it iterates over each device of @link.
+ *
+ * Note that the mode prefixes ATA_{L|D}ITER_ shouldn't need to be
+ * specified when using the following shorthand notations.  Only the
+ * mode itself (EDGE, HOST_FIRST, ENABLED, etc...) should be
+ * specified.  This not only increases brevity but also makes it
+ * impossible to use ATA_LITER_* for device iteration or vice-versa.
+ */
+#define ata_for_each_link(link, ap, mode) \
+	for ((link) = ata_link_next(NULL, (ap), ATA_LITER_##mode); (link); \
+	     (link) = ata_link_next((link), (ap), ATA_LITER_##mode))
+
+#define ata_for_each_dev(dev, link, mode) \
+	for ((dev) = ata_dev_next(NULL, (link), ATA_DITER_##mode); (dev); \
+	     (dev) = ata_dev_next((dev), (link), ATA_DITER_##mode))
 
 /**
  *	ata_ncq_enabled - Test whether NCQ is enabled
include/linux/lockd/bind.h
@@ -41,6 +41,7 @@ struct nlmclnt_initdata {
	size_t addrlen;
	unsigned short protocol;
	u32 nfs_version;
+	int noresvport;
 };
 
 /*
include/linux/lockd/lockd.h
@@ -49,6 +49,7 @@ struct nlm_host {
	unsigned short h_proto;		/* transport proto */
	unsigned short h_reclaiming : 1,
			h_server : 1,	/* server side, not client side */
+			h_noresvport : 1,
			h_inuse : 1;
	wait_queue_head_t h_gracewait;	/* wait while reclaiming */
	struct rw_semaphore h_rwsem;	/* Reboot recovery lock */
@@ -220,7 +221,8 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
						const size_t salen,
						const unsigned short protocol,
						const u32 version,
-						const char *hostname);
+						const char *hostname,
+						int noresvport);
 struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
						const char *hostname,
						const size_t hostname_len);
include/linux/lockdep.h
@@ -73,6 +73,8 @@ struct lock_class_key {
	struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
 };
 
+#define LOCKSTAT_POINTS		4
+
 /*
  * The lock-class itself:
  */
@@ -119,7 +121,8 @@ struct lock_class {
	int name_version;
 
 #ifdef CONFIG_LOCK_STAT
-	unsigned long contention_point[4];
+	unsigned long contention_point[LOCKSTAT_POINTS];
+	unsigned long contending_point[LOCKSTAT_POINTS];
 #endif
 };
 
@@ -144,6 +147,7 @@ enum bounce_type {
 
 struct lock_class_stats {
	unsigned long contention_point[4];
+	unsigned long contending_point[4];
	struct lock_time read_waittime;
	struct lock_time write_waittime;
	struct lock_time read_holdtime;
@@ -165,6 +169,7 @@ struct lockdep_map {
	const char *name;
 #ifdef CONFIG_LOCK_STAT
	int cpu;
+	unsigned long ip;
 #endif
 };
 
@@ -309,8 +314,15 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
 
-extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass,
-			      unsigned long ip);
+extern void lock_set_class(struct lockdep_map *lock, const char *name,
+			   struct lock_class_key *key, unsigned int subclass,
+			   unsigned long ip);
+
+static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
+{
+	lock_set_class(lock, lock->name, lock->key, subclass, ip);
+}
 
 # define INIT_LOCKDEP	.lockdep_recursion = 0,
 
@@ -328,6 +340,7 @@ static inline void lockdep_on(void)
 
 # define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
 # define lock_release(l, n, i)			do { } while (0)
+# define lock_set_class(l, n, k, s, i)		do { } while (0)
 # define lock_set_subclass(l, s, i)		do { } while (0)
 # define lockdep_init()				do { } while (0)
 # define lockdep_info()				do { } while (0)
@@ -356,7 +369,7 @@ struct lock_class_key { };
 #ifdef CONFIG_LOCK_STAT
 
 extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
-extern void lock_acquired(struct lockdep_map *lock);
+extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
 
 #define LOCK_CONTENDED(_lock, try, lock)			\
 do {								\
@@ -364,20 +377,20 @@ do {								\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
-	lock_acquired(&(_lock)->dep_map);			\
+	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
 } while (0)
 
 #else /* CONFIG_LOCK_STAT */
 
 #define lock_contended(lockdep_map, ip) do {} while (0)
-#define lock_acquired(lockdep_map) do {} while (0)
+#define lock_acquired(lockdep_map, ip) do {} while (0)
 
 #define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)
 
 #endif /* CONFIG_LOCK_STAT */
 
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
+#ifdef CONFIG_GENERIC_HARDIRQS
 extern void early_init_irq_lock_class(void);
 #else
 static inline void early_init_irq_lock_class(void)
@@ -481,4 +494,22 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 # define lock_map_release(l)			do { } while (0)
 #endif
 
+#ifdef CONFIG_PROVE_LOCKING
+# define might_lock(lock)						\
+do {									\
+	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
+	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);	\
+	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
+} while (0)
+# define might_lock_read(lock)						\
+do {									\
+	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
+	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
+	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
+} while (0)
+#else
+# define might_lock(lock) do { } while (0)
+# define might_lock_read(lock) do { } while (0)
+#endif
+
 #endif /* __LINUX_LOCKDEP_H */
include/linux/mm_types.h
@@ -232,8 +232,9 @@ struct mm_struct {
	struct core_state *core_state; /* coredumping support */
 
	/* aio bits */
-	rwlock_t ioctx_list_lock;	/* aio lock */
-	struct kioctx *ioctx_list;
+	spinlock_t ioctx_lock;
+	struct hlist_head ioctx_list;
+
 #ifdef CONFIG_MM_OWNER
	/*
	 * "owner" points to a task that is regarded as the canonical
include/linux/msi.h
@@ -10,8 +10,11 @@ struct msi_msg {
 };
 
 /* Helper functions */
+struct irq_desc;
 extern void mask_msi_irq(unsigned int irq);
 extern void unmask_msi_irq(unsigned int irq);
+extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
+extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
 extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
 extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
 
include/linux/mutex.h
@@ -144,6 +144,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
 /*
  * NOTE: mutex_trylock() follows the spin_trylock() convention,
  *       not the down_trylock() convention!
+ *
+ * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
  */
 extern int mutex_trylock(struct mutex *lock);
 extern void mutex_unlock(struct mutex *lock);
include/linux/nfs_fs.h
@@ -83,7 +83,7 @@ struct nfs_open_context {
	struct rpc_cred *cred;
	struct nfs4_state *state;
	fl_owner_t lockowner;
-	int mode;
+	fmode_t mode;
 
	unsigned long flags;
 #define NFS_CONTEXT_ERROR_WRITE	(0)
@@ -130,7 +130,10 @@ struct nfs_inode {
	 *
	 * We need to revalidate the cached attrs for this inode if
	 *
-	 *	jiffies - read_cache_jiffies > attrtimeo
+	 *	jiffies - read_cache_jiffies >= attrtimeo
+	 *
+	 * Please note the comparison is greater than or equal
+	 * so that zero timeout values can be specified.
	 */
	unsigned long read_cache_jiffies;
	unsigned long attrtimeo;
@@ -180,7 +183,7 @@ struct nfs_inode {
	/* NFSv4 state */
	struct list_head open_states;
	struct nfs_delegation *delegation;
-	int delegation_state;
+	fmode_t delegation_state;
	struct rw_semaphore rwsem;
 #endif /* CONFIG_NFS_V4*/
	struct inode vfs_inode;
@@ -342,7 +345,7 @@ extern int nfs_setattr(struct dentry *, struct iattr *);
 extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr);
 extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
 extern void put_nfs_open_context(struct nfs_open_context *ctx);
-extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, int mode);
+extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode);
 extern u64 nfs_compat_user_ino64(u64 fileid);
 extern void nfs_fattr_init(struct nfs_fattr *fattr);
 
@@ -532,12 +535,6 @@ static inline void nfs3_forget_cached_acls(struct inode *inode)
 }
 #endif /* CONFIG_NFS_V3_ACL */
 
-/*
- * linux/fs/mount_clnt.c
- */
-extern int nfs_mount(struct sockaddr *, size_t, char *, char *,
-		     int, int, struct nfs_fh *);
-
 /*
  * inline functions
  */
include/linux/nfs_fs_sb.h
@@ -42,12 +42,6 @@ struct nfs_client {
	struct rb_root cl_openowner_id;
	struct rb_root cl_lockowner_id;
 
-	/*
-	 * The following rwsem ensures exclusive access to the server
-	 * while we recover the state following a lease expiration.
-	 */
-	struct rw_semaphore cl_sem;
-
	struct list_head cl_delegations;
	struct rb_root cl_state_owners;
	spinlock_t cl_lock;
include/linux/nfs_mount.h
@@ -45,7 +45,7 @@ struct nfs_mount_data {
	char context[NFS_MAX_CONTEXT_LEN + 1];	/* 6 */
 };
 
-/* bits in the flags field */
+/* bits in the flags field visible to user space */
 
 #define NFS_MOUNT_SOFT		0x0001	/* 1 */
 #define NFS_MOUNT_INTR		0x0002	/* 1 */ /* now unused, but ABI */
@@ -68,5 +68,6 @@ struct nfs_mount_data {
 /* The following are for internal use only */
 #define NFS_MOUNT_LOOKUP_CACHE_NONEG	0x10000
 #define NFS_MOUNT_LOOKUP_CACHE_NONE	0x20000
+#define NFS_MOUNT_NORESVPORT		0x40000
 
 #endif
include/linux/nfs_xdr.h
@@ -120,13 +120,14 @@ struct nfs_openargs {
	const struct nfs_fh *fh;
	struct nfs_seqid *seqid;
	int open_flags;
+	fmode_t fmode;
	__u64 clientid;
	__u64 id;
	union {
		struct iattr *attrs;	/* UNCHECKED, GUARDED */
		nfs4_verifier verifier;	/* EXCLUSIVE */
		nfs4_stateid delegation;	/* CLAIM_DELEGATE_CUR */
-		int delegation_type;		/* CLAIM_PREVIOUS */
+		fmode_t delegation_type;	/* CLAIM_PREVIOUS */
	} u;
	const struct qstr *name;
	const struct nfs_server *server;	/* Needed for ID mapping */
@@ -143,7 +144,7 @@ struct nfs_openres {
	struct nfs_fattr *dir_attr;
	struct nfs_seqid *seqid;
	const struct nfs_server *server;
-	int delegation_type;
+	fmode_t delegation_type;
	nfs4_stateid delegation;
	__u32 do_recall;
	__u64 maxsize;
@@ -171,7 +172,7 @@ struct nfs_closeargs {
	struct nfs_fh *fh;
	nfs4_stateid *stateid;
	struct nfs_seqid *seqid;
-	int open_flags;
+	fmode_t fmode;
	const u32 *bitmask;
 };
 
include/linux/nfsd/state.h
@@ -124,6 +124,8 @@ struct nfs4_client {
	nfs4_verifier cl_verifier;	/* generated by client */
	time_t cl_time;			/* time of last lease renewal */
	__be32 cl_addr;			/* client ipaddress */
+	u32 cl_flavor;			/* setclientid pseudoflavor */
+	char *cl_principal;		/* setclientid principal name */
	struct svc_cred cl_cred;	/* setclientid principal */
	clientid_t cl_clientid;		/* generated by server */
	nfs4_verifier cl_confirm;	/* generated by server */
include/linux/of_platform.h
@@ -26,8 +26,7 @@ extern struct bus_type of_platform_bus_type;
 
 /*
  * An of_platform_driver driver is attached to a basic of_device on
- * the "platform bus" (of_platform_bus_type) (or ISA, EBUS and SBUS
- * busses on sparc).
+ * the "platform bus" (of_platform_bus_type).
  */
 struct of_platform_driver
 {
include/linux/oprofile.h
@@ -86,8 +86,7 @@ int oprofile_arch_init(struct oprofile_operations * ops);
 void oprofile_arch_exit(void);
 
 /**
- * Add a sample. This may be called from any context. Pass
- * smp_processor_id() as cpu.
+ * Add a sample. This may be called from any context.
  */
 void oprofile_add_sample(struct pt_regs * const regs, unsigned long event);
 
include/linux/posix-timers.h
@@ -45,7 +45,11 @@ struct k_itimer {
	int it_requeue_pending;		/* waiting to requeue this timer */
 #define REQUEUE_PENDING 1
	int it_sigev_notify;		/* notify word of sigevent struct */
-	struct task_struct *it_process;	/* process to send signal to */
+	struct signal_struct *it_signal;
+	union {
+		struct pid *it_pid;	/* pid of process to send signal to */
+		struct task_struct *it_process;	/* for clock_nanosleep */
+	};
	struct sigqueue *sigq;		/* signal queue entry. */
	union {
		struct {
include/linux/random.h
@@ -8,6 +8,7 @@
 #define _LINUX_RANDOM_H
 
 #include <linux/ioctl.h>
+#include <linux/irqnr.h>
 
 /* ioctl()'s for the random number generator */
 
@@ -44,6 +45,56 @@ struct rand_pool_info {
 
 extern void rand_initialize_irq(int irq);
 
+struct timer_rand_state;
+#ifndef CONFIG_SPARSE_IRQ
+
+extern struct timer_rand_state *irq_timer_state[];
+
+static inline struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+{
+	if (irq >= nr_irqs)
+		return NULL;
+
+	return irq_timer_state[irq];
+}
+
+static inline void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state)
+{
+	if (irq >= nr_irqs)
+		return;
+
+	irq_timer_state[irq] = state;
+}
+
+#else
+
+#include <linux/irq.h>
+static inline struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+{
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+
+	if (!desc)
+		return NULL;
+
+	return desc->timer_rand_state;
+}
+
+static inline void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state)
+{
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+
+	if (!desc)
+		return;
+
+	desc->timer_rand_state = state;
+}
+#endif
+
+
 extern void add_input_randomness(unsigned int type, unsigned int code,
				 unsigned int value);
 extern void add_interrupt_randomness(int irq);
include/linux/rcuclassic.h
@@ -41,7 +41,7 @@
 #include <linux/seqlock.h>
 
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK	( 3 * HZ) /* for rcp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_CHECK	(10 * HZ) /* for rcp->jiffies_stall */
 #define RCU_SECONDS_TILL_STALL_RECHECK	(30 * HZ) /* for rcp->jiffies_stall */
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
include/linux/rcupdate.h
@@ -52,11 +52,15 @@ struct rcu_head {
	void (*func)(struct rcu_head *head);
 };
 
-#ifdef CONFIG_CLASSIC_RCU
+#if defined(CONFIG_CLASSIC_RCU)
 #include <linux/rcuclassic.h>
-#else /* #ifdef CONFIG_CLASSIC_RCU */
+#elif defined(CONFIG_TREE_RCU)
+#include <linux/rcutree.h>
+#elif defined(CONFIG_PREEMPT_RCU)
 #include <linux/rcupreempt.h>
-#endif /* #else #ifdef CONFIG_CLASSIC_RCU */
+#else
+#error "Unknown RCU implementation specified to kernel configuration"
+#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */
 
 #define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
 #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
329
include/linux/rcutree.h
Normal file
329
include/linux/rcutree.h
Normal file
@@ -0,0 +1,329 @@
|
||||
/*
|
||||
* Read-Copy Update mechanism for mutual exclusion (tree-based version)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
*
|
||||
* Copyright IBM Corporation, 2008
|
||||
*
|
||||
* Author: Dipankar Sarma <dipankar@in.ibm.com>
|
||||
* Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm
|
||||
*
|
||||
* Based on the original work by Paul McKenney <paulmck@us.ibm.com>
|
||||
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
|
||||
*
|
||||
* For detailed explanation of Read-Copy Update mechanism see -
|
||||
* Documentation/RCU
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_RCUTREE_H
|
||||
#define __LINUX_RCUTREE_H
|
||||
|
||||
#include <linux/cache.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/threads.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/seqlock.h>
|
||||
|
||||
/*
|
||||
* Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
|
||||
* In theory, it should be possible to add more levels straightforwardly.
|
||||
* In practice, this has not been tested, so there is probably some
|
||||
* bug somewhere.
|
||||
*/
|
||||
#define MAX_RCU_LVLS 3
|
||||
#define RCU_FANOUT (CONFIG_RCU_FANOUT)
|
||||
#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT)
|
||||
#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT)
|
||||
|
||||
#if NR_CPUS <= RCU_FANOUT
|
||||
# define NUM_RCU_LVLS 1
|
||||
# define NUM_RCU_LVL_0 1
|
||||
# define NUM_RCU_LVL_1 (NR_CPUS)
|
||||
# define NUM_RCU_LVL_2 0
|
||||
# define NUM_RCU_LVL_3 0
|
||||
#elif NR_CPUS <= RCU_FANOUT_SQ
|
||||
# define NUM_RCU_LVLS 2
|
||||
# define NUM_RCU_LVL_0 1
|
||||
# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT)
|
||||
# define NUM_RCU_LVL_2 (NR_CPUS)
|
||||
# define NUM_RCU_LVL_3 0
|
||||
#elif NR_CPUS <= RCU_FANOUT_CUBE
|
||||
# define NUM_RCU_LVLS 3
|
||||
# define NUM_RCU_LVL_0 1
|
||||
# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ)
|
||||
# define NUM_RCU_LVL_2 (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT))
|
||||
# define NUM_RCU_LVL_3 NR_CPUS
|
||||
#else
|
||||
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
|
||||
#endif /* #if (NR_CPUS) <= RCU_FANOUT */
|
||||
|
||||
#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
|
||||
#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)

/*
 * Dynticks per-CPU state.
 */
struct rcu_dynticks {
	int dynticks_nesting;	/* Track nesting level, sort of. */
	int dynticks;		/* Even value for dynticks-idle, else odd. */
	int dynticks_nmi;	/* Even value for either dynticks-idle or */
				/*  not in nmi handler, else odd.  So this */
				/*  remains even for nmi from irq handler. */
};

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	spinlock_t lock;
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed.*/
	unsigned long qsmaskinit;
				/* Per-GP initialization for qsmask. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
	int	grplo;		/* lowest-numbered CPU or group here. */
	int	grphi;		/* highest-numbered CPU or group here. */
	u8	grpnum;		/* CPU/group number for next level up. */
	u8	level;		/* root is at level 0. */
	struct rcu_node *parent;
} ____cacheline_internodealigned_in_smp;

/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL		3
#define RCU_NEXT_SIZE		4

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	long		completed;	/* Track rsp->completed gp number */
					/*  in order to detect GP end. */
	long		gpnum;		/* Highest gp number that this CPU */
					/*  is aware of having started. */
	long		passed_quiesc_completed;
					/* Value of completed at time of qs. */
	bool		passed_quiesc;	/* User-mode/idle loop etc. */
	bool		qs_pending;	/* Core waits for quiesc state. */
	bool		beenonline;	/* CPU online at least once. */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long	grpmask;	/* Mask to apply to leaf qsmask. */

	/* 2) batch handling */
	/*
	 * If nxtlist is not NULL, it is partitioned as follows.
	 * Any of the partitions might be empty, in which case the
	 * pointer to that partition will be equal to the pointer for
	 * the following partition.  When the list is empty, all of
	 * the nxttail elements point to nxtlist, which is NULL.
	 *
	 * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]):
	 *	Entries that might have arrived after current GP ended
	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
	 *	Entries known to have arrived before current GP ended
	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
	 *	Entries that batch # <= ->completed - 1: waiting for current GP
	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
	 *	Entries that batch # <= ->completed
	 *	The grace period for these entries has completed, and
	 *	the other grace-period-completed entries may be moved
	 *	here temporarily in rcu_process_callbacks().
	 */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail[RCU_NEXT_SIZE];
	long		qlen;		/* # of queued callbacks */
	long		blimit;		/* Upper limit on a processed batch */

#ifdef CONFIG_NO_HZ
	/* 3) dynticks interface. */
	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */
	int dynticks_nmi_snap;		/* Per-GP tracking for dynticks_nmi. */
#endif /* #ifdef CONFIG_NO_HZ */

	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
#ifdef CONFIG_NO_HZ
	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
#endif /* #ifdef CONFIG_NO_HZ */
	unsigned long offline_fqs;	/* Kicked due to being offline. */
	unsigned long resched_ipi;	/* Sent a resched IPI. */

	/* 5) state to allow this CPU to force_quiescent_state on others */
	long n_rcu_pending;		/* rcu_pending() calls since boot. */
	long n_rcu_pending_force_qs;	/* when to force quiescent states. */

	int cpu;
};
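
To make the list partitioning above concrete, a hedged sketch (a hypothetical helper, not part of this patch) that walks only the first segment, the callbacks whose grace period has already completed:

	/* Illustration only: count the entries in [nxtlist,
	 * *nxttail[RCU_DONE_TAIL]), i.e. callbacks whose grace period
	 * has ended.  When the list is empty, nxtlist is NULL and the
	 * loop body never runs. */
	static int count_done_callbacks(struct rcu_data *rdp)
	{
		struct rcu_head *rhp;
		int n = 0;

		for (rhp = rdp->nxtlist; rhp != *rdp->nxttail[RCU_DONE_TAIL];
		     rhp = rhp->next)
			n++;
		return n;
	}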

/* Values for signaled field in struct rcu_state. */
#define RCU_GP_INIT		0	/* Grace period being initialized. */
#define RCU_SAVE_DYNTICK	1	/* Need to scan dyntick state. */
#define RCU_FORCE_QS		2	/* Need to force quiescent state. */
#ifdef CONFIG_NO_HZ
#define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK
#else /* #ifdef CONFIG_NO_HZ */
#define RCU_SIGNAL_INIT		RCU_FORCE_QS
#endif /* #else #ifdef CONFIG_NO_HZ */

#define RCU_JIFFIES_TILL_FORCE_QS	3	/* for rsp->jiffies_force_qs */
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
#define RCU_SECONDS_TILL_STALL_CHECK	(10 * HZ)  /* for rsp->jiffies_stall */
#define RCU_SECONDS_TILL_STALL_RECHECK	(30 * HZ)  /* for rsp->jiffies_stall */
#define RCU_STALL_RAT_DELAY		2	/* Allow other CPUs time */
						/*  to take at least one */
						/*  scheduling clock irq */
						/*  before ratting on them. */

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[NUM_RCU_LVLS];	/* Hierarchy levels. */
	u32 levelcnt[MAX_RCU_LVLS + 1];		/* # nodes in each level. */
	u8 levelspread[NUM_RCU_LVLS];		/* kids/node in each level. */
	struct rcu_data *rda[NR_CPUS];		/* array of rdp pointers. */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8	signaled ____cacheline_internodealigned_in_smp;
						/* Force QS state. */
	long	gpnum;				/* Current gp number. */
	long	completed;			/* # of last completed gp. */
	spinlock_t onofflock;			/* exclude on/offline and */
						/*  starting new GP. */
	spinlock_t fqslock;			/* Only one task forcing */
						/*  quiescent states. */
	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
						/*  due to lock unavailable. */
	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
						/*  due to no GP active. */
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
	unsigned long gp_start;			/* Time at which GP started, */
						/*  but in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
#ifdef CONFIG_NO_HZ
	long dynticks_completed;		/* Value of completed @ snap. */
#endif /* #ifdef CONFIG_NO_HZ */
};

extern struct rcu_state rcu_state;
DECLARE_PER_CPU(struct rcu_data, rcu_data);

extern struct rcu_state rcu_bh_state;
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
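
Given the dense "heap" layout described above, the leaves are simply the tail of the ->node[] array; a hedged sketch (hypothetical helper, not part of this patch) of iterating over them:

	/* Illustration only: ->level[NUM_RCU_LVLS - 1] points at the
	 * first leaf rcu_node, and the leaves run to the end of the
	 * dense ->node[] array. */
	static void example_walk_leaves(struct rcu_state *rsp)
	{
		struct rcu_node *rnp;

		for (rnp = rsp->level[NUM_RCU_LVLS - 1];
		     rnp < &rsp->node[NUM_RCU_NODES]; rnp++)
			/* CPUs rnp->grplo..rnp->grphi report to this leaf. */;
	}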

/*
 * Increment the quiescent state counter.
 * The counter is a bit degenerated: We do not need to know
 * how many quiescent states passed, just if there was at least
 * one since the start of the grace period. Thus just a flag.
 */
static inline void rcu_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	rdp->passed_quiesc = 1;
	rdp->passed_quiesc_completed = rdp->completed;
}
static inline void rcu_bh_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
	rdp->passed_quiesc = 1;
	rdp->passed_quiesc_completed = rdp->completed;
}

extern int rcu_pending(int cpu);
extern int rcu_needs_cpu(int cpu);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern struct lockdep_map rcu_lock_map;
# define rcu_read_acquire()	\
			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
#else
# define rcu_read_acquire()	do { } while (0)
# define rcu_read_release()	do { } while (0)
#endif

static inline void __rcu_read_lock(void)
{
	preempt_disable();
	__acquire(RCU);
	rcu_read_acquire();
}
static inline void __rcu_read_unlock(void)
{
	rcu_read_release();
	__release(RCU);
	preempt_enable();
}
static inline void __rcu_read_lock_bh(void)
{
	local_bh_disable();
	__acquire(RCU_BH);
	rcu_read_acquire();
}
static inline void __rcu_read_unlock_bh(void)
{
	rcu_read_release();
	__release(RCU_BH);
	local_bh_enable();
}
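
For context, these primitives back the usual reader-side pattern; a minimal sketch, assuming a hypothetical RCU-protected pointer gp of type struct foo * (names invented for illustration):

	/* Readers must not block between lock and unlock, since
	 * __rcu_read_lock() disables preemption here. */
	struct foo { int a; };
	extern struct foo *gp;

	static int read_foo(void)
	{
		struct foo *p;
		int val = -1;

		rcu_read_lock();
		p = rcu_dereference(gp);
		if (p != NULL)
			val = p->a;
		rcu_read_unlock();
		return val;
	}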

#define __synchronize_sched() synchronize_rcu()

#define call_rcu_sched(head, func) call_rcu(head, func)

static inline void rcu_init_sched(void)
{
}

extern void __rcu_init(void);
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);

extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);

#ifdef CONFIG_NO_HZ
void rcu_enter_nohz(void);
void rcu_exit_nohz(void);
#else /* CONFIG_NO_HZ */
static inline void rcu_enter_nohz(void)
{
}
static inline void rcu_exit_nohz(void)
{
}
#endif /* CONFIG_NO_HZ */

#endif /* __LINUX_RCUTREE_H */

@@ -118,6 +118,8 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);

unsigned long ring_buffer_entries(struct ring_buffer *buffer);
unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);

u64 ring_buffer_time_stamp(int cpu);
void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);

@@ -158,6 +158,8 @@
/* SH-SCI */
#define PORT_SCIFA	83

#define PORT_S3C6400	84

#ifdef __KERNEL__

#include <linux/compiler.h>

@@ -253,9 +253,9 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, __builtin_return_address(0))
	__kmalloc_track_caller(size, flags, _RET_IP_)
#else
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)

@@ -271,10 +271,10 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			__builtin_return_address(0))
			_RET_IP_)
#else
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)

@@ -285,7 +285,7 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* DEBUG_SLAB */
#endif /* CONFIG_NUMA */
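
The switch from void * to unsigned long (_RET_IP_) keeps the caller-tracking contract intact; as a hedged illustration of what the macro is for (a hypothetical wrapper, not from this patch, assuming <linux/string.h> for memset):

	/* With plain kmalloc(), slab debugging would attribute every
	 * allocation to my_zalloc() itself; kmalloc_track_caller()
	 * records my_zalloc()'s caller instead. */
	static inline void *my_zalloc(size_t size, gfp_t flags)
	{
		void *p = kmalloc_track_caller(size, flags);

		if (p)
			memset(p, 0, size);
		return p;
	}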

/*
 * Shortcuts

@@ -58,6 +58,7 @@ struct rpc_clnt {
	struct rpc_timeout	cl_timeout_default;
	struct rpc_program *	cl_program;
	char			cl_inline_name[32];
	char			*cl_principal;	/* target to authenticate to */
};

/*

@@ -108,6 +109,7 @@ struct rpc_create_args {
	u32			version;
	rpc_authflavor_t	authflavor;
	unsigned long		flags;
	char			*client_name;
};

/* Values for "flags" field */

@@ -15,6 +15,7 @@ struct rpc_pipe_ops {
	ssize_t	(*upcall)(struct file *, struct rpc_pipe_msg *, char __user *, size_t);
	ssize_t	(*downcall)(struct file *, const char __user *, size_t);
	void	(*release_pipe)(struct inode *);
	int	(*open_pipe)(struct inode *);
	void	(*destroy_msg)(struct rpc_pipe_msg *);
};

@@ -20,6 +20,7 @@ int gss_svc_init(void);
void gss_svc_shutdown(void);
int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
u32 svcauth_gss_flavor(struct auth_domain *dom);
char *svc_gss_principal(struct svc_rqst *);

#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */

@@ -36,21 +36,6 @@ struct xdr_netobj {
 */
typedef int	(*kxdrproc_t)(void *rqstp, __be32 *data, void *obj);

/*
 * We're still requiring the BKL in the xdr code until it's been
 * more carefully audited, at which point this wrapper will become
 * unnecessary.
 */
static inline int rpc_call_xdrproc(kxdrproc_t xdrproc, void *rqstp, __be32 *data, void *obj)
{
	int ret;

	lock_kernel();
	ret = xdrproc(rqstp, data, obj);
	unlock_kernel();
	return ret;
}

/*
 * Basic structure for transmission/reception of a client XDR message.
 * Features a header (for a linear buffer containing RPC headers

@@ -76,8 +76,7 @@ struct rpc_rqst {
	struct list_head	rq_list;

	__u32 *			rq_buffer;	/* XDR encode buffer */
	size_t			rq_bufsize,
				rq_callsize,
	size_t			rq_callsize,
				rq_rcvsize;

	struct xdr_buf		rq_private_buf;		/* The receive buffer

@@ -7,9 +7,31 @@ struct device;
struct dma_attrs;
struct scatterlist;

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2.  What is the appropriate value ?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128


/*
 * log of the size of each IO TLB slab.  The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11
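
For scale, a short worked example (arithmetic only): each slab is 1 << IO_TLB_SHIFT = 2048 bytes, so the largest contiguous mapping of IO_TLB_SEGSIZE slabs is 128 * 2 KiB = 256 KiB.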

extern void
swiotlb_init(void);

extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);

extern dma_addr_t swiotlb_phys_to_bus(phys_addr_t address);
extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address);

extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size);

extern void
*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			dma_addr_t *dma_handle, gfp_t flags);

@@ -53,46 +53,10 @@
#ifndef _LINUX_TIMEX_H
#define _LINUX_TIMEX_H

#include <linux/compiler.h>
#include <linux/time.h>

#include <asm/param.h>

#define NTP_API		4	/* NTP API version */

/*
 * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
 * for a slightly underdamped convergence characteristic. SHIFT_KH
 * establishes the damping of the FLL and is chosen by wisdom and black
 * art.
 *
 * MAXTC establishes the maximum time constant of the PLL. With the
 * SHIFT_KG and SHIFT_KF values given and a time constant range from
 * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
 * respectively.
 */
#define SHIFT_PLL	4	/* PLL frequency factor (shift) */
#define SHIFT_FLL	2	/* FLL frequency factor (shift) */
#define MAXTC		10	/* maximum time constant (shift) */

/*
 * SHIFT_USEC defines the scaling (shift) of the time_freq and
 * time_tolerance variables, which represent the current frequency
 * offset and maximum frequency tolerance.
 */
#define SHIFT_USEC 16		/* frequency offset scale (shift) */
#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
#define PPM_SCALE_INV_SHIFT 19
#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
		       PPM_SCALE + 1)

#define MAXPHASE 500000000l	/* max phase error (ns) */
#define MAXFREQ 500000		/* max frequency error (ns/s) */
#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT)
#define MINSEC 256		/* min interval between updates (s) */
#define MAXSEC 2048		/* max interval between updates (s) */
#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */

/*
 * syscall interface - used (mainly by NTP daemon)
 * to discipline kernel clock oscillator

@@ -199,8 +163,45 @@ struct timex {
#define TIME_BAD	TIME_ERROR /* bw compat */

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/param.h>

#include <asm/timex.h>

/*
 * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
 * for a slightly underdamped convergence characteristic. SHIFT_KH
 * establishes the damping of the FLL and is chosen by wisdom and black
 * art.
 *
 * MAXTC establishes the maximum time constant of the PLL. With the
 * SHIFT_KG and SHIFT_KF values given and a time constant range from
 * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
 * respectively.
 */
#define SHIFT_PLL	4	/* PLL frequency factor (shift) */
#define SHIFT_FLL	2	/* FLL frequency factor (shift) */
#define MAXTC		10	/* maximum time constant (shift) */

/*
 * SHIFT_USEC defines the scaling (shift) of the time_freq and
 * time_tolerance variables, which represent the current frequency
 * offset and maximum frequency tolerance.
 */
#define SHIFT_USEC 16		/* frequency offset scale (shift) */
#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
#define PPM_SCALE_INV_SHIFT 19
#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
		       PPM_SCALE + 1)

#define MAXPHASE 500000000l	/* max phase error (ns) */
#define MAXFREQ 500000		/* max frequency error (ns/s) */
#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT)
#define MINSEC 256		/* min interval between updates (s) */
#define MAXSEC 2048		/* max interval between updates (s) */
#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */

/*
 * kernel variables
 * Note: maximum error = NTP synch distance = dispersion + delay / 2;

@@ -135,19 +135,14 @@ typedef __s64 int64_t;
 *
 * Linux always considers sectors to be 512 bytes long independently
 * of the devices real block size.
 *
 * blkcnt_t is the type of the inode's block count.
 */
#ifdef CONFIG_LBD
typedef u64 sector_t;
#else
typedef unsigned long sector_t;
#endif

/*
 * The type of the inode's block count.
 */
#ifdef CONFIG_LSF
typedef u64 blkcnt_t;
#else
typedef unsigned long sector_t;
typedef unsigned long blkcnt_t;
#endif
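
A quick worked conversion (sketch only; sect is a hypothetical variable): because sectors are fixed at 512 bytes, a sector number becomes a byte offset with a shift by 9:

	/* 2^9 = 512, so: byte offset = sector number << 9 */
	sector_t sect = 100;
	loff_t offset = (loff_t)sect << 9;	/* 51200 */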

@@ -78,7 +78,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
		\
		set_fs(KERNEL_DS);		\
		pagefault_disable();		\
		ret = __get_user(retval, (__force typeof(retval) __user *)(addr));	\
		ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval));	\
		pagefault_enable();		\
		set_fs(old_fs);			\
		ret;				\

@@ -293,6 +293,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_YVU420  v4l2_fourcc('Y', 'V', '1', '2') /* 12  YVU 4:2:0 */
#define V4L2_PIX_FMT_YUYV    v4l2_fourcc('Y', 'U', 'Y', 'V') /* 16  YUV 4:2:2 */
#define V4L2_PIX_FMT_UYVY    v4l2_fourcc('U', 'Y', 'V', 'Y') /* 16  YUV 4:2:2 */
#define V4L2_PIX_FMT_VYUY    v4l2_fourcc('V', 'Y', 'U', 'Y') /* 16  YUV 4:2:2 */
#define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4', '2', '2', 'P') /* 16  YVU422 planar */
#define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4', '1', '1', 'P') /* 16  YVU411 planar */
#define V4L2_PIX_FMT_Y41P    v4l2_fourcc('Y', '4', '1', 'P') /* 12  YUV 4:1:1 */

@@ -304,6 +305,8 @@ struct v4l2_pix_format {
/* two planes -- one Y, one Cr + Cb interleaved  */
#define V4L2_PIX_FMT_NV12    v4l2_fourcc('N', 'V', '1', '2') /* 12  Y/CbCr 4:2:0 */
#define V4L2_PIX_FMT_NV21    v4l2_fourcc('N', 'V', '2', '1') /* 12  Y/CrCb 4:2:0 */
#define V4L2_PIX_FMT_NV16    v4l2_fourcc('N', 'V', '1', '6') /* 16  Y/CbCr 4:2:2 */
#define V4L2_PIX_FMT_NV61    v4l2_fourcc('N', 'V', '6', '1') /* 16  Y/CrCb 4:2:2 */

/* The following formats are not defined in the V4L2 specification */
#define V4L2_PIX_FMT_YUV410  v4l2_fourcc('Y', 'U', 'V', '9') /*  9  YUV 4:1:0 */

@@ -1050,7 +1053,7 @@ enum v4l2_mpeg_video_bitrate_mode {
#define V4L2_CID_MPEG_VIDEO_MUTE		(V4L2_CID_MPEG_BASE+210)
#define V4L2_CID_MPEG_VIDEO_MUTE_YUV		(V4L2_CID_MPEG_BASE+211)

/* MPEG-class control IDs specific to the CX2584x driver as defined by V4L2 */
/* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */
#define V4L2_CID_MPEG_CX2341X_BASE		(V4L2_CTRL_CLASS_MPEG | 0x1000)
#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE	(V4L2_CID_MPEG_CX2341X_BASE+0)
enum v4l2_mpeg_cx2341x_video_spatial_filter_mode {

@@ -1117,6 +1120,12 @@ enum v4l2_exposure_auto_type {
#define V4L2_CID_FOCUS_RELATIVE			(V4L2_CID_CAMERA_CLASS_BASE+11)
#define V4L2_CID_FOCUS_AUTO			(V4L2_CID_CAMERA_CLASS_BASE+12)

#define V4L2_CID_ZOOM_ABSOLUTE			(V4L2_CID_CAMERA_CLASS_BASE+13)
#define V4L2_CID_ZOOM_RELATIVE			(V4L2_CID_CAMERA_CLASS_BASE+14)
#define V4L2_CID_ZOOM_CONTINUOUS		(V4L2_CID_CAMERA_CLASS_BASE+15)

#define V4L2_CID_PRIVACY			(V4L2_CID_CAMERA_CLASS_BASE+16)

/*
 *	T U N I N G
 */

@@ -1369,6 +1378,7 @@ struct v4l2_streamparm {
#define V4L2_CHIP_MATCH_HOST       0  /* Match against chip ID on host (0 for the host) */
#define V4L2_CHIP_MATCH_I2C_DRIVER 1  /* Match against I2C driver ID */
#define V4L2_CHIP_MATCH_I2C_ADDR   2  /* Match against I2C 7-bit address */
#define V4L2_CHIP_MATCH_AC97       3  /* Match against ancillary AC97 chip */

struct v4l2_register {
	__u32 match_type; /* Match type */

@@ -1458,6 +1468,8 @@ struct v4l2_chip_ident {
#define VIDIOC_G_CHIP_IDENT	_IOWR('V', 81, struct v4l2_chip_ident)
#endif
#define VIDIOC_S_HW_FREQ_SEEK	_IOW('V', 82, struct v4l2_hw_freq_seek)
/* Reminder: when adding new ioctls please add support for them to
   drivers/media/video/v4l2-compat-ioctl32.c as well! */

#ifdef __OLD_VIDIOC_
/* for compatibility, will go away some day */

@@ -10,6 +10,9 @@
/* The feature bitmap for virtio balloon */
#define VIRTIO_BALLOON_F_MUST_TELL_HOST	0 /* Tell before reclaiming pages */

/* Size of a PFN in the balloon interface. */
#define VIRTIO_BALLOON_PFN_SHIFT 12

struct virtio_balloon_config
{
	/* Number of pages host wants Guest to give up. */

@@ -7,6 +7,17 @@
/* The ID for virtio console */
#define VIRTIO_ID_CONSOLE	3

/* Feature bits */
#define VIRTIO_CONSOLE_F_SIZE	0	/* Does host provide console size? */

struct virtio_console_config {
	/* columns of the screens */
	__u16 cols;
	/* rows of the screens */
	__u16 rows;
} __attribute__((packed));


#ifdef __KERNEL__
int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int));
#endif /* __KERNEL__ */

@@ -53,4 +53,12 @@

/* Virtio ABI version, this must match exactly */
#define VIRTIO_PCI_ABI_VERSION		0

/* How many bits to shift physical queue address written to QUEUE_PFN.
 * 12 is historical, and due to x86 page size. */
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT	12

/* The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN		4096
#endif

@@ -83,7 +83,7 @@ struct vring {
 *	__u16 avail_idx;
 *	__u16 available[num];
 *
 *	// Padding to the next page boundary.
 *	// Padding to the next align boundary.
 *	char pad[];
 *
 *	// A ring of used descriptor heads with free-running index.

@@ -93,19 +93,19 @@ struct vring {
 * };
 */
static inline void vring_init(struct vring *vr, unsigned int num, void *p,
			      unsigned long pagesize)
			      unsigned long align)
{
	vr->num = num;
	vr->desc = p;
	vr->avail = p + num*sizeof(struct vring_desc);
	vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + pagesize-1)
		    & ~(pagesize - 1));
	vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + align-1)
		    & ~(align - 1));
}

static inline unsigned vring_size(unsigned int num, unsigned long pagesize)
static inline unsigned vring_size(unsigned int num, unsigned long align)
{
	return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (2 + num)
		 + pagesize - 1) & ~(pagesize - 1))
		 + align - 1) & ~(align - 1))
		+ sizeof(__u16) * 2 + sizeof(struct vring_used_elem) * num;
}

@@ -115,6 +115,7 @@ struct virtio_device;
struct virtqueue;

struct virtqueue *vring_new_virtqueue(unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      void *pages,
				      void (*notify)(struct virtqueue *vq),