Merge branch 'linus' into x86/pci-ioapic-boot-irq-quirks
Conflicts:
	drivers/pci/quirks.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -235,6 +235,9 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
int acpi_check_mem_region(resource_size_t start, resource_size_t n,
		const char *name);

#ifdef CONFIG_PM_SLEEP
void __init acpi_old_suspend_ordering(void);
#endif /* CONFIG_PM_SLEEP */
#else	/* CONFIG_ACPI */

static inline int early_acpi_boot_init(void)
@@ -84,7 +84,6 @@ enum adb_message {
	ADB_MSG_PRE_RESET,	/* Called before resetting the bus */
	ADB_MSG_POST_RESET	/* Called after resetting the bus (re-do init & register) */
};
extern struct adb_driver *adb_controller;
extern struct blocking_notifier_head adb_client_list;

int adb_request(struct adb_request *req, void (*done)(struct adb_request *),
@@ -64,6 +64,7 @@ struct bio_vec {

struct bio_set;
struct bio;
struct bio_integrity_payload;
typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_destructor_t) (struct bio *);

@@ -112,6 +113,9 @@ struct bio {
	atomic_t		bi_cnt;		/* pin count */

	void			*bi_private;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload *bi_integrity;  /* data integrity */
#endif

	bio_destructor_t	*bi_destructor;	/* destructor */
};
@@ -271,6 +275,29 @@ static inline void *bio_data(struct bio *bio)
 */
#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)

#if defined(CONFIG_BLK_DEV_INTEGRITY)
/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */
	struct bio_vec		*bip_vec;	/* integrity data vector */

	sector_t		bip_sector;	/* virtual start sector */

	void			*bip_buf;	/* generated integrity data */
	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	int			bip_error;	/* saved I/O error */
	unsigned int		bip_size;

	unsigned short		bip_pool;	/* pool the ivec came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_idx;	/* current bip_vec index */

	struct work_struct	bip_work;	/* I/O completion */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * A bio_pair is used when we need to split a bio.
@@ -283,10 +310,14 @@ static inline void *bio_data(struct bio *bio)
 *   in bio2.bi_private
 */
struct bio_pair {
	struct bio	bio1, bio2;
	struct bio_vec	bv1, bv2;
	atomic_t	cnt;
	int		error;
	struct bio	bio1, bio2;
	struct bio_vec	bv1, bv2;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload bip1, bip2;
	struct bio_vec	iv1, iv2;
#endif
	atomic_t	cnt;
	int		error;
};
extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
				  int first_sectors);
@@ -333,6 +364,39 @@ extern struct bio *bio_copy_user_iov(struct request_queue *, struct sg_iovec *,
				     int, int);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
extern unsigned int bvec_nr_vecs(unsigned short idx);

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6

struct bio_set {
	mempool_t *bio_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t *bio_integrity_pool;
#endif
	mempool_t *bvec_pools[BIOVEC_NR_POOLS];
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

extern struct bio_set *fs_bio_set;

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#ifdef CONFIG_HIGHMEM
/*
@@ -381,5 +445,63 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf,flags)	__bio_kunmap_irq(buf, flags)

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
#define bip_vec(bip)		bip_vec_idx(bip, 0)

#define __bip_for_each_vec(bvl, bip, i, start_idx)			\
	for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx);	\
	     i < (bip)->bip_vcnt;					\
	     bvl++, i++)

#define bip_for_each_vec(bvl, bip, i)					\
	__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)

static inline int bio_integrity(struct bio *bio)
{
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	return bio->bi_integrity != NULL;
#else
	return 0;
#endif
}

extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *, struct bio_set *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
extern int bio_integrity_clone(struct bio *, struct bio *, struct bio_set *);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init_slab(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

#define bio_integrity(a)		(0)
#define bioset_integrity_create(a, b)	(0)
#define bio_integrity_prep(a)		(0)
#define bio_integrity_enabled(a)	(0)
#define bio_integrity_clone(a, b, c)	(0)
#define bioset_integrity_free(a)	do { } while (0)
#define bio_integrity_free(a, b)	do { } while (0)
#define bio_integrity_endio(a, b)	do { } while (0)
#define bio_integrity_advance(a, b)	do { } while (0)
#define bio_integrity_trim(a, b, c)	do { } while (0)
#define bio_integrity_split(a, b, c)	do { } while (0)
#define bio_integrity_set_tag(a, b, c)	do { } while (0)
#define bio_integrity_get_tag(a, b, c)	do { } while (0)
#define bio_integrity_init_slab(a)	do { } while (0)

#endif /* CONFIG_BLK_DEV_INTEGRITY */
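For orientation, a minimal hedged sketch of how a submitter might use the integrity hooks above before handing a bio to the block layer (submit_with_integrity() is an invented wrapper, not part of this diff; the -EIO policy mirrors what generic_make_request() does):

static void submit_with_integrity(int rw, struct bio *bio)
{
	/* generate/attach a protection payload only when the target
	 * device wants one and the bio does not already carry one */
	if (bio_integrity_enabled(bio) && !bio_integrity(bio) &&
	    bio_integrity_prep(bio)) {
		bio_endio(bio, -EIO);	/* could not allocate/fill payload */
		return;
	}
	submit_bio(rw, bio);
}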

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */
@@ -23,7 +23,6 @@
struct scsi_ioctl_command;

struct request_queue;
typedef struct request_queue request_queue_t __deprecated;
struct elevator_queue;
typedef struct elevator_queue elevator_t;
struct request_pm_state;
@@ -34,12 +33,6 @@ struct sg_io_hdr;
#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

int put_io_context(struct io_context *ioc);
void exit_io_context(void);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

@@ -113,6 +106,7 @@ enum rq_flag_bits {
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_RW_META,		/* metadata io request */
	__REQ_COPY_USER,	/* contains copies of user pages */
	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
	__REQ_NR_BITS,		/* stops here */
};

@@ -135,6 +129,7 @@ enum rq_flag_bits {
#define REQ_ALLOCED	(1 << __REQ_ALLOCED)
#define REQ_RW_META	(1 << __REQ_RW_META)
#define REQ_COPY_USER	(1 << __REQ_COPY_USER)
#define REQ_INTEGRITY	(1 << __REQ_INTEGRITY)

#define BLK_MAX_CDB	16

@@ -259,7 +254,14 @@ typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);

struct bio_vec;
typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
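To make the reworked prototype concrete, a hedged sketch of a merge_bvec_fn under the new convention (driver name and the one-page limit are invented; the return value is the number of bytes the driver accepts):

static int mydev_merge_bvec(struct request_queue *q,
			    struct bvec_merge_data *bvm,
			    struct bio_vec *bvec)
{
	/* bvm describes the bio as it looks *before* bvec is added */
	if (bvm->bi_size + bvec->bv_len > PAGE_SIZE)
		return 0;		/* refuse to grow past one page */

	return bvec->bv_len;		/* accept the whole segment */
}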
@@ -426,6 +428,32 @@ static inline void queue_flag_set_unlocked(unsigned int flag,
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}
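As a hedged usage sketch, the test-and-set helper lets callers arm once-only state under the queue lock, e.g. with the existing QUEUE_FLAG_PLUGGED bit (the timer interval is invented):

spin_lock_irq(q->queue_lock);
if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
	/* we were the first to plug the queue: start the unplug timer */
	mod_timer(&q->unplug_timer, jiffies + 3);
}
spin_unlock_irq(q->queue_lock);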

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
@@ -623,7 +651,6 @@ extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern void blk_end_sync_rq(struct request *rq, int error);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
@@ -676,7 +703,6 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
extern int blk_verify_command(unsigned char *, int);
extern void blk_unplug(struct request_queue *q);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
@@ -749,6 +775,7 @@ extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
@@ -802,6 +829,15 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,

extern int blkdev_issue_flush(struct block_device *, sector_t *);

/*
 * command filter functions
 */
extern int blk_verify_command(struct file *file, unsigned char *cmd);
extern int blk_cmd_filter_verify_command(struct blk_scsi_cmd_filter *filter,
					 unsigned char *cmd, mode_t *f_mode);
extern int blk_register_filter(struct gendisk *disk);
extern void blk_unregister_filter(struct gendisk *disk);

#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define SAFE_MAX_SECTORS 255
@@ -865,6 +901,108 @@ void kblockd_flush_work(struct work_struct *work);
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */

struct blk_integrity_exchg {
	void			*prot_buf;
	void			*data_buf;
	sector_t		sector;
	unsigned int		data_size;
	unsigned short		sector_size;
	const char		*disk_name;
};

typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
	integrity_gen_fn	*generate_fn;
	integrity_vrfy_fn	*verify_fn;
	integrity_set_tag_fn	*set_tag_fn;
	integrity_get_tag_fn	*get_tag_fn;

	unsigned short		flags;
	unsigned short		tuple_size;
	unsigned short		sector_size;
	unsigned short		tag_size;

	const char		*name;

	struct kobject		kobj;
};

extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct block_device *, struct block_device *);
extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request *);

static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
{
	if (bi)
		return bi->tuple_size;

	return 0;
}

static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return bdev->bd_disk->integrity;
}

static inline unsigned int bdev_get_tag_size(struct block_device *bdev)
{
	struct blk_integrity *bi = bdev_get_integrity(bdev);

	if (bi)
		return bi->tag_size;

	return 0;
}

static inline int bdev_integrity_enabled(struct block_device *bdev, int rw)
{
	struct blk_integrity *bi = bdev_get_integrity(bdev);

	if (bi == NULL)
		return 0;

	if (rw == READ && bi->verify_fn != NULL &&
	    (bi->flags & INTEGRITY_FLAG_READ))
		return 1;

	if (rw == WRITE && bi->generate_fn != NULL &&
	    (bi->flags & INTEGRITY_FLAG_WRITE))
		return 1;

	return 0;
}

static inline int blk_integrity_rq(struct request *rq)
{
	if (rq->bio == NULL)
		return 0;

	return bio_integrity(rq->bio);
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

#define blk_integrity_rq(rq)			(0)
#define blk_rq_count_integrity_sg(a)		(0)
#define blk_rq_map_integrity_sg(a, b)		(0)
#define bdev_get_integrity(a)			(0)
#define bdev_get_tag_size(a)			(0)
#define blk_integrity_compare(a, b)		(0)
#define blk_integrity_register(a, b)		(0)
#define blk_integrity_unregister(a)		do { } while (0);

#endif /* CONFIG_BLK_DEV_INTEGRITY */
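For context, a hedged sketch of how a disk driver might fill and register the template above (the callbacks and tuple size are T10-DIF-flavoured placeholders, not taken from this diff):

static void mydev_dif_generate(struct blk_integrity_exchg *bix)
{
	/* compute protection tuples for bix->data_buf into bix->prot_buf,
	 * bix->data_size bytes starting at sector bix->sector */
}

static int mydev_dif_verify(struct blk_integrity_exchg *bix)
{
	return 0;	/* placeholder: report the data as intact */
}

static struct blk_integrity mydev_integrity = {
	.name		= "MYDEV-DIF",
	.generate_fn	= mydev_dif_generate,
	.verify_fn	= mydev_dif_verify,
	.tuple_size	= 8,
	.flags		= INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE,
};

/* in the probe path: */
blk_integrity_register(disk, &mydev_integrity);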

#else /* CONFIG_BLOCK */
/*
@@ -877,17 +1015,6 @@ static inline long nr_blockdev_pages(void)
	return 0;
}

static inline void exit_io_context(void)
{
}

struct io_context;
static inline int put_io_context(struct io_context *ioc)
{
	return 1;
}


#endif /* CONFIG_BLOCK */

#endif
@@ -129,6 +129,7 @@ struct blk_trace {
	u32 dev;
	struct dentry *dir;
	struct dentry *dropped_file;
	struct dentry *msg_file;
	atomic_t dropped;
};

@@ -165,8 +165,8 @@ struct configfs_item_operations {
};

struct configfs_group_operations {
	struct config_item *(*make_item)(struct config_group *group, const char *name);
	struct config_group *(*make_group)(struct config_group *group, const char *name);
	int (*make_item)(struct config_group *group, const char *name, struct config_item **new_item);
	int (*make_group)(struct config_group *group, const char *name, struct config_group **new_group);
	int (*commit_item)(struct config_item *item);
	void (*disconnect_notify)(struct config_group *group, struct config_item *item);
	void (*drop_item)(struct config_group *group, struct config_item *item);

include/linux/crc-t10dif.h (new file, 8 lines)
@@ -0,0 +1,8 @@
#ifndef _LINUX_CRC_T10DIF_H
#define _LINUX_CRC_T10DIF_H

#include <linux/types.h>

__u16 crc_t10dif(unsigned char const *, size_t);

#endif
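A hedged usage sketch for the new helper, computing the 16-bit T10 DIF CRC that goes into a sector's guard tag (function name invented):

static __u16 compute_guard_tag(unsigned char const *sector, size_t len)
{
	/* CRC over the data portion of one logical block */
	return crc_t10dif(sector, len);
}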
@@ -30,15 +30,17 @@
 */
#define CRYPTO_ALG_TYPE_MASK		0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER		0x00000001
#define CRYPTO_ALG_TYPE_DIGEST		0x00000002
#define CRYPTO_ALG_TYPE_HASH		0x00000003
#define CRYPTO_ALG_TYPE_COMPRESS	0x00000002
#define CRYPTO_ALG_TYPE_AEAD		0x00000003
#define CRYPTO_ALG_TYPE_BLKCIPHER	0x00000004
#define CRYPTO_ALG_TYPE_ABLKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_GIVCIPHER	0x00000006
#define CRYPTO_ALG_TYPE_COMPRESS	0x00000008
#define CRYPTO_ALG_TYPE_AEAD		0x00000009
#define CRYPTO_ALG_TYPE_DIGEST		0x00000008
#define CRYPTO_ALG_TYPE_HASH		0x00000009
#define CRYPTO_ALG_TYPE_AHASH		0x0000000a

#define CRYPTO_ALG_TYPE_HASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000c
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK	0x0000000c

#define CRYPTO_ALG_LARVAL		0x00000010
@@ -102,6 +104,7 @@ struct crypto_async_request;
struct crypto_aead;
struct crypto_blkcipher;
struct crypto_hash;
struct crypto_ahash;
struct crypto_tfm;
struct crypto_type;
struct aead_givcrypt_request;
@@ -131,6 +134,16 @@ struct ablkcipher_request {
	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct ahash_request {
	struct crypto_async_request base;

	unsigned int nbytes;
	struct scatterlist *src;
	u8 *result;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

/**
 * struct aead_request - AEAD request
 * @base: Common attributes for async crypto requests
@@ -195,6 +208,17 @@ struct ablkcipher_alg {
	unsigned int ivsize;
};

struct ahash_alg {
	int (*init)(struct ahash_request *req);
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*digest)(struct ahash_request *req);
	int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
		      unsigned int keylen);

	unsigned int digestsize;
};

struct aead_alg {
	int (*setkey)(struct crypto_aead *tfm, const u8 *key,
		      unsigned int keylen);
@@ -272,6 +296,7 @@ struct compress_alg {
#define cra_cipher	cra_u.cipher
#define cra_digest	cra_u.digest
#define cra_hash	cra_u.hash
#define cra_ahash	cra_u.ahash
#define cra_compress	cra_u.compress

struct crypto_alg {
@@ -298,6 +323,7 @@ struct crypto_alg {
		struct cipher_alg cipher;
		struct digest_alg digest;
		struct hash_alg hash;
		struct ahash_alg ahash;
		struct compress_alg compress;
	} cra_u;

@@ -383,6 +409,18 @@ struct hash_tfm {
	unsigned int digestsize;
};

struct ahash_tfm {
	int (*init)(struct ahash_request *req);
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*digest)(struct ahash_request *req);
	int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
		      unsigned int keylen);

	unsigned int digestsize;
	unsigned int reqsize;
};

struct compress_tfm {
	int (*cot_compress)(struct crypto_tfm *tfm,
			    const u8 *src, unsigned int slen,
@@ -397,6 +435,7 @@ struct compress_tfm {
#define crt_blkcipher	crt_u.blkcipher
#define crt_cipher	crt_u.cipher
#define crt_hash	crt_u.hash
#define crt_ahash	crt_u.ahash
#define crt_compress	crt_u.compress

struct crypto_tfm {
@@ -409,6 +448,7 @@ struct crypto_tfm {
		struct blkcipher_tfm blkcipher;
		struct cipher_tfm cipher;
		struct hash_tfm hash;
		struct ahash_tfm ahash;
		struct compress_tfm compress;
	} crt_u;

@@ -3,6 +3,7 @@

#include <asm/atomic.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/rcupdate.h>

@@ -68,6 +68,8 @@ struct bus_type {
	int (*resume_early)(struct device *dev);
	int (*resume)(struct device *dev);

	struct pm_ext_ops *pm;

	struct bus_type_private *p;
};

@@ -131,6 +133,8 @@ struct device_driver {
	int (*resume) (struct device *dev);
	struct attribute_group **groups;

	struct pm_ops *pm;

	struct driver_private *p;
};

@@ -197,6 +201,8 @@ struct class {

	int (*suspend)(struct device *dev, pm_message_t state);
	int (*resume)(struct device *dev);

	struct pm_ops *pm;
};

extern int __must_check class_register(struct class *class);
@@ -248,8 +254,11 @@ struct device_type {
	struct attribute_group **groups;
	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
	void (*release)(struct device *dev);

	int (*suspend)(struct device *dev, pm_message_t state);
	int (*resume)(struct device *dev);

	struct pm_ops *pm;
};

/* interface for exporting device attributes */

@@ -358,6 +358,7 @@ typedef struct elf64_shdr {
#define NT_PRXFPREG	0x46e62b7f	/* copied from gdb5.1/include/elf/common.h */
#define NT_PPC_VMX	0x100		/* PowerPC Altivec/VMX registers */
#define NT_PPC_SPE	0x101		/* PowerPC SPE/EVR registers */
#define NT_PPC_VSX	0x102		/* PowerPC VSX registers */
#define NT_386_TLS	0x200		/* i386 TLS slots (struct user_desc) */


include/linux/firmware-map.h (new file, 74 lines)
@@ -0,0 +1,74 @@
/*
 * include/linux/firmware-map.h:
 *  Copyright (C) 2008 SUSE LINUX Products GmbH
 *  by Bernhard Walle <bwalle@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#ifndef _LINUX_FIRMWARE_MAP_H
#define _LINUX_FIRMWARE_MAP_H

#include <linux/list.h>
#include <linux/kobject.h>

/*
 * provide a dummy interface if CONFIG_FIRMWARE_MEMMAP is disabled
 */
#ifdef CONFIG_FIRMWARE_MEMMAP

/**
 * Adds a firmware mapping entry. This function uses kmalloc() for memory
 * allocation. Use firmware_map_add_early() if you want to use the bootmem
 * allocator.
 *
 * That function must be called before late_initcall.
 *
 * @start: Start of the memory range.
 * @end:   End of the memory range (inclusive).
 * @type:  Type of the memory range.
 *
 * Returns 0 on success, or -ENOMEM if no memory could be allocated.
 */
int firmware_map_add(resource_size_t start, resource_size_t end,
		     const char *type);

/**
 * Adds a firmware mapping entry. This function uses the bootmem allocator
 * for memory allocation. Use firmware_map_add() if you want to use kmalloc().
 *
 * That function must be called before late_initcall.
 *
 * @start: Start of the memory range.
 * @end:   End of the memory range (inclusive).
 * @type:  Type of the memory range.
 *
 * Returns 0 on success, or -ENOMEM if no memory could be allocated.
 */
int firmware_map_add_early(resource_size_t start, resource_size_t end,
			   const char *type);

#else /* CONFIG_FIRMWARE_MEMMAP */

static inline int firmware_map_add(resource_size_t start, resource_size_t end,
				   const char *type)
{
	return 0;
}

static inline int firmware_map_add_early(resource_size_t start,
					 resource_size_t end, const char *type)
{
	return 0;
}

#endif /* CONFIG_FIRMWARE_MEMMAP */

#endif /* _LINUX_FIRMWARE_MAP_H */
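For orientation, a hedged caller sketch: recording a firmware-provided range while the bootmem allocator is still up (the range, type string, and warning policy are invented; the e820 code is the natural in-tree caller):

/* early boot, before late_initcall, bootmem still available */
if (firmware_map_add_early(0x0, 0x9fbff, "System RAM"))
	printk(KERN_WARNING "firmware map entry could not be recorded\n");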
@@ -1,18 +1,39 @@
#ifndef _LINUX_FIRMWARE_H
#define _LINUX_FIRMWARE_H

#include <linux/module.h>
#include <linux/types.h>
#include <linux/compiler.h>

#define FIRMWARE_NAME_MAX 30
#define FW_ACTION_NOHOTPLUG 0
#define FW_ACTION_HOTPLUG 1

struct firmware {
	size_t size;
	u8 *data;
	const u8 *data;
};

struct device;

struct builtin_fw {
	char *name;
	void *data;
	unsigned long size;
};

/* We have to play tricks here much like stringify() to get the
   __COUNTER__ macro to be expanded as we want it */
#define __fw_concat1(x, y) x##y
#define __fw_concat(x, y) __fw_concat1(x, y)

#define DECLARE_BUILTIN_FIRMWARE(name, blob)				     \
	DECLARE_BUILTIN_FIRMWARE_SIZE(name, &(blob), sizeof(blob))

#define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size)			     \
	static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \
	__used __section(.builtin_fw) = { name, blob, size }

#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE))
int request_firmware(const struct firmware **fw, const char *name,
		     struct device *device);
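Two hedged usage sketches for the interfaces above: baking a blob into the kernel image, and the classic request/release cycle (the device, blob, and mydev_upload() are invented; note that fw->data is const u8 * after this change, so drivers must copy rather than patch in place):

static const u8 mydev_fw_image[] = { 0xde, 0xad, 0xbe, 0xef };
DECLARE_BUILTIN_FIRMWARE("mydev/fw.bin", mydev_fw_image);

/* typical loader path in a driver's probe() */
const struct firmware *fw;
if (request_firmware(&fw, "mydev/fw.bin", &pdev->dev) == 0) {
	mydev_upload(fw->data, fw->size);	/* copy out, don't modify */
	release_firmware(fw);
}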
@@ -127,6 +127,15 @@ static inline void set_freezable(void)
	current->flags &= ~PF_NOFREEZE;
}

/*
 * Tell the freezer that the current task should be frozen by it and that it
 * should send a fake signal to the task to freeze it.
 */
static inline void set_freezable_with_signal(void)
{
	current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG);
}

/*
 * Freezer-friendly wrappers around wait_event_interruptible() and
 * wait_event_interruptible_timeout(), originally defined in <linux/wait.h>
@@ -174,6 +183,7 @@ static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}
static inline void set_freezable_with_signal(void) {}

#define wait_event_freezable(wq, condition)				\
		wait_event_interruptible(wq, condition)

@@ -1729,6 +1729,8 @@ static inline void invalidate_remote_inode(struct inode *inode)
extern int invalidate_inode_pages2(struct address_space *mapping);
extern int invalidate_inode_pages2_range(struct address_space *mapping,
					 pgoff_t start, pgoff_t end);
extern void generic_sync_sb_inodes(struct super_block *sb,
				   struct writeback_control *wbc);
extern int write_inode_now(struct inode *, int);
extern int filemap_fdatawrite(struct address_space *);
extern int filemap_flush(struct address_space *);
@@ -1740,6 +1742,8 @@ extern int wait_on_page_writeback_range(struct address_space *mapping,
					pgoff_t start, pgoff_t end);
extern int __filemap_fdatawrite_range(struct address_space *mapping,
				      loff_t start, loff_t end, int sync_mode);
extern int filemap_fdatawrite_range(struct address_space *mapping,
				    loff_t start, loff_t end);

extern long do_fsync(struct file *file, int datasync);
extern void sync_supers(void);
@@ -1870,7 +1874,8 @@ extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern loff_t no_llseek(struct file *file, loff_t offset, int origin);
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
extern loff_t remote_llseek(struct file *file, loff_t offset, int origin);
extern loff_t generic_file_llseek_unlocked(struct file *file, loff_t offset,
					   int origin);
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);

include/linux/ftrace.h (new file, 144 lines)
@@ -0,0 +1,144 @@
#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#ifdef CONFIG_FTRACE

#include <linux/linkage.h>
#include <linux/fs.h>

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *filp, void __user *buffer, size_t *lenp,
		     loff_t *ppos);

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

struct ftrace_ops {
	ftrace_func_t	  func;
	struct ftrace_ops *next;
};

/*
 * The ftrace_ops must be a static and should also
 * be read_mostly.  These functions do modify read_mostly variables
 * so use them sparely.  Never free an ftrace_op or modify the
 * next pointer after it has been registered.  Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);

extern void ftrace_stub(unsigned long a0, unsigned long a1);

#else /* !CONFIG_FTRACE */
# define register_ftrace_function(ops) do { } while (0)
# define unregister_ftrace_function(ops) do { } while (0)
# define clear_ftrace_function(ops) do { } while (0)
#endif /* CONFIG_FTRACE */
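As the comment above insists, the ops must be static and long-lived; a minimal hedged sketch of hooking the tracer (names invented):

static void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* runs at every traced mcount site; keep it fast and reentrant */
}

static struct ftrace_ops my_trace_ops __read_mostly = {
	.func = my_trace_func,
};

/* in init code: */
register_ftrace_function(&my_trace_ops);
/* later: unregister_ftrace_function(&my_trace_ops);
 * my_trace_ops must never be freed, per the comment above */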

#ifdef CONFIG_DYNAMIC_FTRACE
# define FTRACE_HASHBITS	10
# define FTRACE_HASHSIZE	(1<<FTRACE_HASHBITS)

enum {
	FTRACE_FL_FREE		= (1 << 0),
	FTRACE_FL_FAILED	= (1 << 1),
	FTRACE_FL_FILTER	= (1 << 2),
	FTRACE_FL_ENABLED	= (1 << 3),
	FTRACE_FL_NOTRACE	= (1 << 4),
	FTRACE_FL_CONVERTED	= (1 << 5),
	FTRACE_FL_FROZEN	= (1 << 6),
};

struct dyn_ftrace {
	struct hlist_node node;
	unsigned long	  ip; /* address of mcount call-site */
	unsigned long	  flags;
};

int ftrace_force_update(void);
void ftrace_set_filter(unsigned char *buf, int len, int reset);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern unsigned char *ftrace_nop_replace(void);
extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr);
extern int ftrace_dyn_arch_init(void *data);
extern int ftrace_mcount_set(unsigned long *data);
extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
			      unsigned char *new_code);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_call(void);
extern void mcount_call(void);

extern int skip_trace(unsigned long ip);

void ftrace_disable_daemon(void);
void ftrace_enable_daemon(void);

#else
# define skip_trace(ip)				({ 0; })
# define ftrace_force_update()			({ 0; })
# define ftrace_set_filter(buf, len, reset)	do { } while (0)
# define ftrace_disable_daemon()		do { } while (0)
# define ftrace_enable_daemon()			do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);
void ftrace_kill_atomic(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FTRACE
	ftrace_enabled = 0;
#endif
}

#ifdef CONFIG_FRAME_POINTER
/* TODO: need to fix this for ARM */
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
#else
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 0UL
# define CALLER_ADDR2 0UL
# define CALLER_ADDR3 0UL
# define CALLER_ADDR4 0UL
# define CALLER_ADDR5 0UL
# define CALLER_ADDR6 0UL
#endif

#ifdef CONFIG_IRQSOFF_TRACER
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
# define time_hardirqs_on(a0, a1)		do { } while (0)
# define time_hardirqs_off(a0, a1)		do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
# define trace_preempt_on(a0, a1)		do { } while (0)
# define trace_preempt_off(a0, a1)		do { } while (0)
#endif

#ifdef CONFIG_TRACING
extern void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
#else
static inline void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
#endif

#endif /* _LINUX_FTRACE_H */

@@ -110,6 +110,14 @@ struct hd_struct {
#define GENHD_FL_SUPPRESS_PARTITION_INFO	32
#define GENHD_FL_FAIL				64

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct blk_scsi_cmd_filter {
	unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
	unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
	struct kobject kobj;
};

struct gendisk {
	int major;			/* major number of driver */
@@ -120,6 +128,7 @@ struct gendisk {
	struct hd_struct **part;	/* [indexed by minor] */
	struct block_device_operations *fops;
	struct request_queue *queue;
	struct blk_scsi_cmd_filter cmd_filter;
	void *private_data;
	sector_t capacity;

@@ -141,6 +150,9 @@ struct gendisk {
	struct disk_stats dkstats;
#endif
	struct work_struct async_notify;
#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity *integrity;
#endif
};

/*

@@ -33,9 +33,11 @@ struct i2c_algo_pcf_data {
	int  (*getclock) (void *data);
	void (*waitforpin) (void);

	/* local settings */
	int udelay;
	int timeout;
	/* Multi-master lost arbitration back-off delay (msecs)
	 * This should be set by the bus adapter or knowledgable client
	 * if bus is multi-mastered, else zero
	 */
	unsigned long lab_mdelay;
};

int i2c_pcf_add_bus(struct i2c_adapter *);

@@ -91,8 +91,6 @@
#define I2C_DRIVERID_M52790	95	/* Mitsubishi M52790SP/FP AV switch */
#define I2C_DRIVERID_CS5345	96	/* cs5345 audio processor */

#define I2C_DRIVERID_I2CDEV	900

#define I2C_DRIVERID_OV7670	1048	/* Omnivision 7670 camera */

/*
@@ -111,7 +109,6 @@
#define I2C_HW_B_RIVA		0x010010 /* Riva based graphics cards */
#define I2C_HW_B_IOC		0x010011 /* IOC bit-wiggling */
#define I2C_HW_B_IXP2000	0x010016 /* GPIO on IXP2000 systems */
#define I2C_HW_B_S3VIA		0x010018 /* S3Via ProSavage adapter */
#define I2C_HW_B_ZR36067	0x010019 /* Zoran-36057/36067 based boards */
#define I2C_HW_B_PCILYNX	0x01001a /* TI PCILynx I2C adapter */
#define I2C_HW_B_CX2388x	0x01001b /* connexant 2388x based tv cards */

@@ -35,6 +35,8 @@
#include <linux/sched.h>	/* for completion */
#include <linux/mutex.h>

extern struct bus_type i2c_bus_type;

/* --- General options ------------------------------------------------	*/

struct i2c_msg;
@@ -43,6 +45,7 @@ struct i2c_adapter;
struct i2c_client;
struct i2c_driver;
union i2c_smbus_data;
struct i2c_board_info;

/*
 * The master routines are the ones normally used to transmit data to devices
@@ -69,9 +72,8 @@ extern s32 i2c_smbus_xfer (struct i2c_adapter * adapter, u16 addr,
			   union i2c_smbus_data * data);

/* Now follow the 'nice' access routines. These also document the calling
   conventions of smbus_access. */
   conventions of i2c_smbus_xfer. */

extern s32 i2c_smbus_write_quick(struct i2c_client * client, u8 value);
extern s32 i2c_smbus_read_byte(struct i2c_client * client);
extern s32 i2c_smbus_write_byte(struct i2c_client * client, u8 value);
extern s32 i2c_smbus_read_byte_data(struct i2c_client * client, u8 command);
@@ -93,15 +95,33 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client * client,
					  u8 command, u8 length,
					  const u8 *values);
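A hedged sketch of the 'nice' routines in use, reading a register and writing it back with one bit set (the register offset and bit are invented):

s32 val = i2c_smbus_read_byte_data(client, 0x01);
if (val >= 0)
	i2c_smbus_write_byte_data(client, 0x01, val | 0x01);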

/*
 * A driver is capable of handling one or more physical devices present on
 * I2C adapters. This information is used to inform the driver of adapter
 * events.
/**
 * struct i2c_driver - represent an I2C device driver
 * @class: What kind of i2c device we instantiate (for detect)
 * @detect: Callback for device detection
 * @address_data: The I2C addresses to probe, ignore or force (for detect)
 * @clients: List of detected clients we created (for i2c-core use only)
 *
 * The driver.owner field should be set to the module owner of this driver.
 * The driver.name field should be set to the name of this driver.
 *
 * For automatic device detection, both @detect and @address_data must
 * be defined. @class should also be set, otherwise only devices forced
 * with module parameters will be created. The detect function must
 * fill at least the name field of the i2c_board_info structure it is
 * handed upon successful detection, and possibly also the flags field.
 *
 * If @detect is missing, the driver will still work fine for enumerated
 * devices. Detected devices simply won't be supported. This is expected
 * for the many I2C/SMBus devices which can't be detected reliably, and
 * the ones which can always be enumerated in practice.
 *
 * The i2c_client structure which is handed to the @detect callback is
 * not a real i2c_client. It is initialized just enough so that you can
 * call i2c_smbus_read_byte_data and friends on it. Don't do anything
 * else with it. In particular, calling dev_dbg and friends on it is
 * not allowed.
 */

struct i2c_driver {
	int id;
	unsigned int class;
@@ -141,6 +161,11 @@ struct i2c_driver {

	struct device_driver driver;
	const struct i2c_device_id *id_table;

	/* Device detection callback for automatic device creation */
	int (*detect)(struct i2c_client *, int kind, struct i2c_board_info *);
	const struct i2c_client_address_data *address_data;
	struct list_head clients;
};
#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
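To make the new @detect hook concrete, a hedged sketch of a driver opting into automatic detection (chip name, probe register, and class are illustrative; addr_data comes from the usual I2C_CLIENT_INSMOD machinery):

static int mychip_detect(struct i2c_client *client, int kind,
			 struct i2c_board_info *info)
{
	/* probe the temporary client; on success, name the device */
	if (i2c_smbus_read_byte_data(client, 0x00) < 0)
		return -ENODEV;
	strlcpy(info->type, "mychip", I2C_NAME_SIZE);
	return 0;
}

static struct i2c_driver mychip_driver = {
	.driver		= { .name = "mychip" },
	.class		= I2C_CLASS_HWMON,
	.detect		= mychip_detect,
	.address_data	= &addr_data,
};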

@@ -156,6 +181,7 @@ struct i2c_driver {
 * @dev: Driver model device node for the slave.
 * @irq: indicates the IRQ generated by this device (if any)
 * @list: list of active/busy clients (DEPRECATED)
 * @detected: member of an i2c_driver.clients list
 * @released: used to synchronize client releases & detaches and references
 *
 * An i2c_client identifies a single device (i.e. chip) connected to an
@@ -173,6 +199,7 @@ struct i2c_client {
	struct device dev;		/* the device structure		*/
	int irq;			/* irq issued by device		*/
	struct list_head list;		/* DEPRECATED */
	struct list_head detected;
	struct completion released;
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
@@ -350,10 +377,11 @@ static inline void i2c_set_adapdata (struct i2c_adapter *dev, void *data)
#define I2C_CLASS_HWMON		(1<<0)	/* lm_sensors, ... */
#define I2C_CLASS_TV_ANALOG	(1<<1)	/* bttv + friends */
#define I2C_CLASS_TV_DIGITAL	(1<<2)	/* dvb cards */
#define I2C_CLASS_DDC		(1<<3)	/* i2c-matroxfb ? */
#define I2C_CLASS_DDC		(1<<3)	/* DDC bus on graphics adapters */
#define I2C_CLASS_CAM_ANALOG	(1<<4)	/* camera with analog CCD */
#define I2C_CLASS_CAM_DIGITAL	(1<<5)	/* most webcams */
#define I2C_CLASS_SOUND		(1<<6)	/* sound devices */
#define I2C_CLASS_SPD		(1<<7)	/* SPD EEPROMs and similar */
#define I2C_CLASS_ALL		(UINT_MAX) /* all of the above */

/* i2c_client_address_data is the struct for holding default client
@@ -537,7 +565,7 @@ union i2c_smbus_data {
			/* and one more for user-space compatibility */
};

/* smbus_access read or write markers */
/* i2c_smbus_xfer read or write markers */
#define I2C_SMBUS_READ	1
#define I2C_SMBUS_WRITE	0

include/linux/i2c/at24.h (new file, 28 lines)
@@ -0,0 +1,28 @@
#ifndef _LINUX_AT24_H
#define _LINUX_AT24_H

#include <linux/types.h>

/*
 * As seen through Linux I2C, differences between the most common types of I2C
 * memory include:
 * - How much memory is available (usually specified in bit)?
 * - What write page size does it support?
 * - Special flags (16 bit addresses, read_only, world readable...)?
 *
 * If you set up a custom eeprom type, please double-check the parameters.
 * Especially page_size needs extra care, as you risk data loss if your value
 * is bigger than what the chip actually supports!
 */

struct at24_platform_data {
	u32		byte_len;		/* size (sum of all addr) */
	u16		page_size;		/* for writes */
	u8		flags;
#define AT24_FLAG_ADDR16	0x80	/* address pointer is 16 bit */
#define AT24_FLAG_READONLY	0x40	/* sysfs-entry will be read-only */
#define AT24_FLAG_IRUGO		0x20	/* sysfs-entry will be world-readable */
#define AT24_FLAG_TAKE8ADDR	0x10	/* take always 8 addresses (24c00) */
};

#endif /* _LINUX_AT24_H */
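A hedged sketch of board code describing a 24c32-style part with this platform data (bus wiring, address, and page size are board-specific guesses):

static struct at24_platform_data board_eeprom = {
	.byte_len	= 4096,			/* 24c32: 32 kbit */
	.page_size	= 32,
	.flags		= AT24_FLAG_ADDR16,
};

static struct i2c_board_info board_i2c_devices[] __initdata = {
	{
		I2C_BOARD_INFO("24c32", 0x50),
		.platform_data = &board_eeprom,
	},
};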
@@ -138,6 +138,12 @@ struct ide_io_ports {
#define WAIT_CMD	(10*HZ)		/* 10sec  - maximum wait for an IRQ to happen */
#define WAIT_MIN_SLEEP	(2*HZ/100)	/* 20msec - minimum sleep time */

/*
 * Op codes for special requests to be handled by ide_special_rq().
 * Values should be in the range of 0x20 to 0x3f.
 */
#define REQ_DRIVE_RESET		0x20

/*
 * Check for an interrupt and acknowledge the interrupt status
 */
@@ -171,7 +177,7 @@ typedef struct hw_regs_s {
	int		irq;			/* our irq number */
	ide_ack_intr_t	*ack_intr;		/* acknowledge interrupt */
	hwif_chipset_t	chipset;
	struct device	*dev;
	struct device	*dev, *parent;
} hw_regs_t;

void ide_init_port_data(struct hwif_s *, unsigned int);
@@ -364,7 +370,6 @@ typedef struct ide_drive_s {
	u8	wcache;		/* status of write cache */
	u8	acoustic;	/* acoustic management */
	u8	media;		/* disk, cdrom, tape, floppy, ... */
	u8	ctl;		/* "normal" value for Control register */
	u8	ready_stat;	/* min status value for drive ready */
	u8	mult_count;	/* current multiple sector setting */
	u8	mult_req;	/* requested multiple sector setting */
@@ -406,8 +411,8 @@ typedef struct ide_drive_s {
struct ide_port_info;

struct ide_port_ops {
	/* host specific initialization of devices on a port */
	void	(*port_init_devs)(struct hwif_s *);
	/* host specific initialization of a device */
	void	(*init_dev)(ide_drive_t *);
	/* routine to program host for PIO mode */
	void	(*set_pio_mode)(ide_drive_t *, const u8);
	/* routine to program host for DMA mode */
@@ -493,7 +498,7 @@ typedef struct hwif_s {
	void (*ide_dma_clear_irq)(ide_drive_t *drive);

	void (*OUTB)(u8 addr, unsigned long port);
	void (*OUTBSYNC)(ide_drive_t *drive, u8 addr, unsigned long port);
	void (*OUTBSYNC)(struct hwif_s *hwif, u8 addr, unsigned long port);

	u8  (*INB)(unsigned long port);

@@ -532,7 +537,6 @@ typedef struct hwif_s {
	unsigned	serialized : 1;	/* serialized all channel operation */
	unsigned	sharing_irq: 1;	/* 1 = sharing irq with another hwif */
	unsigned	sg_mapped  : 1;	/* sg_table and sg_nents are ready */
	unsigned	mmio       : 1;	/* host uses MMIO */

	struct device		gendev;
	struct device		*portdev;
@@ -567,8 +571,6 @@ typedef struct hwgroup_s {
	unsigned int sleeping	: 1;
	/* BOOL: polling active & poll_timeout field valid */
	unsigned int polling	: 1;
	/* BOOL: in a polling reset situation. Must not trigger another reset yet */
	unsigned int resetting	: 1;

	/* current drive */
	ide_drive_t *drive;
@@ -604,12 +606,13 @@ enum {
	PC_FLAG_SUPPRESS_ERROR		= (1 << 1),
	PC_FLAG_WAIT_FOR_DSC		= (1 << 2),
	PC_FLAG_DMA_OK			= (1 << 3),
	PC_FLAG_DMA_RECOMMENDED		= (1 << 4),
	PC_FLAG_DMA_IN_PROGRESS		= (1 << 5),
	PC_FLAG_DMA_ERROR		= (1 << 6),
	PC_FLAG_WRITING			= (1 << 7),
	PC_FLAG_DMA_IN_PROGRESS		= (1 << 4),
	PC_FLAG_DMA_ERROR		= (1 << 5),
	PC_FLAG_WRITING			= (1 << 6),
	/* command timed out */
	PC_FLAG_TIMEDOUT		= (1 << 8),
	PC_FLAG_TIMEDOUT		= (1 << 7),
	PC_FLAG_ZIP_DRIVE		= (1 << 8),
	PC_FLAG_DRQ_INTERRUPT		= (1 << 9),
};

struct ide_atapi_pc {
@@ -642,8 +645,8 @@ struct ide_atapi_pc {
	 * to change/removal later.
	 */
	u8 pc_buf[256];
	void (*idefloppy_callback) (ide_drive_t *);
	ide_startstop_t (*idetape_callback) (ide_drive_t *);

	void (*callback)(ide_drive_t *);

	/* idetape only */
	struct idetape_bh *bh;
@@ -787,7 +790,6 @@ struct ide_driver_s {
	ide_startstop_t	(*do_request)(ide_drive_t *, struct request *, sector_t);
	int		(*end_request)(ide_drive_t *, int, int);
	ide_startstop_t	(*error)(ide_drive_t *, struct request *rq, u8, u8);
	ide_startstop_t	(*abort)(ide_drive_t *, struct request *rq);
	struct device_driver	gen_driver;
	int		(*probe)(ide_drive_t *);
	void		(*remove)(ide_drive_t *);
@@ -802,22 +804,6 @@ struct ide_driver_s {

int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsigned, unsigned long);

/*
 * ide_hwifs[] is the master data structure used to keep track
 * of just about everything in ide.c.  Whenever possible, routines
 * should be using pointers to a drive (ide_drive_t *) or
 * pointers to a hwif (ide_hwif_t *), rather than indexing this
 * structure directly (the allocation/layout may change!).
 *
 */
#ifndef _IDE_C
extern	ide_hwif_t	ide_hwifs[];	/* master data repository */
#endif
extern int ide_noacpi;
extern int ide_acpigtf;
extern int ide_acpionboot;
extern int noautodma;

extern int ide_vlb_clk;
extern int ide_pci_clk;

@@ -845,10 +831,6 @@ ide_startstop_t __ide_error(ide_drive_t *, struct request *, u8, u8);

ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, byte stat);

ide_startstop_t __ide_abort(ide_drive_t *, struct request *);

extern ide_startstop_t ide_abort(ide_drive_t *, const char *);

extern void ide_fix_driveid(struct hd_driveid *);

extern void ide_fixstring(u8 *, const int, const int);
@@ -857,25 +839,12 @@ int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);

extern ide_startstop_t ide_do_reset (ide_drive_t *);

extern void ide_init_drive_cmd (struct request *rq);

/*
 * "action" parameter type for ide_do_drive_cmd() below.
 */
typedef enum {
	ide_wait,	/* insert rq at end of list, and wait for it */
	ide_preempt,	/* insert rq in front of current request */
	ide_head_wait,	/* insert rq in front of current request and wait for it */
	ide_end		/* insert rq at end of list, but don't wait for it */
} ide_action_t;

extern int ide_do_drive_cmd(ide_drive_t *, struct request *, ide_action_t);
extern void ide_do_drive_cmd(ide_drive_t *, struct request *);

extern void ide_end_drive_cmd(ide_drive_t *, u8, u8);

enum {
	IDE_TFLAG_LBA48			= (1 << 0),
	IDE_TFLAG_NO_SELECT_MASK	= (1 << 1),
	IDE_TFLAG_FLAGGED		= (1 << 2),
	IDE_TFLAG_OUT_DATA		= (1 << 3),
	IDE_TFLAG_OUT_HOB_FEATURE	= (1 << 4),
@@ -980,11 +949,23 @@ typedef struct ide_task_s {
void ide_tf_dump(const char *, struct ide_taskfile *);

extern void SELECT_DRIVE(ide_drive_t *);
void SELECT_MASK(ide_drive_t *, int);

extern int drive_is_ready(ide_drive_t *);

void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8);

ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
	ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry,
	void (*update_buffers)(ide_drive_t *, struct ide_atapi_pc *),
	void (*retry_pc)(ide_drive_t *), void (*dsc_handle)(ide_drive_t *),
	void (*io_buffers)(ide_drive_t *, struct ide_atapi_pc *, unsigned int,
			   int));
ide_startstop_t ide_transfer_pc(ide_drive_t *, struct ide_atapi_pc *,
				ide_handler_t *, unsigned int, ide_expiry_t *);
ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_atapi_pc *,
			     ide_handler_t *, unsigned int, ide_expiry_t *);

ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *);

void task_end_request(ide_drive_t *, struct request *, u8);
@@ -996,8 +977,6 @@ int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long);
int ide_cmd_ioctl(ide_drive_t *, unsigned int, unsigned long);
int ide_task_ioctl(ide_drive_t *, unsigned int, unsigned long);

extern int system_bus_clock(void);

extern int ide_driveid_update(ide_drive_t *);
extern int ide_config_drive_speed(ide_drive_t *, u8);
extern u8 eighty_ninty_three (ide_drive_t *);
@@ -1279,16 +1258,43 @@ static inline int ide_dev_is_sata(struct hd_driveid *id)
u64 ide_get_lba_addr(struct ide_taskfile *, int);
u8 ide_dump_status(ide_drive_t *, const char *, u8);

typedef struct ide_pio_timings_s {
	int	setup_time;	/* Address setup (ns) minimum */
	int	active_time;	/* Active pulse (ns) minimum */
	int	cycle_time;	/* Cycle time (ns) minimum = */
				/* active + recovery (+ setup for some chips) */
} ide_pio_timings_t;
struct ide_timing {
	u8  mode;
	u8  setup;	/* t1 */
	u16 act8b;	/* t2 for 8-bit io */
	u16 rec8b;	/* t2i for 8-bit io */
	u16 cyc8b;	/* t0 for 8-bit io */
	u16 active;	/* t2 or tD */
	u16 recover;	/* t2i or tK */
	u16 cycle;	/* t0 */
	u16 udma;	/* t2CYCTYP/2 */
};

enum {
	IDE_TIMING_SETUP	= (1 << 0),
	IDE_TIMING_ACT8B	= (1 << 1),
	IDE_TIMING_REC8B	= (1 << 2),
	IDE_TIMING_CYC8B	= (1 << 3),
	IDE_TIMING_8BIT		= IDE_TIMING_ACT8B | IDE_TIMING_REC8B |
				  IDE_TIMING_CYC8B,
	IDE_TIMING_ACTIVE	= (1 << 4),
	IDE_TIMING_RECOVER	= (1 << 5),
	IDE_TIMING_CYCLE	= (1 << 6),
	IDE_TIMING_UDMA		= (1 << 7),
	IDE_TIMING_ALL		= IDE_TIMING_SETUP | IDE_TIMING_8BIT |
				  IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER |
				  IDE_TIMING_CYCLE | IDE_TIMING_UDMA,
};

struct ide_timing *ide_timing_find_mode(u8);
u16 ide_pio_cycle_time(ide_drive_t *, u8);
void ide_timing_merge(struct ide_timing *, struct ide_timing *,
		      struct ide_timing *, unsigned int);
int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);

int ide_scan_pio_blacklist(char *);

unsigned int ide_pio_cycle_time(ide_drive_t *, u8);
u8 ide_get_best_pio_mode(ide_drive_t *, u8, u8);
extern const ide_pio_timings_t ide_pio_timings[6];

int ide_set_pio_mode(ide_drive_t *, u8);
int ide_set_dma_mode(ide_drive_t *, u8);
@@ -1349,7 +1355,8 @@ static inline void ide_set_irq(ide_drive_t *drive, int on)
{
	ide_hwif_t *hwif = drive->hwif;

	hwif->OUTB(drive->ctl | (on ? 0 : 2), hwif->io_ports.ctl_addr);
	hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS | (on ? 0 : 2),
		       hwif->io_ports.ctl_addr);
}

static inline u8 ide_read_status(ide_drive_t *drive)

include/linux/ihex.h (new file, 74 lines)
@@ -0,0 +1,74 @@
/*
 * Compact binary representation of ihex records. Some devices need their
 * firmware loaded in strange orders rather than a single big blob, but
 * actually parsing ihex-as-text within the kernel seems silly. Thus,...
 */

#ifndef __LINUX_IHEX_H__
#define __LINUX_IHEX_H__

#include <linux/types.h>
#include <linux/firmware.h>
#include <linux/device.h>

/* Intel HEX files actually limit the length to 256 bytes, but we have
   drivers which would benefit from using separate records which are
   longer than that, so we extend to 16 bits of length */
struct ihex_binrec {
	__be32 addr;
	__be16 len;
	uint8_t data[0];
} __attribute__((aligned(4)));

/* Find the next record, taking into account the 4-byte alignment */
static inline const struct ihex_binrec *
ihex_next_binrec(const struct ihex_binrec *rec)
{
	int next = ((be16_to_cpu(rec->len) + 5) & ~3) - 2;
	rec = (void *)&rec->data[next];

	return be16_to_cpu(rec->len) ? rec : NULL;
}

/* Check that ihex_next_binrec() won't take us off the end of the image... */
static inline int ihex_validate_fw(const struct firmware *fw)
{
	const struct ihex_binrec *rec;
	size_t ofs = 0;

	while (ofs <= fw->size - sizeof(*rec)) {
		rec = (void *)&fw->data[ofs];

		/* Zero length marks end of records */
		if (!be16_to_cpu(rec->len))
			return 0;

		/* Point to next record... */
		ofs += (sizeof(*rec) + be16_to_cpu(rec->len) + 3) & ~3;
	}
	return -EINVAL;
}

/* Request firmware and validate it so that we can trust we won't
 * run off the end while reading records... */
static inline int request_ihex_firmware(const struct firmware **fw,
					const char *fw_name,
					struct device *dev)
{
	const struct firmware *lfw;
	int ret;

	ret = request_firmware(&lfw, fw_name, dev);
	if (ret)
		return ret;
	ret = ihex_validate_fw(lfw);
	if (ret) {
		dev_err(dev, "Firmware \"%s\" not valid IHEX records\n",
			fw_name);
		release_firmware(lfw);
		return ret;
	}
	*fw = lfw;
	return 0;
}
#endif /* __LINUX_IHEX_H__ */
|
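For illustration, a hedged sketch of a driver consuming such an image; the firmware name and the load_block() helper are hypothetical:

static int example_load_ihex(struct device *dev)
{
        const struct firmware *fw;
        const struct ihex_binrec *rec;
        int ret;

        ret = request_ihex_firmware(&fw, "example.fw", dev);
        if (ret)
                return ret;

        /* records were validated above, so this walk cannot overrun */
        for (rec = (const void *)fw->data; rec; rec = ihex_next_binrec(rec))
                load_block(be32_to_cpu(rec->addr), rec->data,
                           be16_to_cpu(rec->len));   /* hypothetical helper */

        release_firmware(fw);
        return 0;
}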
@@ -44,6 +44,13 @@

#include <linux/types.h>

/*
 * These mimic similar macros defined in user-space for inet_ntop(3).
 * See /usr/include/netinet/in.h .
 */
#define INET_ADDRSTRLEN (16)
#define INET6_ADDRSTRLEN (48)

extern __be32 in_aton(const char *str);
extern int in4_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
@@ -140,8 +140,8 @@ extern struct group_info init_groups;
                .nr_cpus_allowed = NR_CPUS, \
        }, \
        .tasks = LIST_HEAD_INIT(tsk.tasks), \
        .ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \
        .ptrace_list = LIST_HEAD_INIT(tsk.ptrace_list), \
        .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
        .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
        .real_parent = &tsk, \
        .parent = &tsk, \
        .children = LIST_HEAD_INIT(tsk.children), \
@@ -104,8 +104,11 @@ extern void enable_irq(unsigned int irq);

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

#else /* CONFIG_SMP */

@@ -119,6 +122,8 @@ static inline int irq_can_set_affinity(unsigned int irq)
        return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */

#ifdef CONFIG_GENERIC_HARDIRQS
@@ -285,12 +290,11 @@ enum
struct softirq_action
{
        void (*action)(struct softirq_action *);
        void *data;
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
extern void raise_softirq_irqoff(unsigned int nr);
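The replacement open_softirq() prototype drops the unused void *data cookie, so handlers carry no per-registration state. A minimal sketch of the new convention; the handler body is illustrative and reusing TASKLET_SOFTIRQ is shown only to have a concrete slot:

static void example_softirq_action(struct softirq_action *a)
{
        /* per-softirq state now lives in file-scope data, not in "a" */
}

static void example_init(void)
{
        /* registration, typically done once during subsystem init */
        open_softirq(TASKLET_SOFTIRQ, example_softirq_action);
}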
@@ -99,4 +99,22 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
        return NULL;
}

#ifdef CONFIG_BLOCK
int put_io_context(struct io_context *ioc);
void exit_io_context(void);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
#else
static inline void exit_io_context(void)
{
}

struct io_context;
static inline int put_io_context(struct io_context *ioc)
{
        return 1;
}
#endif

#endif
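A hedged sketch of the reference discipline implied by the CONFIG_BLOCK branch above; the NUMA node argument is illustrative:

static void example_touch_ioc(void)
{
        struct io_context *ioc;

        ioc = get_io_context(GFP_KERNEL, -1);   /* -1: no node preference */
        if (!ioc)
                return;
        /* ... inspect or adjust the task's io context here ... */
        put_io_context(ioc);                    /* release the reference we took */
}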
@@ -59,6 +59,7 @@ struct resource_list {
#define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
#define IORESOURCE_IRQ_LOWLEVEL (1<<3)
#define IORESOURCE_IRQ_SHAREABLE (1<<4)
#define IORESOURCE_IRQ_OPTIONAL (1<<5)

/* PnP DMA specific bits (IORESOURCE_BITS) */
#define IORESOURCE_DMA_TYPE_MASK (3<<0)
@@ -88,6 +89,10 @@ struct resource_list {
#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */
#define IORESOURCE_MEM_EXPANSIONROM (1<<6)

/* PnP I/O specific bits (IORESOURCE_BITS) */
#define IORESOURCE_IO_16BIT_ADDR (1<<0)
#define IORESOURCE_IO_FIXED (1<<1)

/* PCI ROM control bits (IORESOURCE_BITS) */
#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */
@@ -244,15 +244,6 @@ static inline void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
}
#endif

#ifdef CONFIG_AUTO_IRQ_AFFINITY
extern int select_smp_affinity(unsigned int irq);
#else
static inline int select_smp_affinity(unsigned int irq)
{
        return 1;
}
#endif

extern int no_irq_affinity;

static inline int irq_balancing_disabled(unsigned int irq)
@@ -12,10 +12,10 @@
#define _LINUX_TRACE_IRQFLAGS_H

#ifdef CONFIG_TRACE_IRQFLAGS
extern void trace_hardirqs_on(void);
extern void trace_hardirqs_off(void);
extern void trace_softirqs_on(unsigned long ip);
extern void trace_softirqs_off(unsigned long ip);
extern void trace_hardirqs_on(void);
extern void trace_hardirqs_off(void);
# define trace_hardirq_context(p) ((p)->hardirq_context)
# define trace_softirq_context(p) ((p)->softirq_context)
# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled)
@@ -41,6 +41,15 @@
# define INIT_TRACE_IRQFLAGS
#endif

#if defined(CONFIG_IRQSOFF_TRACER) || \
        defined(CONFIG_PREEMPT_TRACER)
extern void stop_critical_timings(void);
extern void start_critical_timings(void);
#else
# define stop_critical_timings() do { } while (0)
# define start_critical_timings() do { } while (0)
#endif

#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT

#include <asm/irqflags.h>
@@ -168,6 +168,8 @@ struct commit_header {
        unsigned char h_chksum_size;
        unsigned char h_padding[2];
        __be32 h_chksum[JBD2_CHECKSUM_BYTES];
        __be64 h_commit_sec;
        __be32 h_commit_nsec;
};

/*
@@ -379,6 +381,38 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
        bit_spin_unlock(BH_JournalHead, &bh->b_state);
}

/* Flags in jbd_inode->i_flags */
#define __JI_COMMIT_RUNNING 0
/* Commit of the inode data in progress. We use this flag to protect us from
 * concurrent deletion of inode. We cannot use reference to inode for this
 * since we cannot afford doing last iput() on behalf of kjournald
 */
#define JI_COMMIT_RUNNING (1 << __JI_COMMIT_RUNNING)

/**
 * struct jbd_inode is the structure linking inodes in ordered mode
 * present in a transaction so that we can sync them during commit.
 */
struct jbd2_inode {
        /* Which transaction does this inode belong to? Either the running
         * transaction or the committing one. [j_list_lock] */
        transaction_t *i_transaction;

        /* Pointer to the running transaction modifying inode's data in case
         * there is already a committing transaction touching it. [j_list_lock] */
        transaction_t *i_next_transaction;

        /* List of inodes in the i_transaction [j_list_lock] */
        struct list_head i_list;

        /* VFS inode this inode belongs to [constant during the lifetime
         * of the structure] */
        struct inode *i_vfs_inode;

        /* Flags of inode [j_list_lock] */
        unsigned int i_flags;
};

struct jbd2_revoke_table_s;

/**
@@ -508,24 +542,12 @@ struct transaction_s
         */
        struct journal_head *t_reserved_list;

        /*
         * Doubly-linked circular list of all buffers under writeout during
         * commit [j_list_lock]
         */
        struct journal_head *t_locked_list;

        /*
         * Doubly-linked circular list of all metadata buffers owned by this
         * transaction [j_list_lock]
         */
        struct journal_head *t_buffers;

        /*
         * Doubly-linked circular list of all data buffers still to be
         * flushed before this transaction can be committed [j_list_lock]
         */
        struct journal_head *t_sync_datalist;

        /*
         * Doubly-linked circular list of all forget buffers (superseded
         * buffers which we can un-checkpoint once this transaction commits)
@@ -564,6 +586,12 @@ struct transaction_s
         */
        struct journal_head *t_log_list;

        /*
         * List of inodes whose data we've modified in data=ordered mode.
         * [j_list_lock]
         */
        struct list_head t_inode_list;

        /*
         * Protects info related to handles
         */
@@ -1004,7 +1032,6 @@ extern int jbd2_journal_extend (handle_t *, int nblocks);
extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *);
extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *);
extern int jbd2_journal_get_undo_access(handle_t *, struct buffer_head *);
extern int jbd2_journal_dirty_data (handle_t *, struct buffer_head *);
extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
extern void jbd2_journal_release_buffer (handle_t *, struct buffer_head *);
extern int jbd2_journal_forget (handle_t *, struct buffer_head *);
@@ -1044,6 +1071,10 @@ extern void jbd2_journal_ack_err (journal_t *);
extern int jbd2_journal_clear_err (journal_t *);
extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *);
extern int jbd2_journal_force_commit(journal_t *);
extern int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *inode);
extern int jbd2_journal_begin_ordered_truncate(struct jbd2_inode *inode, loff_t new_size);
extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode);
extern void jbd2_journal_release_jbd_inode(journal_t *journal, struct jbd2_inode *jinode);
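A hedged fragment of how a filesystem might wire up the new per-inode ordered-mode hooks; the per-fs inode container "ei", the handle, and the surrounding transaction plumbing are assumptions, not part of this patch:

/* once, when the in-core inode is set up */
jbd2_journal_init_jbd_inode(&ei->jinode, inode);

/* inside a running handle, after dirtying the inode's data pages */
err = jbd2_journal_file_inode(handle, &ei->jinode);

/* before shrinking i_size, so ordered data cannot expose stale blocks */
err = jbd2_journal_begin_ordered_truncate(&ei->jinode, new_size);

/* when the inode is finally torn down */
jbd2_journal_release_jbd_inode(journal, &ei->jinode);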
/*
 * journal_head management
@@ -1179,15 +1210,13 @@ static inline int jbd_space_needed(journal_t *journal)

/* journaling buffer types */
#define BJ_None 0 /* Not journaled */
#define BJ_SyncData 1 /* Normal data: flush before commit */
#define BJ_Metadata 2 /* Normal journaled metadata */
#define BJ_Forget 3 /* Buffer superseded by this transaction */
#define BJ_IO 4 /* Buffer is for temporary IO use */
#define BJ_Shadow 5 /* Buffer contents being shadowed to the log */
#define BJ_LogCtl 6 /* Buffer contains log descriptors */
#define BJ_Reserved 7 /* Buffer is reserved for access by journal */
#define BJ_Locked 8 /* Locked for I/O during commit */
#define BJ_Types 9
#define BJ_Metadata 1 /* Normal journaled metadata */
#define BJ_Forget 2 /* Buffer superseded by this transaction */
#define BJ_IO 3 /* Buffer is for temporary IO use */
#define BJ_Shadow 4 /* Buffer contents being shadowed to the log */
#define BJ_LogCtl 5 /* Buffer contains log descriptors */
#define BJ_Reserved 6 /* Buffer is reserved for access by journal */
#define BJ_Types 7

extern int jbd_blocks_per_page(struct inode *inode);

@@ -187,9 +187,6 @@ asmlinkage int vprintk(const char *fmt, va_list args)
        __attribute__ ((format (printf, 1, 0)));
asmlinkage int printk(const char * fmt, ...)
        __attribute__ ((format (printf, 1, 2))) __cold;
extern int log_buf_get_len(void);
extern int log_buf_read(int idx);
extern int log_buf_copy(char *dest, int idx, int len);

extern int printk_ratelimit_jiffies;
extern int printk_ratelimit_burst;
@@ -205,9 +202,6 @@ static inline int vprintk(const char *s, va_list args) { return 0; }
static inline int printk(const char *s, ...)
        __attribute__ ((format (printf, 1, 2)));
static inline int __cold printk(const char *s, ...) { return 0; }
static inline int log_buf_get_len(void) { return 0; }
static inline int log_buf_read(int idx) { return 0; }
static inline int log_buf_copy(char *dest, int idx, int len) { return 0; }
static inline int printk_ratelimit(void) { return 0; }
static inline int __printk_ratelimit(int ratelimit_jiffies, \
                                     int ratelimit_burst) { return 0; }
@@ -216,7 +210,7 @@ static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \
        { return false; }
#endif

extern void __attribute__((format(printf, 1, 2)))
extern void asmlinkage __attribute__((format(printf, 1, 2)))
        early_printk(const char *fmt, ...);

unsigned long int_sqrt(unsigned long);
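The ratelimit helpers keep their usual call pattern regardless of the removed log_buf_* accessors; a minimal sketch:

if (printk_ratelimit())
        printk(KERN_WARNING "example: event storm, output throttled\n");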
@@ -259,6 +259,10 @@ void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
struct jprobe;
struct kretprobe;

static inline struct kprobe *get_kprobe(void *addr)
{
        return NULL;
}
static inline struct kprobe *kprobe_running(void)
{
        return NULL;
@@ -27,6 +27,7 @@
#define __LINUX_LIBATA_H__

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
@@ -115,7 +116,7 @@ enum {
        /* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
        ATA_MAX_QUEUE = 32,
        ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1,
        ATA_SHORT_PAUSE = (HZ >> 6) + 1,
        ATA_SHORT_PAUSE = 16,

        ATAPI_MAX_DRAIN = 16 << 10,

@@ -168,6 +169,7 @@ enum {
        ATA_LFLAG_ASSUME_CLASS = ATA_LFLAG_ASSUME_ATA | ATA_LFLAG_ASSUME_SEMB,
        ATA_LFLAG_NO_RETRY = (1 << 5), /* don't retry this link */
        ATA_LFLAG_DISABLED = (1 << 6), /* link is disabled */
        ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */

        /* struct ata_port flags */
        ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
@@ -190,6 +192,10 @@ enum {
        ATA_FLAG_AN = (1 << 18), /* controller supports AN */
        ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */
        ATA_FLAG_IPM = (1 << 20), /* driver can handle IPM */
        ATA_FLAG_EM = (1 << 21), /* driver supports enclosure
                                  * management */
        ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity
                                           * led */

        /* The following flag belongs to ap->pflags but is kept in
         * ap->flags because it's referenced in many LLDs and will be
@@ -234,17 +240,16 @@ enum {
        /* bits 24:31 of host->flags are reserved for LLD specific flags */

        /* various lengths of time */
        ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */
        ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */
        ATA_TMOUT_INTERNAL = 30 * HZ,
        ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,
        ATA_TMOUT_BOOT = 30000, /* heuristic */
        ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */
        ATA_TMOUT_INTERNAL_QUICK = 5000,

        /* FIXME: GoVault needs 2s but we can't afford that without
         * parallel probing. 800ms is enough for iVDR disk
         * HHD424020F7SV00. Increase to 2secs when parallel probing
         * is in place.
         */
        ATA_TMOUT_FF_WAIT = 4 * HZ / 5,
        ATA_TMOUT_FF_WAIT = 800,

        /* Spec mandates to wait for ">= 2ms" before checking status
         * after reset. We wait 150ms, because that was the magic
@@ -256,14 +261,14 @@ enum {
         *
         * Old drivers/ide uses the 2mS rule and then waits for ready.
         */
        ATA_WAIT_AFTER_RESET_MSECS = 150,
        ATA_WAIT_AFTER_RESET = 150,

        /* If PMP is supported, we have to do follow-up SRST. As some
         * PMPs don't send D2H Reg FIS after hardreset, LLDs are
         * advised to wait only for the following duration before
         * doing SRST.
         */
        ATA_TMOUT_PMP_SRST_WAIT = 1 * HZ,
        ATA_TMOUT_PMP_SRST_WAIT = 1000,

        /* ATA bus states */
        BUS_UNKNOWN = 0,
@@ -340,6 +345,11 @@ enum {

        SATA_PMP_RW_TIMEOUT = 3000, /* PMP read/write timeout */

        /* This should match the actual table size of
         * ata_eh_cmd_timeout_table in libata-eh.c.
         */
        ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 5,

        /* Horkage types. May be set by libata or controller on drives
           (some horkage may be drive/controller pair dependent) */

@@ -441,6 +451,15 @@ enum link_pm {
        MEDIUM_POWER,
};
extern struct device_attribute dev_attr_link_power_management_policy;
extern struct device_attribute dev_attr_em_message_type;
extern struct device_attribute dev_attr_em_message;
extern struct device_attribute dev_attr_sw_activity;

enum sw_activity {
        OFF,
        BLINK_ON,
        BLINK_OFF,
};

#ifdef CONFIG_ATA_SFF
struct ata_ioports {
@@ -597,10 +616,14 @@ struct ata_eh_info {
struct ata_eh_context {
        struct ata_eh_info i;
        int tries[ATA_MAX_DEVICES];
        int cmd_timeout_idx[ATA_MAX_DEVICES]
                           [ATA_EH_CMD_TIMEOUT_TABLE_SIZE];
        unsigned int classes[ATA_MAX_DEVICES];
        unsigned int did_probe_mask;
        unsigned int saved_ncq_enabled;
        u8 saved_xfer_mode[ATA_MAX_DEVICES];
        /* timestamp for the last reset attempt or success */
        unsigned long last_reset;
};

struct ata_acpi_drive
@@ -692,6 +715,7 @@ struct ata_port {
        struct timer_list fastdrain_timer;
        unsigned long fastdrain_cnt;

        int em_message_type;
        void *private_data;

#ifdef CONFIG_ATA_ACPI
@@ -783,6 +807,12 @@ struct ata_port_operations {
        u8 (*bmdma_status)(struct ata_port *ap);
#endif /* CONFIG_ATA_SFF */

        ssize_t (*em_show)(struct ata_port *ap, char *buf);
        ssize_t (*em_store)(struct ata_port *ap, const char *message,
                            size_t size);
        ssize_t (*sw_activity_show)(struct ata_device *dev, char *buf);
        ssize_t (*sw_activity_store)(struct ata_device *dev,
                                     enum sw_activity val);
        /*
         * Obsolete
         */
@@ -895,8 +925,7 @@ extern void ata_host_resume(struct ata_host *host);
#endif
extern int ata_ratelimit(void);
extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
                             unsigned long interval_msec,
                             unsigned long timeout_msec);
                             unsigned long interval, unsigned long timeout);
extern int atapi_cmd_type(u8 opcode);
extern void ata_tf_to_fis(const struct ata_taskfile *tf,
                          u8 pmp, int is_cmd, u8 *fis);
@@ -1389,6 +1418,12 @@ static inline int ata_check_ready(u8 status)
        return 0;
}

static inline unsigned long ata_deadline(unsigned long from_jiffies,
                                         unsigned long timeout_msecs)
{
        return from_jiffies + msecs_to_jiffies(timeout_msecs);
}


/**************************************************************************
 * PMP - drivers/ata/libata-pmp.c
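With the ATA_TMOUT_* values now expressed in milliseconds rather than jiffies, ata_deadline() is the bridge back to jiffies at the point of use. A hedged polling sketch; the port argument and the 10 ms poll interval are illustrative:

static void example_wait(struct ata_port *ap)
{
        unsigned long deadline = ata_deadline(jiffies, ATA_TMOUT_FF_WAIT);

        while (time_before(jiffies, deadline)) {
                /* ... poll the port status here ... */
                msleep(10);
        }
}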
@@ -4,6 +4,8 @@
#include <linux/compiler.h>
#include <asm/linkage.h>

#define notrace __attribute__((no_instrument_function))

#ifdef __cplusplus
#define CPP_ASMLINKAGE extern "C"
#else
@@ -84,65 +84,6 @@ static inline void list_add_tail(struct list_head *new, struct list_head *head)
        __list_add(new, head->prev, head);
}

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head * new,
                struct list_head * prev, struct list_head * next)
{
        new->next = next;
        new->prev = prev;
        smp_wmb();
        next->prev = new;
        prev->next = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
        __list_add_rcu(new, head, head->next);
}

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
                                     struct list_head *head)
{
        __list_add_rcu(new, head->prev, head);
}

/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
@@ -173,36 +114,6 @@ static inline void list_del(struct list_head *entry)
extern void list_del(struct list_head *entry);
#endif

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry. Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        entry->prev = LIST_POISON2;
}

/**
 * list_replace - replace old entry by new one
 * @old : the element to be replaced
@@ -226,25 +137,6 @@ static inline void list_replace_init(struct list_head *old,
        INIT_LIST_HEAD(old);
}

/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
                                    struct list_head *new)
{
        new->next = old->next;
        new->prev = old->prev;
        smp_wmb();
        new->next->prev = new;
        new->prev->next = new;
        old->prev = LIST_POISON2;
}

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
@@ -368,62 +260,6 @@ static inline void list_splice_init(struct list_head *list,
        }
}

/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list.
 * @list: the RCU-protected list to splice
 * @head: the place in the list to splice the first list into
 * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
 *
 * @head can be RCU-read traversed concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to
 * prevent any other updates to @head. In principle, it is possible
 * to modify the list as soon as sync() begins execution.
 * If this sort of thing becomes necessary, an alternative version
 * based on call_rcu() could be created. But only if -really-
 * needed -- there is no shortage of RCU API members.
 */
static inline void list_splice_init_rcu(struct list_head *list,
                                        struct list_head *head,
                                        void (*sync)(void))
{
        struct list_head *first = list->next;
        struct list_head *last = list->prev;
        struct list_head *at = head->next;

        if (list_empty(head))
                return;

        /* "first" and "last" tracking list, so initialize it. */

        INIT_LIST_HEAD(list);

        /*
         * At this point, the list body still points to the source list.
         * Wait for any readers to finish using the list before splicing
         * the list body into the new list. Any new readers will see
         * an empty list.
         */

        sync();

        /*
         * Readers are finished with the source list, so perform splice.
         * The order is important if the new list is global and accessible
         * to concurrent RCU readers. Note that RCU readers are not
         * permitted to traverse the prev pointers without excluding
         * this function.
         */

        last->next = at;
        smp_wmb();
        head->next = first;
        first->prev = head;
        at->prev = last;
}

/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
@@ -629,57 +465,6 @@ static inline void list_splice_init_rcu(struct list_head *list,
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.prev, typeof(*n), member))

/**
 * list_for_each_rcu - iterate over an rcu-protected list
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_rcu(pos, head) \
        for (pos = rcu_dereference((head)->next); \
             prefetch(pos->next), pos != (head); \
             pos = rcu_dereference(pos->next))

#define __list_for_each_rcu(pos, head) \
        for (pos = rcu_dereference((head)->next); \
             pos != (head); \
             pos = rcu_dereference(pos->next))

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
        for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \
             prefetch(pos->member.next), &pos->member != (head); \
             pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member))


/**
 * list_for_each_continue_rcu
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 *
 * Iterate over an rcu-protected list, continuing after current point.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
        for ((pos) = rcu_dereference((pos)->next); \
             prefetch((pos)->next), (pos) != (head); \
             (pos) = rcu_dereference((pos)->next))

/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
@@ -730,31 +515,6 @@ static inline void hlist_del(struct hlist_node *n)
        n->pprev = LIST_POISON2;
}

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
        __hlist_del(n);
        n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
        if (!hlist_unhashed(n)) {
@@ -763,27 +523,6 @@ static inline void hlist_del_init(struct hlist_node *n)
        }
}

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
                                     struct hlist_node *new)
{
        struct hlist_node *next = old->next;

        new->next = next;
        new->pprev = old->pprev;
        smp_wmb();
        if (next)
                new->next->pprev = &new->next;
        *new->pprev = new;
        old->pprev = LIST_POISON2;
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
        struct hlist_node *first = h->first;
@@ -794,38 +533,6 @@ static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
        n->pprev = &h->first;
}


/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
                                      struct hlist_head *h)
{
        struct hlist_node *first = h->first;
        n->next = first;
        n->pprev = &h->first;
        smp_wmb();
        if (first)
                first->pprev = &n->next;
        h->first = n;
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
                                    struct hlist_node *next)
@@ -847,63 +554,6 @@ static inline void hlist_add_after(struct hlist_node *n,
                next->next->pprev = &next->next;
}

/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
                                        struct hlist_node *next)
{
        n->pprev = next->pprev;
        n->next = next;
        smp_wmb();
        next->pprev = &n->next;
        *(n->pprev) = n;
}

/**
 * hlist_add_after_rcu
 * @prev: the existing element to add the new element after.
 * @n: the new element to add to the hash list.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_after_rcu(struct hlist_node *prev,
                                       struct hlist_node *n)
{
        n->next = prev->next;
        n->pprev = &prev->next;
        smp_wmb();
        prev->next = n;
        if (n->next)
                n->next->pprev = &n->next;
}

#define hlist_entry(ptr, type, member) container_of(ptr,type,member)

#define hlist_for_each(pos, head) \
@@ -964,21 +614,4 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
             ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = n)

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
        for (pos = rcu_dereference((head)->first); \
             pos && ({ prefetch(pos->next); 1;}) && \
             ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = rcu_dereference(pos->next))

#endif
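Moving the _rcu primitives out of this header (presumably into a dedicated rculist header) changes nothing about how they are used. A hedged sketch of the read/update split; the item type and the update-side locking are illustrative:

struct example_item {
        int key;
        struct list_head link;
        struct rcu_head rcu;
};

static LIST_HEAD(example_list);         /* writers serialize on their own lock */

static void example_free(struct rcu_head *head)
{
        kfree(container_of(head, struct example_item, rcu));
}

static int example_key_present(int key)
{
        struct example_item *it;
        int found = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(it, &example_list, link)
                if (it->key == key) {
                        found = 1;      /* "it" is only valid inside the read side */
                        break;
                }
        rcu_read_unlock();
        return found;
}

static void example_remove(struct example_item *it)
{
        list_del_rcu(&it->link);        /* caller holds the update-side lock */
        call_rcu(&it->rcu, example_free);  /* defer the free past a grace period */
}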
@@ -122,11 +122,9 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data);
 */

#define LM_OUT_ST_MASK 0x00000003
#define LM_OUT_CACHEABLE 0x00000004
#define LM_OUT_CANCELED 0x00000008
#define LM_OUT_ASYNC 0x00000080
#define LM_OUT_ERROR 0x00000100
#define LM_OUT_CONV_DEADLK 0x00000200

/*
 * lm_callback_t types
@@ -138,9 +136,6 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data);
 * LM_CB_NEED_RECOVERY
 * The given journal needs to be recovered.
 *
 * LM_CB_DROPLOCKS
 * Reduce the number of cached locks.
 *
 * LM_CB_ASYNC
 * The given lock has been granted.
 */
@@ -149,7 +144,6 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data);
#define LM_CB_NEED_D 258
#define LM_CB_NEED_S 259
#define LM_CB_NEED_RECOVERY 260
#define LM_CB_DROPLOCKS 261
#define LM_CB_ASYNC 262

/*
@@ -182,6 +182,9 @@ struct lock_list {
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
        u8 irq_context;
        u8 depth;
        u16 base;
        struct list_head entry;
        u64 chain_key;
};
@@ -275,14 +278,6 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
        lockdep_init_map(&(lock)->dep_map, #lock, \
                         (lock)->dep_map.key, sub)

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
        { .name = (_name), .key = (void *)(_key), }


/*
 * Acquire a lock.
 *
@@ -44,8 +44,8 @@ struct marker {
 */
        char state; /* Marker state. */
        char ptype; /* probe type : 0 : single, 1 : multi */
        void (*call)(const struct marker *mdata, /* Probe wrapper */
                     void *call_private, const char *fmt, ...);
        /* Probe wrapper */
        void (*call)(const struct marker *mdata, void *call_private, ...);
        struct marker_probe_closure single;
        struct marker_probe_closure *multi;
} __attribute__((aligned(8)));
@@ -58,8 +58,12 @@ struct marker {
 * Make sure the alignment of the structure in the __markers section will
 * not add unwanted padding between the beginning of the section and the
 * structure. Force alignment to the same alignment as the section start.
 *
 * The "generic" argument controls which marker enabling mechanism must be used.
 * If generic is true, a variable read is used.
 * If generic is false, immediate values are used.
 */
#define __trace_mark(name, call_private, format, args...) \
#define __trace_mark(generic, name, call_private, format, args...) \
        do { \
                static const char __mstrtab_##name[] \
                __attribute__((section("__markers_strings"))) \
@@ -72,15 +76,14 @@ struct marker {
                __mark_check_format(format, ## args); \
                if (unlikely(__mark_##name.state)) { \
                        (*__mark_##name.call) \
                                (&__mark_##name, call_private, \
                                format, ## args); \
                                (&__mark_##name, call_private, ## args);\
                } \
        } while (0)

extern void marker_update_probe_range(struct marker *begin,
        struct marker *end);
#else /* !CONFIG_MARKERS */
#define __trace_mark(name, call_private, format, args...) \
#define __trace_mark(generic, name, call_private, format, args...) \
        __mark_check_format(format, ## args)
static inline void marker_update_probe_range(struct marker *begin,
        struct marker *end)
@@ -88,15 +91,30 @@ static inline void marker_update_probe_range(struct marker *begin,
#endif /* CONFIG_MARKERS */

/**
 * trace_mark - Marker
 * trace_mark - Marker using code patching
 * @name: marker name, not quoted.
 * @format: format string
 * @args...: variable argument list
 *
 * Places a marker.
 * Places a marker using optimized code patching technique (imv_read())
 * to be enabled when immediate values are present.
 */
#define trace_mark(name, format, args...) \
        __trace_mark(name, NULL, format, ## args)
        __trace_mark(0, name, NULL, format, ## args)

/**
 * _trace_mark - Marker using variable read
 * @name: marker name, not quoted.
 * @format: format string
 * @args...: variable argument list
 *
 * Places a marker using a standard memory read (_imv_read()) to be
 * enabled. Should be used for markers in code paths where instruction
 * modification based enabling is not welcome. (__init and __exit functions,
 * lockdep, some traps, printk).
 */
#define _trace_mark(name, format, args...) \
        __trace_mark(1, name, NULL, format, ## args)
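At a call site, only the enabling mechanism differs between the two flavors; a minimal sketch with illustrative marker names:

trace_mark(example_subsys_event, "value %d name %s", value, name);

/* in __init/__exit, lockdep, traps or printk paths, prefer the
 * variable-read flavor: */
_trace_mark(example_subsys_early, "value %d", value);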
/**
 * MARK_NOARGS - Format string for a marker with no argument.
@@ -117,9 +135,9 @@ static inline void __printf(1, 2) ___mark_check_format(const char *fmt, ...)
extern marker_probe_func __mark_empty_function;

extern void marker_probe_cb(const struct marker *mdata,
        void *call_private, const char *fmt, ...);
        void *call_private, ...);
extern void marker_probe_cb_noarg(const struct marker *mdata,
        void *call_private, const char *fmt, ...);
        void *call_private, ...);

/*
 * Connect a probe to a marker.
@@ -398,7 +398,8 @@ int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_waterm
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);

int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                          int block_mcast_loopback);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);

int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
@@ -108,6 +108,7 @@ extern unsigned int kobjsize(const void *objp);

#define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
#define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */

#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
@@ -33,6 +33,32 @@ static inline void vm_unacct_memory(long pages)
        vm_acct_memory(-pages);
}

/*
 * Allow architectures to handle additional protection bits
 */

#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot) 0
#endif

#ifndef arch_vm_get_page_prot
#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
#endif

#ifndef arch_validate_prot
/*
 * This is called from mprotect(). PROT_GROWSDOWN and PROT_GROWSUP have
 * already been masked out.
 *
 * Returns true if the prot flags are valid
 */
static inline int arch_validate_prot(unsigned long prot)
{
        return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
}
#define arch_validate_prot arch_validate_prot
#endif
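As an illustration of the override hooks above, an architecture with a PROT_SAO-style bit might provide something like this in its <asm/mman.h> (a sketch only, loosely modeled on the powerpc VM_SAO flag added elsewhere in this merge; not an excerpt from the patch):

#define arch_calc_vm_prot_bits(prot)    ((prot) & PROT_SAO ? VM_SAO : 0)

#define arch_validate_prot(prot)                                  \
        (((prot) & ~(PROT_READ | PROT_WRITE | PROT_EXEC |         \
                     PROT_SEM | PROT_SAO)) == 0)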
/*
 * Optimisation macro. It is equivalent to:
 *   (x & bit1) ? bit2 : 0
@@ -51,7 +77,8 @@ calc_vm_prot_bits(unsigned long prot)
{
        return _calc_vm_trans(prot, PROT_READ, VM_READ ) |
               _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
               _calc_vm_trans(prot, PROT_EXEC, VM_EXEC );
               _calc_vm_trans(prot, PROT_EXEC, VM_EXEC) |
               arch_calc_vm_prot_bits(prot);
}

/*
@@ -135,6 +135,7 @@ extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
        struct mmc_command *, int);

extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *);
extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int);

extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort);
extern void mmc_release_host(struct mmc_host *host);
@@ -51,8 +51,30 @@ struct mmc_ios {

struct mmc_host_ops {
        void (*request)(struct mmc_host *host, struct mmc_request *req);
        /*
         * Avoid calling these three functions too often or in a "fast path",
         * since the underlying controller might implement them in an
         * expensive and/or slow way.
         *
         * Also note that these functions might sleep, so don't call them
         * in atomic contexts!
         *
         * Return values for the get_ro callback should be:
         *   0 for a read/write card
         *   1 for a read-only card
         *   -ENOSYS when not supported (equal to NULL callback)
         *   or a negative errno value when something bad happened
         *
         * Return values for the get_cd callback should be:
         *   0 for an absent card
         *   1 for a present card
         *   -ENOSYS when not supported (equal to NULL callback)
         *   or a negative errno value when something bad happened
         */
        void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios);
        int (*get_ro)(struct mmc_host *host);
        int (*get_cd)(struct mmc_host *host);

        void (*enable_sdio_irq)(struct mmc_host *host, int enable);
};

@@ -89,11 +111,11 @@ struct mmc_host {
        unsigned long caps; /* Host capabilities */

#define MMC_CAP_4_BIT_DATA (1 << 0) /* Can the host do 4 bit transfers */
#define MMC_CAP_MULTIWRITE (1 << 1) /* Can accurately report bytes sent to card on error */
#define MMC_CAP_MMC_HIGHSPEED (1 << 2) /* Can do MMC high-speed timing */
#define MMC_CAP_SD_HIGHSPEED (1 << 3) /* Can do SD high-speed timing */
#define MMC_CAP_SDIO_IRQ (1 << 4) /* Can signal pending SDIO IRQs */
#define MMC_CAP_SPI (1 << 5) /* Talks only SPI protocols */
#define MMC_CAP_MMC_HIGHSPEED (1 << 1) /* Can do MMC high-speed timing */
#define MMC_CAP_SD_HIGHSPEED (1 << 2) /* Can do SD high-speed timing */
#define MMC_CAP_SDIO_IRQ (1 << 3) /* Can signal pending SDIO IRQs */
#define MMC_CAP_SPI (1 << 4) /* Talks only SPI protocols */
#define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */

        /* host specific block data */
        unsigned int max_seg_size; /* see blk_queue_max_segment_size */
@@ -16,7 +16,6 @@
 * Based strongly on code by:
 *
 * Author: Yong-iL Joh <tolkien@mizi.com>
 * Date : $Date: 2002/06/18 12:37:30 $
 *
 * Author: Andrew Christian
 * 15 May 2002
@@ -1,7 +1,7 @@
/*
 * include/linux/mmc/sdio_func.h
 *
 * Copyright 2007 Pierre Ossman
 * Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -46,6 +46,8 @@ struct sdio_func {
        unsigned max_blksize; /* maximum block size */
        unsigned cur_blksize; /* current block size */

        unsigned enable_timeout; /* max enable timeout in msec */

        unsigned int state; /* function state */
#define SDIO_STATE_PRESENT (1<<0) /* present in sysfs */

@@ -120,23 +122,22 @@ extern int sdio_set_block_size(struct sdio_func *func, unsigned blksz);
extern int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler);
extern int sdio_release_irq(struct sdio_func *func);

extern unsigned char sdio_readb(struct sdio_func *func,
        unsigned int addr, int *err_ret);
extern unsigned short sdio_readw(struct sdio_func *func,
        unsigned int addr, int *err_ret);
extern unsigned long sdio_readl(struct sdio_func *func,
        unsigned int addr, int *err_ret);
extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz);

extern u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret);
extern u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret);
extern u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret);

extern int sdio_memcpy_fromio(struct sdio_func *func, void *dst,
        unsigned int addr, int count);
extern int sdio_readsb(struct sdio_func *func, void *dst,
        unsigned int addr, int count);

extern void sdio_writeb(struct sdio_func *func, unsigned char b,
extern void sdio_writeb(struct sdio_func *func, u8 b,
        unsigned int addr, int *err_ret);
extern void sdio_writew(struct sdio_func *func, unsigned short b,
extern void sdio_writew(struct sdio_func *func, u16 b,
        unsigned int addr, int *err_ret);
extern void sdio_writel(struct sdio_func *func, unsigned long b,
extern void sdio_writel(struct sdio_func *func, u32 b,
        unsigned int addr, int *err_ret);

extern int sdio_memcpy_toio(struct sdio_func *func, unsigned int addr,
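Narrowing the accessors to u8/u16/u32 does not change the calling pattern; a hedged read-modify-write sketch (the register offset 0x14 is made up):

static void example_rmw(struct sdio_func *func)
{
        int err;
        u8 val;

        sdio_claim_host(func);
        val = sdio_readb(func, 0x14, &err);  /* err != 0 means val is undefined */
        if (!err)
                sdio_writeb(func, val | 0x01, 0x14, &err);
        sdio_release_host(func);
}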
include/linux/mmiotrace.h (new file, 85 lines)
@@ -0,0 +1,85 @@
#ifndef MMIOTRACE_H
#define MMIOTRACE_H

#include <linux/types.h>
#include <linux/list.h>

struct kmmio_probe;
struct pt_regs;

typedef void (*kmmio_pre_handler_t)(struct kmmio_probe *,
                                    struct pt_regs *, unsigned long addr);
typedef void (*kmmio_post_handler_t)(struct kmmio_probe *,
                                     unsigned long condition, struct pt_regs *);

struct kmmio_probe {
        struct list_head list; /* kmmio internal list */
        unsigned long addr; /* start location of the probe point */
        unsigned long len; /* length of the probe region */
        kmmio_pre_handler_t pre_handler; /* Called before addr is executed. */
        kmmio_post_handler_t post_handler; /* Called after addr is executed */
        void *private;
};

/* kmmio is active by some kmmio_probes? */
static inline int is_kmmio_active(void)
{
        extern unsigned int kmmio_count;
        return kmmio_count;
}

extern int register_kmmio_probe(struct kmmio_probe *p);
extern void unregister_kmmio_probe(struct kmmio_probe *p);

/* Called from page fault handler. */
extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);

/* Called from ioremap.c */
#ifdef CONFIG_MMIOTRACE
extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
                              void __iomem *addr);
extern void mmiotrace_iounmap(volatile void __iomem *addr);
#else
static inline void mmiotrace_ioremap(resource_size_t offset,
                                     unsigned long size, void __iomem *addr)
{
}

static inline void mmiotrace_iounmap(volatile void __iomem *addr)
{
}
#endif /* CONFIG_MMIOTRACE_HOOKS */

enum mm_io_opcode {
        MMIO_READ = 0x1, /* struct mmiotrace_rw */
        MMIO_WRITE = 0x2, /* struct mmiotrace_rw */
        MMIO_PROBE = 0x3, /* struct mmiotrace_map */
        MMIO_UNPROBE = 0x4, /* struct mmiotrace_map */
        MMIO_MARKER = 0x5, /* raw char data */
        MMIO_UNKNOWN_OP = 0x6, /* struct mmiotrace_rw */
};

struct mmiotrace_rw {
        resource_size_t phys; /* PCI address of register */
        unsigned long value;
        unsigned long pc; /* optional program counter */
        int map_id;
        unsigned char opcode; /* one of MMIO_{READ,WRITE,UNKNOWN_OP} */
        unsigned char width; /* size of register access in bytes */
};

struct mmiotrace_map {
        resource_size_t phys; /* base address in PCI space */
        unsigned long virt; /* base virtual address */
        unsigned long len; /* mapping size */
        int map_id;
        unsigned char opcode; /* MMIO_PROBE or MMIO_UNPROBE */
};

/* in kernel/trace/trace_mmiotrace.c */
extern void enable_mmiotrace(void);
extern void disable_mmiotrace(void);
extern void mmio_trace_rw(struct mmiotrace_rw *rw);
extern void mmio_trace_mapping(struct mmiotrace_map *map);

#endif /* MMIOTRACE_H */
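A hedged sketch of arming a probe over a mapped region with the API above; the handler bodies and the watched mapping are the caller's:

static void example_pre(struct kmmio_probe *p, struct pt_regs *regs,
                        unsigned long addr)
{
        /* runs before the faulting MMIO access is replayed */
}

static void example_post(struct kmmio_probe *p, unsigned long cond,
                         struct pt_regs *regs)
{
        /* runs after the access has completed */
}

static struct kmmio_probe example_probe = {
        .len          = PAGE_SIZE,
        .pre_handler  = example_pre,
        .post_handler = example_post,
        /* .addr is filled in with the ioremap()ed address to watch */
};

/* register_kmmio_probe(&example_probe); ... unregister_kmmio_probe(&example_probe); */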
@@ -159,6 +159,15 @@ struct ap_device_id {

#define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01

/* s390 css bus devices (subchannels) */
struct css_device_id {
        __u8 match_flags;
        __u8 type; /* subchannel type */
        __u16 pad2;
        __u32 pad3;
        kernel_ulong_t driver_data;
};

#define ACPI_ID_LEN 16 /* only 9 bytes needed here, 16 bytes are used */
                       /* to workaround crosscompile issues */

@@ -11,11 +11,21 @@
 */
#ifdef CONFIG_BLOCK

struct mpage_data {
        struct bio *bio;
        sector_t last_block_in_bio;
        get_block_t *get_block;
        unsigned use_writepage;
};

struct writeback_control;

struct bio *mpage_bio_submit(int rw, struct bio *bio);
int mpage_readpages(struct address_space *mapping, struct list_head *pages,
                    unsigned nr_pages, get_block_t get_block);
int mpage_readpage(struct page *page, get_block_t get_block);
int __mpage_writepage(struct page *page, struct writeback_control *wbc,
                      void *data);
int mpage_writepages(struct address_space *mapping,
                     struct writeback_control *wbc, get_block_t get_block);
int mpage_writepage(struct page *page, get_block_t *get_block,
@@ -12,9 +12,19 @@
#include <linux/magic.h>

/* Default timeout values */
#define NFS_DEF_UDP_TIMEO (11)
#define NFS_DEF_UDP_RETRANS (3)
#define NFS_DEF_TCP_TIMEO (600)
#define NFS_DEF_TCP_RETRANS (2)

#define NFS_MAX_UDP_TIMEOUT (60*HZ)
#define NFS_MAX_TCP_TIMEOUT (600*HZ)

#define NFS_DEF_ACREGMIN (3)
#define NFS_DEF_ACREGMAX (60)
#define NFS_DEF_ACDIRMIN (30)
#define NFS_DEF_ACDIRMAX (60)

/*
 * When flushing a cluster of dirty pages, there can be different
 * strategies:
119
include/linux/nfs_iostat.h
Normal file
119
include/linux/nfs_iostat.h
Normal file
@@ -0,0 +1,119 @@
/*
* User-space visible declarations for NFS client per-mount
* point statistics
*
* Copyright (C) 2005, 2006 Chuck Lever <cel@netapp.com>
*
* NFS client per-mount statistics provide information about the
* health of the NFS client and the health of each NFS mount point.
* Generally these are not for detailed problem diagnosis, but
* simply to indicate that there is a problem.
*
* These counters are not meant to be human-readable, but are meant
* to be integrated into system monitoring tools such as "sar" and
* "iostat". As such, the counters are sampled by the tools over
* time, and are never zeroed after a file system is mounted.
* Moving averages can be computed by the tools by taking the
* difference between two instantaneous samples and dividing that
* by the time between the samples.
*/

#ifndef _LINUX_NFS_IOSTAT
#define _LINUX_NFS_IOSTAT

#define NFS_IOSTAT_VERS "1.0"

/*
* NFS byte counters
*
* 1. SERVER - the number of payload bytes read from or written
* to the server by the NFS client via an NFS READ or WRITE
* request.
*
* 2. NORMAL - the number of bytes read or written by applications
* via the read(2) and write(2) system call interfaces.
*
* 3. DIRECT - the number of bytes read or written from files
* opened with the O_DIRECT flag.
*
* These counters give a view of the data throughput into and out
* of the NFS client. Comparing the number of bytes requested by
* an application with the number of bytes the client requests from
* the server can provide an indication of client efficiency
* (per-op, cache hits, etc).
*
* These counters can also help characterize which access methods
* are in use. DIRECT by itself shows whether there is any O_DIRECT
* traffic. NORMAL + DIRECT shows how much data is going through
* the system call interface. A large amount of SERVER traffic
* without much NORMAL or DIRECT traffic shows that applications
* are using mapped files.
*
* NFS page counters
*
* These count the number of pages read or written via nfs_readpage(),
* nfs_readpages(), or their write equivalents.
*
* NB: When adding new byte counters, please include the measured
* units in the name of each byte counter to help users of this
* interface determine what exactly is being counted.
*/
enum nfs_stat_bytecounters {
NFSIOS_NORMALREADBYTES = 0,
NFSIOS_NORMALWRITTENBYTES,
NFSIOS_DIRECTREADBYTES,
NFSIOS_DIRECTWRITTENBYTES,
NFSIOS_SERVERREADBYTES,
NFSIOS_SERVERWRITTENBYTES,
NFSIOS_READPAGES,
NFSIOS_WRITEPAGES,
__NFSIOS_BYTESMAX,
};
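Because these counters are monotonic and never reset, a monitoring tool turns them into rates by differencing two samples, exactly as the header comment above describes. A minimal sketch of that arithmetic (the sample struct and its timestamp source are hypothetical, and it assumes this user-visible header is available to the tool):

#include <stdint.h>
#include <linux/nfs_iostat.h>

struct nfs_iostat_sample {
	uint64_t bytes[__NFSIOS_BYTESMAX]; /* one slot per byte counter */
	double when;                       /* sample time, in seconds */
};

/* Average wire read throughput (bytes/sec) between two samples. */
static double server_read_rate(const struct nfs_iostat_sample *prev,
			       const struct nfs_iostat_sample *cur)
{
	uint64_t delta = cur->bytes[NFSIOS_SERVERREADBYTES] -
			 prev->bytes[NFSIOS_SERVERREADBYTES];

	return (double)delta / (cur->when - prev->when);
}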

/*
* NFS event counters
*
* These counters provide a low-overhead way of monitoring client
* activity without enabling NFS trace debugging. The counters
* show the rate at which VFS requests are made, and how often the
* client invalidates its data and attribute caches. This allows
* system administrators to monitor such things as how close-to-open
* is working, and answer questions such as "why are there so many
* GETATTR requests on the wire?"
*
* They also count anomalous events such as short reads and writes,
* silly renames due to close-after-delete, and operations that
* change the size of a file (such operations can often be the
* source of data corruption if applications aren't using file
* locking properly).
*/
enum nfs_stat_eventcounters {
NFSIOS_INODEREVALIDATE = 0,
NFSIOS_DENTRYREVALIDATE,
NFSIOS_DATAINVALIDATE,
NFSIOS_ATTRINVALIDATE,
NFSIOS_VFSOPEN,
NFSIOS_VFSLOOKUP,
NFSIOS_VFSACCESS,
NFSIOS_VFSUPDATEPAGE,
NFSIOS_VFSREADPAGE,
NFSIOS_VFSREADPAGES,
NFSIOS_VFSWRITEPAGE,
NFSIOS_VFSWRITEPAGES,
NFSIOS_VFSGETDENTS,
NFSIOS_VFSSETATTR,
NFSIOS_VFSFLUSH,
NFSIOS_VFSFSYNC,
NFSIOS_VFSLOCK,
NFSIOS_VFSRELEASE,
NFSIOS_CONGESTIONWAIT,
NFSIOS_SETATTRTRUNC,
NFSIOS_EXTENDWRITE,
NFSIOS_SILLYRENAME,
NFSIOS_SHORTREAD,
NFSIOS_SHORTWRITE,
NFSIOS_DELAY,
__NFSIOS_COUNTSMAX,
};

#endif /* _LINUX_NFS_IOSTAT */
@@ -27,9 +27,12 @@
/*
* Valid flags for a dirty buffer
*/
#define PG_BUSY 0
#define PG_NEED_COMMIT 1
#define PG_NEED_RESCHED 2
enum {
PG_BUSY = 0,
PG_CLEAN,
PG_NEED_COMMIT,
PG_NEED_RESCHED,
};

struct nfs_inode;
struct nfs_page {
@@ -829,9 +829,8 @@ struct nfs_rpc_ops {
int (*write_done) (struct rpc_task *, struct nfs_write_data *);
void (*commit_setup) (struct nfs_write_data *, struct rpc_message *);
int (*commit_done) (struct rpc_task *, struct nfs_write_data *);
int (*file_open) (struct inode *, struct file *);
int (*file_release) (struct inode *, struct file *);
int (*lock)(struct file *, int, struct file_lock *);
int (*lock_check_bounds)(const struct file_lock *);
void (*clear_acl_cache)(struct inode *);
};

@@ -24,4 +24,7 @@ static inline void of_device_free(struct of_device *dev)
of_release_dev(&dev->dev);
}

extern ssize_t of_device_get_modalias(struct of_device *ofdev,
char *str, ssize_t len);

#endif /* _LINUX_OF_DEVICE_H */

@@ -17,8 +17,7 @@
#ifndef LINUX_PCI_H
#define LINUX_PCI_H

/* Include the pci register defines */
#include <linux/pci_regs.h>
#include <linux/pci_regs.h> /* The pci register defines */

/*
* The PCI interface treats multi-function devices as independent
@@ -49,12 +48,22 @@
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/kobject.h>
#include <asm/atomic.h>
#include <linux/device.h>

/* Include the ID list */
#include <linux/pci_ids.h>

/* pci_slot represents a physical slot */
struct pci_slot {
struct pci_bus *bus; /* The bus this slot is on */
struct list_head list; /* node in list of slots on this bus */
struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */
unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
struct kobject kobj;
};

/* File state for mmap()s on /proc/bus/pci/X/Y */
enum pci_mmap_state {
pci_mmap_io,
@@ -147,6 +156,7 @@ struct pci_dev {

void *sysdata; /* hook for sys-specific extension */
struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */
struct pci_slot *slot; /* Physical slot this device is in */

unsigned int devfn; /* encoded device & function index */
unsigned short vendor;
@@ -172,6 +182,13 @@ struct pci_dev {
pci_power_t current_state; /* Current operating state. In ACPI-speak,
this is D0-D3, D0 being fully functional,
and D3 being off. */
int pm_cap; /* PM capability offset in the
configuration space */
unsigned int pme_support:5; /* Bitmask of states from which PME#
can be generated */
unsigned int d1_support:1; /* Low power state D1 is supported */
unsigned int d2_support:1; /* Low power state D2 is supported */
unsigned int no_d1d2:1; /* Only allow D0 and D3 */

#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state. */
@@ -196,7 +213,6 @@ struct pci_dev {
unsigned int is_added:1;
unsigned int is_busmaster:1; /* device is busmaster */
unsigned int no_msi:1; /* device may not use msi */
unsigned int no_d1d2:1; /* only allow d0 or d3 */
unsigned int block_ucfg_access:1; /* userspace config space access is blocked */
unsigned int broken_parity_status:1; /* Device generates false positive parity */
unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
@@ -273,6 +289,7 @@ struct pci_bus {
struct list_head children; /* list of child buses */
struct list_head devices; /* list of devices on this bus */
struct pci_dev *self; /* bridge device as seen by parent */
struct list_head slots; /* list of slots on this bus */
struct resource *resource[PCI_BUS_NUM_RESOURCES];
/* address space routed to this bus */

@@ -334,7 +351,7 @@ struct pci_bus_region {
struct pci_dynids {
spinlock_t lock; /* protects list, index */
struct list_head list; /* for IDs added at runtime */
unsigned int use_driver_data:1; /* pci_driver->driver_data is used */
unsigned int use_driver_data:1; /* pci_device_id->driver_data is used */
};

/* ---------------------------------------------------------------- */
@@ -396,7 +413,7 @@ struct pci_driver {
int (*resume_early) (struct pci_dev *dev);
int (*resume) (struct pci_dev *dev); /* Device woken up */
void (*shutdown) (struct pci_dev *dev);

struct pm_ext_ops *pm;
struct pci_error_handlers *err_handler;
struct device_driver driver;
struct pci_dynids dynids;
@@ -495,6 +512,10 @@ struct pci_bus *pci_create_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
int busnr);
struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
const char *name);
void pci_destroy_slot(struct pci_slot *slot);
void pci_update_slot_number(struct pci_slot *slot, int slot_nr);
int pci_scan_slot(struct pci_bus *bus, int devfn);
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
@@ -624,6 +645,8 @@ int pci_restore_state(struct pci_dev *dev);
int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
int pci_prepare_to_sleep(struct pci_dev *dev);
int pci_back_from_sleep(struct pci_dev *dev);

/* Functions for PCI Hotplug drivers to use */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
@@ -845,6 +868,11 @@ static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
return -EIO;
}

static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
return -EIO;
}

static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
unsigned int size)
{
@@ -983,9 +1011,9 @@ static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
/* If you want to know what to call your pci_dev, ask this function.
* Again, it's a wrapper around the generic device.
*/
static inline char *pci_name(struct pci_dev *pdev)
static inline const char *pci_name(struct pci_dev *pdev)
{
return pdev->dev.bus_id;
return dev_name(&pdev->dev);
}
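The switch to dev_name() also constifies the result, so callers must treat the returned name as read-only. A trivial usage sketch (the function is invented):

static void example_announce(struct pci_dev *pdev)
{
	const char *name = pci_name(pdev); /* e.g. "0000:00:1f.2" */

	printk(KERN_INFO "PCI device %s is ready\n", name);
}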

@@ -1020,7 +1048,9 @@ enum pci_fixup_pass {
pci_fixup_header, /* After reading configuration header */
pci_fixup_final, /* Final phase of device fixups */
pci_fixup_enable, /* pci_enable_device() time */
pci_fixup_resume, /* pci_enable_device() time */
pci_fixup_resume, /* pci_device_resume() */
pci_fixup_suspend, /* pci_device_suspend() */
pci_fixup_resume_early, /* pci_device_resume_early() */
};

/* Anonymous variables would be nice... */
@@ -1042,6 +1072,12 @@ enum pci_fixup_pass {
#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
resume##vendor##device##hook, vendor, device, hook)
#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
resume_early##vendor##device##hook, vendor, device, hook)
#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
suspend##vendor##device##hook, vendor, device, hook)
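For example, a hypothetical quirk that must run before a device's driver resumes could be registered with the new early-resume hook (the device ID and the register write are placeholders, not a real quirk):

static void quirk_restore_foo(struct pci_dev *dev)
{
	/* hypothetical: firmware clears this register across suspend */
	pci_write_config_byte(dev, 0x40, 0x01);
}
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x1234,
			       quirk_restore_foo);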

void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
@@ -1066,7 +1102,10 @@ extern int pci_pci_problems;
extern unsigned long pci_cardbus_io_size;
extern unsigned long pci_cardbus_mem_size;

extern int pcibios_add_platform_entries(struct pci_dev *dev);
int pcibios_add_platform_entries(struct pci_dev *dev);
void pcibios_disable_device(struct pci_dev *dev);
int pcibios_set_pcie_reset_state(struct pci_dev *dev,
enum pcie_reset_state state);

#ifdef CONFIG_PCI_MMCONFIG
extern void __init pci_mmcfg_early_init(void);

@@ -95,9 +95,6 @@ struct hotplug_slot_attribute {
* @get_adapter_status: Called to see if an adapter is present in the slot or not.
* If this field is NULL, the value passed in the struct hotplug_slot_info
* will be used when this value is requested by a user.
* @get_address: Called to get pci address of a slot.
* If this field is NULL, the value passed in the struct hotplug_slot_info
* will be used when this value is requested by a user.
* @get_max_bus_speed: Called to get the max bus speed for a slot.
* If this field is NULL, the value passed in the struct hotplug_slot_info
* will be used when this value is requested by a user.
@@ -120,7 +117,6 @@ struct hotplug_slot_ops {
int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
int (*get_address) (struct hotplug_slot *slot, u32 *value);
int (*get_max_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value);
int (*get_cur_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value);
};
@@ -140,7 +136,6 @@ struct hotplug_slot_info {
u8 attention_status;
u8 latch_status;
u8 adapter_status;
u32 address;
enum pci_bus_speed max_bus_speed;
enum pci_bus_speed cur_bus_speed;
};
@@ -166,15 +161,14 @@ struct hotplug_slot {

/* Variables below this are for use only by the hotplug pci core. */
struct list_head slot_list;
struct kobject kobj;
struct pci_slot *pci_slot;
};
#define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj)

extern int pci_hp_register (struct hotplug_slot *slot);
extern int pci_hp_deregister (struct hotplug_slot *slot);
extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr);
extern int pci_hp_deregister(struct hotplug_slot *slot);
extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot,
struct hotplug_slot_info *info);
extern struct kset *pci_hotplug_slots_kset;

/* PCI Setting Record (Type 0) */
struct hpp_type0 {
@@ -227,9 +221,9 @@ struct hotplug_params {
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/actypes.h>
extern acpi_status acpi_run_oshp(acpi_handle handle);
extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
struct hotplug_params *hpp);
int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags);
int acpi_root_bridge(acpi_handle handle);
#endif
#endif

@@ -2171,6 +2171,8 @@
#define PCI_DEVICE_ID_MPC8544 0x0033
#define PCI_DEVICE_ID_MPC8572E 0x0040
#define PCI_DEVICE_ID_MPC8572 0x0041
#define PCI_DEVICE_ID_MPC8536E 0x0050
#define PCI_DEVICE_ID_MPC8536 0x0051
#define PCI_DEVICE_ID_MPC8641 0x7010
#define PCI_DEVICE_ID_MPC8641D 0x7011
#define PCI_DEVICE_ID_MPC8610 0x7018
@@ -2188,6 +2190,7 @@
#define PCI_DEVICE_ID_JMICRON_JMB366 0x2366
#define PCI_DEVICE_ID_JMICRON_JMB368 0x2368
#define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381
#define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382
#define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383

#define PCI_VENDOR_ID_KORENIX 0x1982

@@ -231,6 +231,7 @@
#define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */
#define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */
#define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */
#define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */
#define PCI_PM_CTRL 4 /* PM control and status register */
#define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */
#define PCI_PM_CTRL_NO_SOFT_RESET 0x0004 /* No reset for D3hot->D0 */

@@ -35,7 +35,7 @@ int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount);
void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
@@ -44,13 +44,19 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
s64 ret = __percpu_counter_sum(fbc);
s64 ret = __percpu_counter_sum(fbc, 0);
return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
{
return __percpu_counter_sum(fbc, 1);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
return __percpu_counter_sum(fbc);
return __percpu_counter_sum(fbc, 0);
}
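In context: the new `set` flag asks __percpu_counter_sum() to fold the per-CPU deltas back into the central count while summing, which is what percpu_counter_sum_and_set() exposes. A rough usage sketch (the counter is illustrative; percpu_counter_init() is assumed to be the non-IRQ variant of the initializer declared above):

static struct percpu_counter nr_widgets;

static void widgets_example(void)
{
	percpu_counter_init(&nr_widgets, 0);
	percpu_counter_add(&nr_widgets, 1); /* cheap: batched per-CPU update */

	/* accurate total; also writes the sum back into the shared count */
	printk(KERN_INFO "widgets: %lld\n",
	       (long long)percpu_counter_sum_and_set(&nr_widgets));

	percpu_counter_destroy(&nr_widgets);
}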

static inline s64 percpu_counter_read(struct percpu_counter *fbc)

@@ -53,6 +53,7 @@ struct platform_driver {
int (*suspend_late)(struct platform_device *, pm_message_t state);
int (*resume_early)(struct platform_device *);
int (*resume)(struct platform_device *);
struct pm_ext_ops *pm;
struct device_driver driver;
};

@@ -112,7 +112,9 @@ typedef struct pm_message {
int event;
} pm_message_t;

/*
/**
* struct pm_ops - device PM callbacks
*
* Several driver power state transitions are externally visible, affecting
* the state of pending I/O queues and (for drivers that touch hardware)
* interrupts, wakeups, DMA, and other hardware state. There may also be
@@ -120,6 +122,284 @@ typedef struct pm_message {
* to the rest of the driver stack (such as a driver that's ON gating off
* clocks which are not in active use).
*
* The externally visible transitions are handled with the help of the following
* callbacks included in this structure:
*
* @prepare: Prepare the device for the upcoming transition, but do NOT change
* its hardware state. Prevent new children of the device from being
* registered after @prepare() returns (the driver's subsystem and
* generally the rest of the kernel is supposed to prevent new calls to the
* probe method from being made too once @prepare() has succeeded). If
* @prepare() detects a situation it cannot handle (e.g. registration of a
* child already in progress), it may return -EAGAIN, so that the PM core
* can execute it once again (e.g. after the new child has been registered)
* to recover from the race condition. This method is executed for all
* kinds of suspend transitions and is followed by one of the suspend
* callbacks: @suspend(), @freeze(), or @poweroff().
* The PM core executes @prepare() for all devices before starting to
* execute suspend callbacks for any of them, so drivers may assume all of
* the other devices to be present and functional while @prepare() is being
* executed. In particular, it is safe to make GFP_KERNEL memory
* allocations from within @prepare(). However, drivers may NOT assume
* anything about the availability of the user space at that time and it
* is not correct to request firmware from within @prepare() (it's too
* late to do that). [To work around this limitation, drivers may
* register suspend and hibernation notifiers that are executed before the
* freezing of tasks.]
*
* @complete: Undo the changes made by @prepare(). This method is executed for
* all kinds of resume transitions, following one of the resume callbacks:
* @resume(), @thaw(), @restore(). Also called if the state transition
* fails before the driver's suspend callback (@suspend(), @freeze(),
* @poweroff()) can be executed (e.g. if the suspend callback fails for one
* of the other devices that the PM core has unsuccessfully attempted to
* suspend earlier).
* The PM core executes @complete() after it has executed the appropriate
* resume callback for all devices.
*
* @suspend: Executed before putting the system into a sleep state in which the
* contents of main memory are preserved. Quiesce the device, put it into
* a low power state appropriate for the upcoming system state (such as
* PCI_D3hot), and enable wakeup events as appropriate.
*
* @resume: Executed after waking the system up from a sleep state in which the
* contents of main memory were preserved. Put the device into the
* appropriate state, according to the information saved in memory by the
* preceding @suspend(). The driver starts working again, responding to
* hardware events and software requests. The hardware may have gone
* through a power-off reset, or it may have maintained state from the
* previous suspend() which the driver may rely on while resuming. On most
* platforms, there are no restrictions on availability of resources like
* clocks during @resume().
*
* @freeze: Hibernation-specific, executed before creating a hibernation image.
* Quiesce operations so that a consistent image can be created, but do NOT
* otherwise put the device into a low power device state and do NOT emit
* system wakeup events. Save in main memory the device settings to be
* used by @restore() during the subsequent resume from hibernation or by
* the subsequent @thaw(), if the creation of the image or the restoration
* of main memory contents from it fails.
*
* @thaw: Hibernation-specific, executed after creating a hibernation image OR
* if the creation of the image fails. Also executed after a failing
* attempt to restore the contents of main memory from such an image.
* Undo the changes made by the preceding @freeze(), so the device can be
* operated in the same way as immediately before the call to @freeze().
*
* @poweroff: Hibernation-specific, executed after saving a hibernation image.
* Quiesce the device, put it into a low power state appropriate for the
* upcoming system state (such as PCI_D3hot), and enable wakeup events as
* appropriate.
*
* @restore: Hibernation-specific, executed after restoring the contents of main
* memory from a hibernation image. Driver starts working again,
* responding to hardware events and software requests. Drivers may NOT
* make ANY assumptions about the hardware state right prior to @restore().
* On most platforms, there are no restrictions on availability of
* resources like clocks during @restore().
*
* All of the above callbacks, except for @complete(), return error codes.
* However, the error codes returned by the resume operations, @resume(),
* @thaw(), and @restore(), do not cause the PM core to abort the resume
* transition during which they are returned. The error codes returned in
* those cases are only printed by the PM core to the system logs for debugging
* purposes. Still, it is recommended that drivers only return error codes
* from their resume methods in case of an unrecoverable failure (i.e. when the
* device being handled refuses to resume and becomes unusable) to allow us to
* modify the PM core in the future, so that it can avoid attempting to handle
* devices that failed to resume and their children.
*
* It is allowed to unregister devices while the above callbacks are being
* executed. However, it is not allowed to unregister a device from within any
* of its own callbacks.
*/

struct pm_ops {
int (*prepare)(struct device *dev);
void (*complete)(struct device *dev);
int (*suspend)(struct device *dev);
int (*resume)(struct device *dev);
int (*freeze)(struct device *dev);
int (*thaw)(struct device *dev);
int (*poweroff)(struct device *dev);
int (*restore)(struct device *dev);
};
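A driver fills in only the callbacks it cares about and leaves the rest NULL. A minimal sketch under those rules (the driver name and callback bodies are invented):

static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware and save any state foo_resume() will need */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* reprogram the hardware from the state saved by foo_suspend() */
	return 0;
}

static struct pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume = foo_resume,
	/* hibernation callbacks intentionally left NULL in this sketch */
};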

/**
* struct pm_ext_ops - extended device PM callbacks
*
* Some devices require certain operations related to suspend and hibernation
* to be carried out with interrupts disabled. Thus, 'struct pm_ext_ops' below
* is defined, adding callbacks to be executed with interrupts disabled to
* 'struct pm_ops'.
*
* The following callbacks included in 'struct pm_ext_ops' are executed with
* the nonboot CPUs switched off and with interrupts disabled on the only
* functional CPU. They also are executed with the PM core list of devices
* locked, so they must NOT unregister any devices.
*
* @suspend_noirq: Complete the operations of ->suspend() by carrying out any
* actions required for suspending the device that need interrupts to be
* disabled
*
* @resume_noirq: Prepare for the execution of ->resume() by carrying out any
* actions required for resuming the device that need interrupts to be
* disabled
*
* @freeze_noirq: Complete the operations of ->freeze() by carrying out any
* actions required for freezing the device that need interrupts to be
* disabled
*
* @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any
* actions required for thawing the device that need interrupts to be
* disabled
*
* @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any
* actions required for handling the device that need interrupts to be
* disabled
*
* @restore_noirq: Prepare for the execution of ->restore() by carrying out any
* actions required for restoring the operations of the device that need
* interrupts to be disabled
*
* All of the above callbacks return error codes, but the error codes returned
* by the resume operations, @resume_noirq(), @thaw_noirq(), and
* @restore_noirq(), do not cause the PM core to abort the resume transition
* during which they are returned. The error codes returned in those cases are
* only printed by the PM core to the system logs for debugging purposes.
* Still, as stated above, it is recommended that drivers only return error
* codes from their resume methods if the device being handled fails to resume
* and is not usable any more.
*/

struct pm_ext_ops {
struct pm_ops base;
int (*suspend_noirq)(struct device *dev);
int (*resume_noirq)(struct device *dev);
int (*freeze_noirq)(struct device *dev);
int (*thaw_noirq)(struct device *dev);
int (*poweroff_noirq)(struct device *dev);
int (*restore_noirq)(struct device *dev);
};
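Both struct pci_driver and struct platform_driver in the hunks above now carry a struct pm_ext_ops *pm pointer, so a driver can extend the previous sketch with _noirq stages. Continuing with the same invented callbacks (remember these run with interrupts off, so no sleeping):

static int foo_suspend_noirq(struct device *dev)
{
	/* final register writes; interrupts are already disabled here */
	return 0;
}

static struct pm_ext_ops foo_pm_ext_ops = {
	.base = {
		.suspend = foo_suspend,
		.resume = foo_resume,
	},
	.suspend_noirq = foo_suspend_noirq,
};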

/**
* PM_EVENT_ messages
*
* The following PM_EVENT_ messages are defined for the internal use of the PM
* core, in order to provide a mechanism allowing the high level suspend and
* hibernation code to convey the necessary information to the device PM core
* code:
*
* ON No transition.
*
* FREEZE System is going to hibernate, call ->prepare() and ->freeze()
* for all devices.
*
* SUSPEND System is going to suspend, call ->prepare() and ->suspend()
* for all devices.
*
* HIBERNATE Hibernation image has been saved, call ->prepare() and
* ->poweroff() for all devices.
*
* QUIESCE Contents of main memory are going to be restored from a (loaded)
* hibernation image, call ->prepare() and ->freeze() for all
* devices.
*
* RESUME System is resuming, call ->resume() and ->complete() for all
* devices.
*
* THAW Hibernation image has been created, call ->thaw() and
* ->complete() for all devices.
*
* RESTORE Contents of main memory have been restored from a hibernation
* image, call ->restore() and ->complete() for all devices.
*
* RECOVER Creation of a hibernation image or restoration of the main
* memory contents from a hibernation image has failed, call
* ->thaw() and ->complete() for all devices.
*/

#define PM_EVENT_ON 0x0000
#define PM_EVENT_FREEZE 0x0001
#define PM_EVENT_SUSPEND 0x0002
#define PM_EVENT_HIBERNATE 0x0004
#define PM_EVENT_QUIESCE 0x0008
#define PM_EVENT_RESUME 0x0010
#define PM_EVENT_THAW 0x0020
#define PM_EVENT_RESTORE 0x0040
#define PM_EVENT_RECOVER 0x0080

#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)

#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, })
#define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, })
#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
#define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, })
#define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, })
#define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, })
#define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, })
|

/**
* Device power management states
*
* These state labels are used internally by the PM core to indicate the current
* status of a device with respect to the PM core operations.
*
* DPM_ON Device is regarded as operational. Set this way
* initially and when ->complete() is about to be called.
* Also set when ->prepare() fails.
*
* DPM_PREPARING Device is going to be prepared for a PM transition. Set
* when ->prepare() is about to be called.
*
* DPM_RESUMING Device is going to be resumed. Set when ->resume(),
* ->thaw(), or ->restore() is about to be called.
*
* DPM_SUSPENDING Device has been prepared for a power transition. Set
* when ->prepare() has just succeeded.
*
* DPM_OFF Device is regarded as inactive. Set immediately after
* ->suspend(), ->freeze(), or ->poweroff() has succeeded.
* Also set when ->resume_noirq(), ->thaw_noirq(), or
* ->restore_noirq() is about to be called.
*
* DPM_OFF_IRQ Device is in a "deep sleep". Set immediately after
* ->suspend_noirq(), ->freeze_noirq(), or
* ->poweroff_noirq() has just succeeded.
*/

enum dpm_state {
DPM_INVALID,
DPM_ON,
DPM_PREPARING,
DPM_RESUMING,
DPM_SUSPENDING,
DPM_OFF,
DPM_OFF_IRQ,
};

struct dev_pm_info {
pm_message_t power_state;
unsigned can_wakeup:1;
unsigned should_wakeup:1;
enum dpm_state status; /* Owned by the PM core */
#ifdef CONFIG_PM_SLEEP
struct list_head entry;
#endif
};

/*
* The PM_EVENT_ messages are also used by drivers implementing the legacy
* suspend framework, based on the ->suspend() and ->resume() callbacks common
* for suspend and hibernation transitions, according to the rules below.
*/

/* Necessary, because several drivers use PM_EVENT_PRETHAW */
#define PM_EVENT_PRETHAW PM_EVENT_QUIESCE

/*
* One transition is triggered by resume(), after a suspend() call; the
* message is implicit:
*
@@ -164,35 +444,13 @@ typedef struct pm_message {
* or from system low-power states such as standby or suspend-to-RAM.
*/

#define PM_EVENT_ON 0
#define PM_EVENT_FREEZE 1
#define PM_EVENT_SUSPEND 2
#define PM_EVENT_HIBERNATE 4
#define PM_EVENT_PRETHAW 8

#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)

#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, })
#define PMSG_PRETHAW ((struct pm_message){ .event = PM_EVENT_PRETHAW, })
#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, })

struct dev_pm_info {
pm_message_t power_state;
unsigned can_wakeup:1;
unsigned should_wakeup:1;
bool sleeping:1; /* Owned by the PM core */
#ifdef CONFIG_PM_SLEEP
struct list_head entry;
#endif
};

extern int device_power_down(pm_message_t state);
extern void device_power_up(void);
extern void device_resume(void);

#ifdef CONFIG_PM_SLEEP
extern void device_pm_lock(void);
extern void device_power_up(pm_message_t state);
extern void device_resume(pm_message_t state);

extern void device_pm_unlock(void);
extern int device_power_down(pm_message_t state);
extern int device_suspend(pm_message_t state);
extern int device_prepare_suspend(pm_message_t state);

@@ -35,6 +35,11 @@ static inline void device_init_wakeup(struct device *dev, int val)
dev->power.can_wakeup = dev->power.should_wakeup = !!val;
}

static inline void device_set_wakeup_capable(struct device *dev, int val)
{
dev->power.can_wakeup = !!val;
}

static inline int device_can_wakeup(struct device *dev)
{
return dev->power.can_wakeup;
@@ -47,21 +52,7 @@ static inline void device_set_wakeup_enable(struct device *dev, int val)

static inline int device_may_wakeup(struct device *dev)
{
return dev->power.can_wakeup & dev->power.should_wakeup;
}

/*
* Platform hook to activate device wakeup capability, if that's not already
* handled by enable_irq_wake() etc.
* Returns zero on success, else negative errno
*/
extern int (*platform_enable_wakeup)(struct device *dev, int is_on);

static inline int call_platform_enable_wakeup(struct device *dev, int is_on)
{
if (platform_enable_wakeup)
return (*platform_enable_wakeup)(dev, is_on);
return 0;
return dev->power.can_wakeup && dev->power.should_wakeup;
}

#else /* !CONFIG_PM */
@@ -72,6 +63,8 @@ static inline void device_init_wakeup(struct device *dev, int val)
dev->power.can_wakeup = !!val;
}

static inline void device_set_wakeup_capable(struct device *dev, int val) { }

static inline int device_can_wakeup(struct device *dev)
{
return dev->power.can_wakeup;
@@ -80,11 +73,6 @@ static inline int device_can_wakeup(struct device *dev)

#define device_set_wakeup_enable(dev, val) do {} while (0)
#define device_may_wakeup(dev) 0

static inline int call_platform_enable_wakeup(struct device *dev, int is_on)
{
return 0;
}

#endif /* !CONFIG_PM */

#endif /* _LINUX_PM_WAKEUP_H */
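Tying the wakeup helpers together: a probe marks the device wakeup-capable, and the suspend path consults the combined capability/policy test. A sketch (the names and the IRQ detail are illustrative):

static int foo_wakeup_probe(struct device *dev)
{
	/* hardware can wake the system; default policy: enabled */
	device_init_wakeup(dev, 1);
	return 0;
}

static int foo_wakeup_suspend(struct device *dev)
{
	if (device_may_wakeup(dev)) {
		/* arm the wakeup source, e.g. via enable_irq_wake() */
	}
	return 0;
}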

@@ -1,6 +1,8 @@
/*
* Linux Plug and Play Support
* Copyright by Adam Belay <ambx1@neo.rr.com>
* Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*/

#ifndef _LINUX_PNP_H
@@ -15,7 +17,6 @@

struct pnp_protocol;
struct pnp_dev;
struct pnp_resource_table;

/*
* Resource Management
@@ -24,7 +25,14 @@ struct resource *pnp_get_resource(struct pnp_dev *, unsigned int, unsigned int);

static inline int pnp_resource_valid(struct resource *res)
{
if (res && !(res->flags & IORESOURCE_UNSET))
if (res)
return 1;
return 0;
}

static inline int pnp_resource_enabled(struct resource *res)
{
if (res && !(res->flags & IORESOURCE_DISABLED))
return 1;
return 0;
}
@@ -40,19 +48,31 @@ static inline resource_size_t pnp_resource_len(struct resource *res)
static inline resource_size_t pnp_port_start(struct pnp_dev *dev,
unsigned int bar)
{
return pnp_get_resource(dev, IORESOURCE_IO, bar)->start;
struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar);

if (pnp_resource_valid(res))
return res->start;
return 0;
}

static inline resource_size_t pnp_port_end(struct pnp_dev *dev,
unsigned int bar)
{
return pnp_get_resource(dev, IORESOURCE_IO, bar)->end;
struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar);

if (pnp_resource_valid(res))
return res->end;
return 0;
}

static inline unsigned long pnp_port_flags(struct pnp_dev *dev,
unsigned int bar)
{
return pnp_get_resource(dev, IORESOURCE_IO, bar)->flags;
struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar);

if (pnp_resource_valid(res))
return res->flags;
return IORESOURCE_IO | IORESOURCE_AUTO;
}

static inline int pnp_port_valid(struct pnp_dev *dev, unsigned int bar)
@@ -63,25 +83,41 @@ static inline int pnp_port_valid(struct pnp_dev *dev, unsigned int bar)
static inline resource_size_t pnp_port_len(struct pnp_dev *dev,
unsigned int bar)
{
return pnp_resource_len(pnp_get_resource(dev, IORESOURCE_IO, bar));
struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar);

if (pnp_resource_valid(res))
return pnp_resource_len(res);
return 0;
}

static inline resource_size_t pnp_mem_start(struct pnp_dev *dev,
unsigned int bar)
{
return pnp_get_resource(dev, IORESOURCE_MEM, bar)->start;
struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar);

if (pnp_resource_valid(res))
return res->start;
return 0;
}

static inline resource_size_t pnp_mem_end(struct pnp_dev *dev,
unsigned int bar)
{
return pnp_get_resource(dev, IORESOURCE_MEM, bar)->end;
struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar);

if (pnp_resource_valid(res))
return res->end;
return 0;
}

static inline unsigned long pnp_mem_flags(struct pnp_dev *dev, unsigned int bar)
{
return pnp_get_resource(dev, IORESOURCE_MEM, bar)->flags;
struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar);

if (pnp_resource_valid(res))
return res->flags;
return IORESOURCE_MEM | IORESOURCE_AUTO;
}

static inline int pnp_mem_valid(struct pnp_dev *dev, unsigned int bar)
@@ -92,18 +128,30 @@ static inline int pnp_mem_valid(struct pnp_dev *dev, unsigned int bar)
static inline resource_size_t pnp_mem_len(struct pnp_dev *dev,
unsigned int bar)
{
return pnp_resource_len(pnp_get_resource(dev, IORESOURCE_MEM, bar));
struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar);

if (pnp_resource_valid(res))
return pnp_resource_len(res);
return 0;
}

static inline resource_size_t pnp_irq(struct pnp_dev *dev, unsigned int bar)
{
return pnp_get_resource(dev, IORESOURCE_IRQ, bar)->start;
struct resource *res = pnp_get_resource(dev, IORESOURCE_IRQ, bar);

if (pnp_resource_valid(res))
return res->start;
return -1;
}

static inline unsigned long pnp_irq_flags(struct pnp_dev *dev, unsigned int bar)
{
return pnp_get_resource(dev, IORESOURCE_IRQ, bar)->flags;
struct resource *res = pnp_get_resource(dev, IORESOURCE_IRQ, bar);

if (pnp_resource_valid(res))
return res->flags;
return IORESOURCE_IRQ | IORESOURCE_AUTO;
}

static inline int pnp_irq_valid(struct pnp_dev *dev, unsigned int bar)
@@ -114,12 +162,20 @@ static inline int pnp_irq_valid(struct pnp_dev *dev, unsigned int bar)

static inline resource_size_t pnp_dma(struct pnp_dev *dev, unsigned int bar)
{
return pnp_get_resource(dev, IORESOURCE_DMA, bar)->start;
struct resource *res = pnp_get_resource(dev, IORESOURCE_DMA, bar);

if (pnp_resource_valid(res))
return res->start;
return -1;
}

static inline unsigned long pnp_dma_flags(struct pnp_dev *dev, unsigned int bar)
{
return pnp_get_resource(dev, IORESOURCE_DMA, bar)->flags;
struct resource *res = pnp_get_resource(dev, IORESOURCE_DMA, bar);

if (pnp_resource_valid(res))
return res->flags;
return IORESOURCE_DMA | IORESOURCE_AUTO;
}

static inline int pnp_dma_valid(struct pnp_dev *dev, unsigned int bar)
@@ -128,57 +184,6 @@ static inline int pnp_dma_valid(struct pnp_dev *dev, unsigned int bar)
}
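With the NULL-safe accessors above, a PnP driver can probe its resources defensively; note the sentinel returns (0 for ports/memory, -1 for IRQ/DMA) when a resource is absent. A sketch (the driver and its log line are invented):

static int foo_pnp_probe(struct pnp_dev *dev,
			 const struct pnp_device_id *dev_id)
{
	resource_size_t base;
	int irq;

	if (!pnp_port_valid(dev, 0) || !pnp_irq_valid(dev, 0))
		return -ENODEV;

	base = pnp_port_start(dev, 0);
	irq = (int)pnp_irq(dev, 0);
	printk(KERN_INFO "foo: I/O at %#llx, IRQ %d\n",
	       (unsigned long long)base, irq);
	return 0;
}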

#define PNP_PORT_FLAG_16BITADDR (1<<0)
#define PNP_PORT_FLAG_FIXED (1<<1)

struct pnp_port {
unsigned short min; /* min base number */
unsigned short max; /* max base number */
unsigned char align; /* align boundary */
unsigned char size; /* size of range */
unsigned char flags; /* port flags */
unsigned char pad; /* pad */
struct pnp_port *next; /* next port */
};

#define PNP_IRQ_NR 256
struct pnp_irq {
DECLARE_BITMAP(map, PNP_IRQ_NR); /* bitmask for IRQ lines */
unsigned char flags; /* IRQ flags */
unsigned char pad; /* pad */
struct pnp_irq *next; /* next IRQ */
};

struct pnp_dma {
unsigned char map; /* bitmask for DMA channels */
unsigned char flags; /* DMA flags */
struct pnp_dma *next; /* next port */
};

struct pnp_mem {
unsigned int min; /* min base number */
unsigned int max; /* max base number */
unsigned int align; /* align boundary */
unsigned int size; /* size of range */
unsigned char flags; /* memory flags */
unsigned char pad; /* pad */
struct pnp_mem *next; /* next memory resource */
};

#define PNP_RES_PRIORITY_PREFERRED 0
#define PNP_RES_PRIORITY_ACCEPTABLE 1
#define PNP_RES_PRIORITY_FUNCTIONAL 2
#define PNP_RES_PRIORITY_INVALID 65535

struct pnp_option {
unsigned short priority; /* priority */
struct pnp_port *port; /* first port */
struct pnp_irq *irq; /* first IRQ */
struct pnp_dma *dma; /* first DMA */
struct pnp_mem *mem; /* first memory resource */
struct pnp_option *next; /* used to chain dependent resources */
};

/*
* Device Management
*/
@@ -246,9 +251,9 @@ struct pnp_dev {

int active;
int capabilities;
struct pnp_option *independent;
struct pnp_option *dependent;
struct pnp_resource_table *res;
unsigned int num_dependent_sets;
struct list_head resources;
struct list_head options;

char name[PNP_NAME_LEN]; /* contains a human-readable name */
int flags; /* used by protocols */
@@ -425,6 +430,8 @@ void pnp_unregister_card_driver(struct pnp_card_driver *drv);
extern struct list_head pnp_cards;

/* resource management */
int pnp_possible_config(struct pnp_dev *dev, int type, resource_size_t base,
resource_size_t size);
int pnp_auto_config_dev(struct pnp_dev *dev);
int pnp_start_dev(struct pnp_dev *dev);
int pnp_stop_dev(struct pnp_dev *dev);
@@ -452,6 +459,9 @@ static inline int pnp_register_card_driver(struct pnp_card_driver *drv) { return
static inline void pnp_unregister_card_driver(struct pnp_card_driver *drv) { }

/* resource management */
static inline int pnp_possible_config(struct pnp_dev *dev, int type,
resource_size_t base,
resource_size_t size) { return 0; }
static inline int pnp_auto_config_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; }

@@ -10,7 +10,7 @@
#include <linux/linkage.h>
#include <linux/list.h>

#ifdef CONFIG_DEBUG_PREEMPT
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void add_preempt_count(int val);
extern void sub_preempt_count(int val);
#else
@@ -52,6 +52,34 @@ do { \
preempt_check_resched(); \
} while (0)

/* For debugging and tracer internals only! */
#define add_preempt_count_notrace(val) \
do { preempt_count() += (val); } while (0)
#define sub_preempt_count_notrace(val) \
do { preempt_count() -= (val); } while (0)
#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)

#define preempt_disable_notrace() \
do { \
inc_preempt_count_notrace(); \
barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
barrier(); \
dec_preempt_count_notrace(); \
} while (0)

/* preempt_check_resched is OK to trace */
#define preempt_enable_notrace() \
do { \
preempt_enable_no_resched_notrace(); \
barrier(); \
preempt_check_resched(); \
} while (0)
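The _notrace variants exist so that tracer internals can toggle preemption without recursively invoking the traced add/sub_preempt_count() hooks. The intended pattern, roughly (the function is invented):

static void tracer_internal_example(void)
{
	preempt_disable_notrace();
	/* touch per-CPU tracer state; this region must not be traced */
	preempt_enable_notrace();
}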

#else

#define preempt_disable() do { } while (0)
@@ -59,6 +87,10 @@ do { \
#define preempt_enable() do { } while (0)
#define preempt_check_resched() do { } while (0)

#define preempt_disable_notrace() do { } while (0)
#define preempt_enable_no_resched_notrace() do { } while (0)
#define preempt_enable_notrace() do { } while (0)

#endif

#ifdef CONFIG_PREEMPT_NOTIFIERS

@@ -95,8 +95,12 @@ extern void __ptrace_link(struct task_struct *child,
struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
extern void ptrace_untrace(struct task_struct *child);
extern int ptrace_may_attach(struct task_struct *task);
extern int __ptrace_may_attach(struct task_struct *task);
#define PTRACE_MODE_READ 1
#define PTRACE_MODE_ATTACH 2
/* Returns 0 on success, -errno on denial. */
extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
/* Returns true on success, false on denial. */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
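The two modes split the old ptrace_may_attach() check: PTRACE_MODE_READ for merely inspecting a task, PTRACE_MODE_ATTACH for taking control of it. A call-site sketch (the wrapper is invented):

static bool can_inspect_task(struct task_struct *task)
{
	/* true if current may read @task's state without attaching */
	return ptrace_may_access(task, PTRACE_MODE_READ);
}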

static inline int ptrace_reparented(struct task_struct *child)
{
31 include/linux/pwm.h Normal file
@@ -0,0 +1,31 @@
#ifndef __LINUX_PWM_H
#define __LINUX_PWM_H

struct pwm_device;

/*
* pwm_request - request a PWM device
*/
struct pwm_device *pwm_request(int pwm_id, const char *label);

/*
* pwm_free - free a PWM device
*/
void pwm_free(struct pwm_device *pwm);

/*
* pwm_config - change a PWM device configuration
*/
int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns);

/*
* pwm_enable - start a PWM output toggling
*/
int pwm_enable(struct pwm_device *pwm);

/*
* pwm_disable - stop a PWM output toggling
*/
void pwm_disable(struct pwm_device *pwm);

#endif /* __LINUX_PWM_H */
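The typical consumer flow for this API, as a sketch (the ID, label, and timings are made up, and the ERR_PTR error convention is an assumption based on the PWM backlight driver referenced in the next file):

static struct pwm_device *foo_pwm;

static int foo_pwm_start(void)
{
	foo_pwm = pwm_request(0, "foo"); /* pwm_id 0 is board-specific */
	if (IS_ERR(foo_pwm))
		return PTR_ERR(foo_pwm); /* assumed error convention */

	/* 50% duty cycle at 1 kHz: 500,000 ns high per 1,000,000 ns period */
	pwm_config(foo_pwm, 500000, 1000000);
	return pwm_enable(foo_pwm);
}

static void foo_pwm_stop(void)
{
	pwm_disable(foo_pwm);
	pwm_free(foo_pwm);
}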
17 include/linux/pwm_backlight.h Normal file
@@ -0,0 +1,17 @@
/*
* Generic PWM backlight driver data - see drivers/video/backlight/pwm_bl.c
*/
#ifndef __LINUX_PWM_BACKLIGHT_H
#define __LINUX_PWM_BACKLIGHT_H

struct platform_pwm_backlight_data {
int pwm_id;
unsigned int max_brightness;
unsigned int dft_brightness;
unsigned int pwm_period_ns;
int (*init)(struct device *dev);
int (*notify)(int brightness);
void (*exit)(struct device *dev);
};

#endif

@@ -151,7 +151,10 @@ extern struct lockdep_map rcu_lock_map;

#define __synchronize_sched() synchronize_rcu()

#define call_rcu_sched(head, func) call_rcu(head, func)

extern void __rcu_init(void);
#define rcu_init_sched() do { } while (0)
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);

@@ -1,6 +1,373 @@
#ifndef _LINUX_RCULIST_H
#define _LINUX_RCULIST_H

#include <linux/list.h>
#ifdef __KERNEL__

#endif /* _LINUX_RCULIST_H */
/*
* RCU-protected list version
*/
#include <linux/list.h>
#include <linux/rcupdate.h>

/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static inline void __list_add_rcu(struct list_head *new,
struct list_head *prev, struct list_head *next)
{
new->next = next;
new->prev = prev;
rcu_assign_pointer(prev->next, new);
next->prev = new;
}

/**
* list_add_rcu - add a new entry to rcu-protected list
* @new: new entry to be added
* @head: list head to add it after
*
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as list_add_rcu()
* or list_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* list_for_each_entry_rcu().
*/
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
__list_add_rcu(new, head, head->next);
}

/**
* list_add_tail_rcu - add a new entry to rcu-protected list
* @new: new entry to be added
* @head: list head to add it before
*
* Insert a new entry before the specified head.
* This is useful for implementing queues.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as list_add_tail_rcu()
* or list_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* list_for_each_entry_rcu().
*/
static inline void list_add_tail_rcu(struct list_head *new,
struct list_head *head)
{
__list_add_rcu(new, head->prev, head);
}

/**
* list_del_rcu - deletes entry from list without re-initialization
* @entry: the element to delete from the list.
*
* Note: list_empty() on entry does not return true after this,
* the entry is in an undefined state. It is useful for RCU based
* lockfree traversal.
*
* In particular, it means that we can not poison the forward
* pointers that may still be used for walking the list.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as list_del_rcu()
* or list_add_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* list_for_each_entry_rcu().
*
* Note that the caller is not permitted to immediately free
* the newly deleted entry. Instead, either synchronize_rcu()
* or call_rcu() must be used to defer freeing until an RCU
* grace period has elapsed.
*/
static inline void list_del_rcu(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
entry->prev = LIST_POISON2;
}
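The deferred-free rule above in practice: a writer unlinks under its own lock, then hands the memory to call_rcu(). A self-contained sketch (the item type, list, and lock are invented for illustration):

struct item {
	struct list_head link;
	struct rcu_head rcu;
	int key;
};

static LIST_HEAD(item_list);        /* list head, updated by writers */
static DEFINE_SPINLOCK(item_lock);  /* serializes writers only */

static void item_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct item, rcu));
}

static void item_remove(struct item *it)
{
	spin_lock(&item_lock);
	list_del_rcu(&it->link);
	spin_unlock(&item_lock);
	/* readers may still hold the pointer; free after a grace period */
	call_rcu(&it->rcu, item_free_rcu);
}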
|
||||
|
||||
/**
|
||||
* list_replace_rcu - replace old entry by new one
|
||||
* @old : the element to be replaced
|
||||
* @new : the new element to insert
|
||||
*
|
||||
* The @old entry will be replaced with the @new entry atomically.
|
||||
* Note: @old should not be empty.
|
||||
*/
|
||||
static inline void list_replace_rcu(struct list_head *old,
|
||||
struct list_head *new)
|
||||
{
|
||||
new->next = old->next;
|
||||
new->prev = old->prev;
|
||||
rcu_assign_pointer(new->prev->next, new);
|
||||
new->next->prev = new;
|
||||
old->prev = LIST_POISON2;
|
||||
}

/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list.
 * @list: the RCU-protected list to splice
 * @head: the place in the list to splice the first list into
 * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
 *
 * @head can be RCU-read traversed concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to
 * prevent any other updates to @head. In principle, it is possible
 * to modify the list as soon as sync() begins execution.
 * If this sort of thing becomes necessary, an alternative version
 * based on call_rcu() could be created. But only if -really-
 * needed -- there is no shortage of RCU API members.
 */
static inline void list_splice_init_rcu(struct list_head *list,
                                        struct list_head *head,
                                        void (*sync)(void))
{
        struct list_head *first = list->next;
        struct list_head *last = list->prev;
        struct list_head *at = head->next;

        if (list_empty(list))
                return;

        /* "first" and "last" still track the entries, so the source
         * list head can safely be reinitialized. */

        INIT_LIST_HEAD(list);

        /*
         * At this point, the list body still points to the source list.
         * Wait for any readers to finish using the list before splicing
         * the list body into the new list.  Any new readers will see
         * an empty list.
         */

        sync();

        /*
         * Readers are finished with the source list, so perform splice.
         * The order is important if the new list is global and accessible
         * to concurrent RCU readers.  Note that RCU readers are not
         * permitted to traverse the prev pointers without excluding
         * this function.
         */

        last->next = at;
        rcu_assign_pointer(head->next, first);
        first->prev = head;
        at->prev = last;
}
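
A usage sketch under stated assumptions (global_list, global_mutex, and publish_batch are hypothetical): an updater publishes a privately built batch onto a globally visible list. A mutex rather than a spinlock guards the updaters, because the @sync callback blocks:

/* assumes <linux/mutex.h> in addition to the list/RCU headers */
static LIST_HEAD(global_list);          /* walked under rcu_read_lock() */
static DEFINE_MUTEX(global_mutex);      /* updater-side; sync() may sleep */

static void publish_batch(struct list_head *batch)
{
        mutex_lock(&global_mutex);
        list_splice_init_rcu(batch, &global_list, synchronize_rcu);
        mutex_unlock(&global_mutex);
}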

/**
 * list_for_each_rcu - iterate over an rcu-protected list
 * @pos:        the &struct list_head to use as a loop cursor.
 * @head:       the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_rcu(pos, head) \
        for (pos = rcu_dereference((head)->next); \
                prefetch(pos->next), pos != (head); \
                pos = rcu_dereference(pos->next))

#define __list_for_each_rcu(pos, head) \
        for (pos = rcu_dereference((head)->next); \
                pos != (head); \
                pos = rcu_dereference(pos->next))

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos:        the type * to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the list_struct within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
        for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \
                prefetch(pos->member.next), &pos->member != (head); \
                pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member))
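
A reader-side sketch (a hypothetical lookup over the struct foo list from the earlier sketches); the whole traversal must sit inside rcu_read_lock()/rcu_read_unlock():

static int foo_contains(struct list_head *head, int key)
{
        struct foo *f;
        int found = 0;

        rcu_read_lock();                        /* guards the entire traversal */
        list_for_each_entry_rcu(f, head, list) {
                if (f->data == key) {
                        found = 1;
                        break;                  /* f must not be used after unlock */
                }
        }
        rcu_read_unlock();
        return found;
}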


/**
 * list_for_each_continue_rcu
 * @pos:        the &struct list_head to use as a loop cursor.
 * @head:       the head for your list.
 *
 * Iterate over an rcu-protected list, continuing after current point.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
        for ((pos) = rcu_dereference((pos)->next); \
                prefetch((pos)->next), (pos) != (head); \
                (pos) = rcu_dereference((pos)->next))

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on entry does not return true after this;
 * the entry is in an undefined state. This is useful for RCU-based
 * lock-free traversal.
 *
 * In particular, it means that we cannot poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
        __hlist_del(n);
        n->pprev = LIST_POISON2;
}

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
                                        struct hlist_node *new)
{
        struct hlist_node *next = old->next;

        new->next = next;
        new->pprev = old->pprev;
        rcu_assign_pointer(*new->pprev, new);
        if (next)
                new->next->pprev = &new->next;
        old->pprev = LIST_POISON2;
}

/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
                                        struct hlist_head *h)
{
        struct hlist_node *first = h->first;

        n->next = first;
        n->pprev = &h->first;
        rcu_assign_pointer(h->first, n);
        if (first)
                first->pprev = &n->next;
}
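
A sketch of the updater side of a simple RCU hash table built on these primitives (all names hypothetical, reusing the includes from the earlier sketches): a spinlock serializes writers only, while readers proceed lock-free.

#define FOO_TABLE_SIZE 128

struct foo_entry {
        struct hlist_node node;
        int key;
};

static struct hlist_head foo_table[FOO_TABLE_SIZE];
static DEFINE_SPINLOCK(foo_table_lock);

static void foo_table_insert(struct foo_entry *e)
{
        unsigned int bucket = (unsigned int)e->key % FOO_TABLE_SIZE;

        spin_lock(&foo_table_lock);     /* excludes other updaters only */
        hlist_add_head_rcu(&e->node, &foo_table[bucket]);
        spin_unlock(&foo_table_lock);   /* readers never take this lock */
}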

/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
                                        struct hlist_node *next)
{
        n->pprev = next->pprev;
        n->next = next;
        rcu_assign_pointer(*(n->pprev), n);
        next->pprev = &n->next;
}

/**
 * hlist_add_after_rcu
 * @prev: the existing element to add the new element after.
 * @n: the new element to add to the hash list.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_after_rcu(struct hlist_node *prev,
                                        struct hlist_node *n)
{
        n->next = prev->next;
        n->pprev = &prev->next;
        rcu_assign_pointer(prev->next, n);
        if (n->next)
                n->next->pprev = &n->next;
}

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos:       the type * to use as a loop cursor.
 * @pos:        the &struct hlist_node to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
        for (pos = rcu_dereference((head)->first); \
                pos && ({ prefetch(pos->next); 1; }) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
                pos = rcu_dereference(pos->next))
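
The matching reader side for the hypothetical foo_table above; note that in this era the macro takes both a typed cursor and a struct hlist_node cursor:

static int foo_table_contains(int key)
{
        unsigned int bucket = (unsigned int)key % FOO_TABLE_SIZE;
        struct foo_entry *e;
        struct hlist_node *pos;
        int found = 0;

        rcu_read_lock();
        hlist_for_each_entry_rcu(e, pos, &foo_table[bucket], node) {
                if (e->key == key) {
                        found = 1;
                        break;
                }
        }
        rcu_read_unlock();
        return found;
}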

#endif  /* __KERNEL__ */
#endif

@@ -40,6 +40,7 @@
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>

/**
 * struct rcu_head - callback structure for use with RCU
@@ -168,6 +169,27 @@ struct rcu_head {
        (p) = (v); \
})

/* Infrastructure to implement the synchronize_() primitives. */

struct rcu_synchronize {
        struct rcu_head head;
        struct completion completion;
};

extern void wakeme_after_rcu(struct rcu_head *head);

#define synchronize_rcu_xxx(name, func) \
void name(void) \
{ \
        struct rcu_synchronize rcu; \
        \
        init_completion(&rcu.completion); \
        /* Will wake me after RCU finished. */ \
        func(&rcu.head, wakeme_after_rcu); \
        /* Wait for it. */ \
        wait_for_completion(&rcu.completion); \
}
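
A sketch of how the macro is meant to be instantiated: each synchronize_() primitive is generated from the matching callback-posting primitive, along the lines the RCU implementation files use:

/* Defines "void synchronize_rcu(void)" in terms of call_rcu(): post a
 * callback and sleep on the completion until the grace period ends. */
synchronize_rcu_xxx(synchronize_rcu, call_rcu)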

/**
 * synchronize_sched - block until all CPUs have exited any non-preemptive
 * kernel code sequences.
@@ -224,8 +246,8 @@ extern void call_rcu_bh(struct rcu_head *head,
/* Exported common interfaces */
extern void synchronize_rcu(void);
extern void rcu_barrier(void);
extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);

/* Internal to kernel */
extern void rcu_init(void);

@@ -40,10 +40,39 @@
#include <linux/cpumask.h>
#include <linux/seqlock.h>

#define rcu_qsctr_inc(cpu)
struct rcu_dyntick_sched {
        int dynticks;
        int dynticks_snap;
        int sched_qs;
        int sched_qs_snap;
        int sched_dynticks_snap;
};

DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);

static inline void rcu_qsctr_inc(int cpu)
{
        struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

        rdssp->sched_qs++;
}
#define rcu_bh_qsctr_inc(cpu)
#define call_rcu_bh(head, rcu) call_rcu(head, rcu)

/**
 * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full
 * synchronize_sched()-style grace period elapses, in other words after
 * all currently executing preempt-disabled sections of code (including
 * hardirq handlers, NMI handlers, and local_irq_save() blocks) have
 * completed.
 */
extern void call_rcu_sched(struct rcu_head *head,
                           void (*func)(struct rcu_head *head));
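
A brief usage sketch (hypothetical names; same shape as the call_rcu() pattern earlier, but keyed to the sched grace period). struct bar is assumed to embed a struct rcu_head named rcu:

static void bar_reclaim(struct rcu_head *head)
{
        kfree(container_of(head, struct bar, rcu));     /* struct bar is hypothetical */
}

static void bar_retire(struct bar *b)
{
        /* Free only after all currently running preempt-disabled regions end. */
        call_rcu_sched(&b->rcu, bar_reclaim);
}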

extern void __rcu_read_lock(void)       __acquires(RCU);
extern void __rcu_read_unlock(void)     __releases(RCU);
extern int rcu_pending(int cpu);
@@ -55,6 +84,7 @@ extern int rcu_needs_cpu(int cpu);
extern void __synchronize_sched(void);

extern void __rcu_init(void);
extern void rcu_init_sched(void);
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);
extern long rcu_batches_completed(void);
@@ -81,20 +111,20 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
struct softirq_action;

#ifdef CONFIG_NO_HZ
DECLARE_PER_CPU(long, dynticks_progress_counter);
DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);

static inline void rcu_enter_nohz(void)
{
        smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
        __get_cpu_var(dynticks_progress_counter)++;
        WARN_ON(__get_cpu_var(dynticks_progress_counter) & 0x1);
        __get_cpu_var(rcu_dyntick_sched).dynticks++;
        WARN_ON(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1);
}

static inline void rcu_exit_nohz(void)
{
        __get_cpu_var(dynticks_progress_counter)++;
        smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
        WARN_ON(!(__get_cpu_var(dynticks_progress_counter) & 0x1));
        __get_cpu_var(rcu_dyntick_sched).dynticks++;
        WARN_ON(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1));
}

#else /* CONFIG_NO_HZ */

@@ -134,7 +134,6 @@ extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_active(void);
extern unsigned long nr_iowait(void);
extern unsigned long weighted_cpuload(const int cpu);

struct seq_file;
struct cfs_rq;
@@ -246,6 +245,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(void);

extern cpumask_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern int select_nohz_load_balancer(int cpu);
@@ -784,6 +785,8 @@ struct sched_domain {
        unsigned int balance_interval;  /* initialise to 1. units in ms. */
        unsigned int nr_balance_failed; /* initialise to 0 */

        u64 last_update;

#ifdef CONFIG_SCHEDSTATS
        /* load_balance() stats */
        unsigned int lb_count[CPU_MAX_IDLE_TYPES];
@@ -823,23 +826,6 @@ extern int arch_reinit_sched_domains(void);

#endif  /* CONFIG_SMP */

/*
 * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
 * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a
 * task of nice 0 or enough lower priority tasks to bring up the
 * weighted_cpuload
 */
static inline int above_background_load(void)
{
        unsigned long cpu;

        for_each_online_cpu(cpu) {
                if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE)
                        return 1;
        }
        return 0;
}

struct io_context;                      /* See blkdev.h */
#define NGROUPS_SMALL           32
#define NGROUPS_PER_BLOCK       ((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
@@ -921,8 +907,8 @@ struct sched_class {
        void (*set_cpus_allowed)(struct task_struct *p,
                                 const cpumask_t *newmask);

        void (*join_domain)(struct rq *rq);
        void (*leave_domain)(struct rq *rq);
        void (*rq_online)(struct rq *rq);
        void (*rq_offline)(struct rq *rq);

        void (*switched_from) (struct rq *this_rq, struct task_struct *task,
                               int running);
@@ -1039,6 +1025,7 @@ struct task_struct {
#endif

        int prio, static_prio, normal_prio;
        unsigned int rt_priority;
        const struct sched_class *sched_class;
        struct sched_entity se;
        struct sched_rt_entity rt;
@@ -1075,12 +1062,6 @@ struct task_struct {
#endif

        struct list_head tasks;
        /*
         * ptrace_list/ptrace_children forms the list of my children
         * that were stolen by a ptracer.
         */
        struct list_head ptrace_children;
        struct list_head ptrace_list;

        struct mm_struct *mm, *active_mm;

@@ -1102,18 +1083,25 @@ struct task_struct {
        /*
         * pointers to (original) parent process, youngest child, younger sibling,
         * older sibling, respectively.  (p->father can be replaced with
         * p->parent->pid)
         * p->real_parent->pid)
         */
        struct task_struct *real_parent; /* real parent process (when being debugged) */
        struct task_struct *parent;     /* parent process */
        struct task_struct *real_parent; /* real parent process */
        struct task_struct *parent;     /* recipient of SIGCHLD, wait4() reports */
        /*
         * children/sibling forms the list of my children plus the
         * tasks I'm ptracing.
         * children/sibling forms the list of my natural children
         */
        struct list_head children;      /* list of my children */
        struct list_head sibling;       /* linkage in my parent's children list */
        struct task_struct *group_leader;       /* threadgroup leader */

        /*
         * ptraced is the list of tasks this task is using ptrace on.
         * This includes both natural children and PTRACE_ATTACH targets.
         * p->ptrace_entry is p's link on the p->parent->ptraced list.
         */
        struct list_head ptraced;
        struct list_head ptrace_entry;

        /* PID/PID hash table linkage. */
        struct pid_link pids[PIDTYPE_MAX];
        struct list_head thread_group;
@@ -1122,7 +1110,6 @@ struct task_struct {
        int __user *set_child_tid;              /* CLONE_CHILD_SETTID */
        int __user *clear_child_tid;            /* CLONE_CHILD_CLEARTID */

        unsigned int rt_priority;
        cputime_t utime, stime, utimescaled, stimescaled;
        cputime_t gtime;
        cputime_t prev_utime, prev_stime;
@@ -1141,12 +1128,12 @@ struct task_struct {
        gid_t gid,egid,sgid,fsgid;
        struct group_info *group_info;
        kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset;
        unsigned securebits;
        struct user_struct *user;
        unsigned securebits;
#ifdef CONFIG_KEYS
        unsigned char jit_keyring;      /* default keyring to attach requested keys to */
        struct key *request_key_auth;   /* assumed request_key authority */
        struct key *thread_keyring;     /* keyring private to this thread */
        unsigned char jit_keyring;      /* default keyring to attach requested keys to */
#endif
        char comm[TASK_COMM_LEN]; /* executable name excluding path
                                     - access with [gs]et_task_comm (which lock
@@ -1233,8 +1220,8 @@ struct task_struct {
# define MAX_LOCK_DEPTH 48UL
        u64 curr_chain_key;
        int lockdep_depth;
        struct held_lock held_locks[MAX_LOCK_DEPTH];
        unsigned int lockdep_recursion;
        struct held_lock held_locks[MAX_LOCK_DEPTH];
#endif

/* journalling filesystem info */
@@ -1262,10 +1249,6 @@ struct task_struct {
        u64 acct_vm_mem1;       /* accumulated virtual memory usage */
        cputime_t acct_stimexpd;/* stime since last update */
#endif
#ifdef CONFIG_NUMA
        struct mempolicy *mempolicy;
        short il_next;
#endif
#ifdef CONFIG_CPUSETS
        nodemask_t mems_allowed;
        int cpuset_mems_generation;
@@ -1284,6 +1267,10 @@ struct task_struct {
#endif
        struct list_head pi_state_list;
        struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_NUMA
        struct mempolicy *mempolicy;
        short il_next;
#endif
        atomic_t fs_excl;       /* holding fs exclusive resources */
        struct rcu_head rcu;
@@ -1504,9 +1491,11 @@ static inline void put_task_struct(struct task_struct *t)
#define PF_SWAPWRITE    0x00800000      /* Allowed to write to swap */
#define PF_SPREAD_PAGE  0x01000000      /* Spread page cache over cpuset */
#define PF_SPREAD_SLAB  0x02000000      /* Spread some slab caches over cpuset */
#define PF_THREAD_BOUND 0x04000000      /* Thread bound to specific cpu */
#define PF_MEMPOLICY    0x10000000      /* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER 0x20000000      /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000      /* Freezer should not count it as freezeable */
#define PF_FREEZER_NOSIG 0x80000000     /* Freezer won't send signals to it */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
@@ -1573,13 +1562,28 @@ static inline void sched_clock_idle_sleep_event(void)
static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
#else

#ifdef CONFIG_NO_HZ
static inline void sched_clock_tick_stop(int cpu)
{
}

static inline void sched_clock_tick_start(int cpu)
{
}
#endif

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
extern void sched_clock_init(void);
extern u64 sched_clock_cpu(int cpu);
extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#ifdef CONFIG_NO_HZ
extern void sched_clock_tick_stop(int cpu);
extern void sched_clock_tick_start(int cpu);
#endif
#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

/*
 * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
@@ -1622,6 +1626,7 @@ extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_shares_ratelimit;

int sched_nr_latency_handler(struct ctl_table *table, int write,
                struct file *file, void __user *buffer, size_t *length,
@@ -1655,6 +1660,8 @@ extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
                                      struct sched_param *);
extern struct task_struct *idle_task(int cpu);
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);
@@ -1870,9 +1877,6 @@ extern void wait_task_inactive(struct task_struct * p);
#define wait_task_inactive(p)   do { } while (0)
#endif

#define remove_parent(p)        list_del_init(&(p)->sibling)
#define add_parent(p)           list_add_tail(&(p)->sibling,&(p)->parent->children)

#define next_task(p)    list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)

#define for_each_process(p) \
@@ -2131,6 +2135,18 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
}
#endif

#ifdef CONFIG_TRACING
extern void
__trace_special(void *__tr, void *__data,
                unsigned long arg1, unsigned long arg2, unsigned long arg3);
#else
static inline void
__trace_special(void *__tr, void *__data,
                unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
}
#endif

extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
extern long sched_getaffinity(pid_t pid, cpumask_t *mask);

@@ -2225,6 +2241,8 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
}
#endif /* CONFIG_MM_OWNER */

#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

#endif /* __KERNEL__ */

#endif
@@ -46,7 +46,8 @@ struct audit_krule;
|
||||
*/
|
||||
extern int cap_capable(struct task_struct *tsk, int cap);
|
||||
extern int cap_settime(struct timespec *ts, struct timezone *tz);
|
||||
extern int cap_ptrace(struct task_struct *parent, struct task_struct *child);
|
||||
extern int cap_ptrace(struct task_struct *parent, struct task_struct *child,
|
||||
unsigned int mode);
|
||||
extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
|
||||
extern int cap_capset_check(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
|
||||
extern void cap_capset_set(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
|
||||
@@ -79,6 +80,7 @@ struct xfrm_selector;
|
||||
struct xfrm_policy;
|
||||
struct xfrm_state;
|
||||
struct xfrm_user_sec_ctx;
|
||||
struct seq_file;
|
||||
|
||||
extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
|
||||
extern int cap_netlink_recv(struct sk_buff *skb, int cap);
|
||||
@@ -289,10 +291,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
|
||||
* Update module state after a successful pivot.
|
||||
* @old_path contains the path for the old root.
|
||||
* @new_path contains the path for the new root.
|
||||
* @sb_get_mnt_opts:
|
||||
* Get the security relevant mount options used for a superblock
|
||||
* @sb the superblock to get security mount options from
|
||||
* @opts binary data structure containing all lsm mount data
|
||||
* @sb_set_mnt_opts:
|
||||
* Set the security relevant mount options used for a superblock
|
||||
* @sb the superblock to set security mount options for
|
||||
@@ -1170,6 +1168,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
|
||||
* attributes would be changed by the execve.
|
||||
* @parent contains the task_struct structure for parent process.
|
||||
* @child contains the task_struct structure for child process.
|
||||
* @mode contains the PTRACE_MODE flags indicating the form of access.
|
||||
* Return 0 if permission is granted.
|
||||
* @capget:
|
||||
* Get the @effective, @inheritable, and @permitted capability sets for
|
||||
@@ -1240,11 +1239,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
|
||||
* @pages contains the number of pages.
|
||||
* Return 0 if permission is granted.
|
||||
*
|
||||
* @register_security:
|
||||
* allow module stacking.
|
||||
* @name contains the name of the security module being stacked.
|
||||
* @ops contains a pointer to the struct security_operations of the module to stack.
|
||||
*
|
||||
* @secid_to_secctx:
|
||||
* Convert secid to security context.
|
||||
* @secid contains the security ID.
|
||||
@@ -1295,7 +1289,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
|
||||
struct security_operations {
|
||||
char name[SECURITY_NAME_MAX + 1];
|
||||
|
||||
int (*ptrace) (struct task_struct *parent, struct task_struct *child);
|
||||
int (*ptrace) (struct task_struct *parent, struct task_struct *child,
|
||||
unsigned int mode);
|
||||
int (*capget) (struct task_struct *target,
|
||||
kernel_cap_t *effective,
|
||||
kernel_cap_t *inheritable, kernel_cap_t *permitted);
|
||||
@@ -1328,6 +1323,7 @@ struct security_operations {
|
||||
void (*sb_free_security) (struct super_block *sb);
|
||||
int (*sb_copy_data) (char *orig, char *copy);
|
||||
int (*sb_kern_mount) (struct super_block *sb, void *data);
|
||||
int (*sb_show_options) (struct seq_file *m, struct super_block *sb);
|
||||
int (*sb_statfs) (struct dentry *dentry);
|
||||
int (*sb_mount) (char *dev_name, struct path *path,
|
||||
char *type, unsigned long flags, void *data);
|
||||
@@ -1343,8 +1339,6 @@ struct security_operations {
|
||||
struct path *new_path);
|
||||
void (*sb_post_pivotroot) (struct path *old_path,
|
||||
struct path *new_path);
|
||||
int (*sb_get_mnt_opts) (const struct super_block *sb,
|
||||
struct security_mnt_opts *opts);
|
||||
int (*sb_set_mnt_opts) (struct super_block *sb,
|
||||
struct security_mnt_opts *opts);
|
||||
void (*sb_clone_mnt_opts) (const struct super_block *oldsb,
|
||||
@@ -1472,10 +1466,6 @@ struct security_operations {
|
||||
int (*netlink_send) (struct sock *sk, struct sk_buff *skb);
|
||||
int (*netlink_recv) (struct sk_buff *skb, int cap);
|
||||
|
||||
/* allow module stacking */
|
||||
int (*register_security) (const char *name,
|
||||
struct security_operations *ops);
|
||||
|
||||
void (*d_instantiate) (struct dentry *dentry, struct inode *inode);
|
||||
|
||||
int (*getprocattr) (struct task_struct *p, char *name, char **value);
|
||||
@@ -1565,7 +1555,6 @@ struct security_operations {
|
||||
extern int security_init(void);
|
||||
extern int security_module_enable(struct security_operations *ops);
|
||||
extern int register_security(struct security_operations *ops);
|
||||
extern int mod_reg_security(const char *name, struct security_operations *ops);
|
||||
extern struct dentry *securityfs_create_file(const char *name, mode_t mode,
|
||||
struct dentry *parent, void *data,
|
||||
const struct file_operations *fops);
|
||||
@@ -1573,7 +1562,8 @@ extern struct dentry *securityfs_create_dir(const char *name, struct dentry *par
|
||||
extern void securityfs_remove(struct dentry *dentry);
|
||||
|
||||
/* Security operations */
|
||||
int security_ptrace(struct task_struct *parent, struct task_struct *child);
|
||||
int security_ptrace(struct task_struct *parent, struct task_struct *child,
|
||||
unsigned int mode);
|
||||
int security_capget(struct task_struct *target,
|
||||
kernel_cap_t *effective,
|
||||
kernel_cap_t *inheritable,
|
||||
@@ -1606,6 +1596,7 @@ int security_sb_alloc(struct super_block *sb);
|
||||
void security_sb_free(struct super_block *sb);
|
||||
int security_sb_copy_data(char *orig, char *copy);
|
||||
int security_sb_kern_mount(struct super_block *sb, void *data);
|
||||
int security_sb_show_options(struct seq_file *m, struct super_block *sb);
|
||||
int security_sb_statfs(struct dentry *dentry);
|
||||
int security_sb_mount(char *dev_name, struct path *path,
|
||||
char *type, unsigned long flags, void *data);
|
||||
@@ -1617,8 +1608,6 @@ void security_sb_post_remount(struct vfsmount *mnt, unsigned long flags, void *d
|
||||
void security_sb_post_addmount(struct vfsmount *mnt, struct path *mountpoint);
|
||||
int security_sb_pivotroot(struct path *old_path, struct path *new_path);
|
||||
void security_sb_post_pivotroot(struct path *old_path, struct path *new_path);
|
||||
int security_sb_get_mnt_opts(const struct super_block *sb,
|
||||
struct security_mnt_opts *opts);
|
||||
int security_sb_set_mnt_opts(struct super_block *sb, struct security_mnt_opts *opts);
|
||||
void security_sb_clone_mnt_opts(const struct super_block *oldsb,
|
||||
struct super_block *newsb);
|
||||
@@ -1755,9 +1744,11 @@ static inline int security_init(void)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int security_ptrace(struct task_struct *parent, struct task_struct *child)
|
||||
static inline int security_ptrace(struct task_struct *parent,
|
||||
struct task_struct *child,
|
||||
unsigned int mode)
|
||||
{
|
||||
return cap_ptrace(parent, child);
|
||||
return cap_ptrace(parent, child, mode);
|
||||
}
|
||||
|
||||
static inline int security_capget(struct task_struct *target,
|
||||
@@ -1881,6 +1872,12 @@ static inline int security_sb_kern_mount(struct super_block *sb, void *data)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int security_sb_show_options(struct seq_file *m,
|
||||
struct super_block *sb)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int security_sb_statfs(struct dentry *dentry)
|
||||
{
|
||||
return 0;
|
||||
@@ -1927,12 +1924,6 @@ static inline int security_sb_pivotroot(struct path *old_path,
|
||||
static inline void security_sb_post_pivotroot(struct path *old_path,
|
||||
struct path *new_path)
|
||||
{ }
|
||||
static inline int security_sb_get_mnt_opts(const struct super_block *sb,
|
||||
struct security_mnt_opts *opts)
|
||||
{
|
||||
security_init_mnt_opts(opts);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int security_sb_set_mnt_opts(struct super_block *sb,
|
||||
struct security_mnt_opts *opts)
|
||||
|
@@ -7,9 +7,18 @@
 */

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/cpumask.h>

extern void cpu_idle(void);

struct call_single_data {
        struct list_head list;
        void (*func) (void *info);
        void *info;
        unsigned int flags;
};

#ifdef CONFIG_SMP

#include <linux/preempt.h>
@@ -52,15 +61,34 @@ extern void smp_cpus_done(unsigned int max_cpus);
/*
 * Call a function on all other processors
 */
int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);

int smp_call_function(void(*func)(void *info), void *info, int wait);
int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
                                int wait);
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
                                int retry, int wait);
                                int wait);
void __smp_call_function_single(int cpuid, struct call_single_data *data);

/*
 * Generic and arch helpers
 */
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
void generic_smp_call_function_single_interrupt(void);
void generic_smp_call_function_interrupt(void);
void init_call_single_data(void);
void ipi_call_lock(void);
void ipi_call_unlock(void);
void ipi_call_lock_irq(void);
void ipi_call_unlock_irq(void);
#else
static inline void init_call_single_data(void)
{
}
#endif

/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait);
int on_each_cpu(void (*func) (void *info), void *info, int wait);
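
With the retry parameter dropped throughout, callers now pass only the wait flag. A minimal before/after sketch of the new calling convention (bump_counter and its atomic_t counter are hypothetical; assumes the usual <linux/smp.h> and atomic headers):

static void bump_counter(void *info)
{
        atomic_inc((atomic_t *)info);           /* runs on every CPU */
}

static void poke_all_cpus(atomic_t *ctr)
{
        /* Previously: on_each_cpu(bump_counter, ctr, 0, 1); */
        on_each_cpu(bump_counter, ctr, 1);      /* wait == 1: block until done */
}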

#define MSG_ALL_BUT_SELF        0x8000  /* Assume <32768 CPU's */
#define MSG_ALL                 0x8001
@@ -90,9 +118,9 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
{
        return 0;
}
#define smp_call_function(func, info, retry, wait) \
#define smp_call_function(func, info, wait) \
                        (up_smp_call_function(func, info))
#define on_each_cpu(func,info,retry,wait) \
#define on_each_cpu(func,info,wait) \
        ({ \
                local_irq_disable(); \
                func(info); \
@@ -102,7 +130,7 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
        })
static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus()                      1
#define smp_prepare_boot_cpu()                  do {} while (0)
#define smp_call_function_single(cpuid, func, info, retry, wait) \
#define smp_call_function_single(cpuid, func, info, wait) \
        ({ \
                WARN_ON(cpuid != 0); \
                local_irq_disable(); \
@@ -112,7 +140,9 @@ static inline void smp_send_reschedule(int cpu) { }
        })
#define smp_call_function_mask(mask, func, info, wait) \
                        (up_smp_call_function(func, info))

static inline void init_call_single_data(void)
{
}
#endif /* !SMP */

/*

@@ -27,11 +27,24 @@ static inline int reacquire_kernel_lock(struct task_struct *task)
extern void __lockfunc lock_kernel(void)        __acquires(kernel_lock);
extern void __lockfunc unlock_kernel(void)      __releases(kernel_lock);

/*
 * Various legacy drivers don't really need the BKL in a specific
 * function, but they *do* need to know that the BKL became available.
 * This function just avoids wrapping a bunch of lock/unlock pairs
 * around code which doesn't really need it.
 */
static inline void cycle_kernel_lock(void)
{
        lock_kernel();
        unlock_kernel();
}
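
For illustration, a hedged sketch of the kind of legacy-driver call site this helper targets (legacy_open is a hypothetical example, not taken from the patch): the open path only needs to serialize once against any BKL holder still running, then proceed unlocked.

static int legacy_open(struct inode *inode, struct file *filp)
{
        /* Wait for any current BKL holder to finish, then run lock-free. */
        cycle_kernel_lock();
        return 0;
}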

#else

#define lock_kernel()                           do { } while(0)
#define unlock_kernel()                         do { } while(0)
#define release_kernel_lock(task)               do { } while(0)
#define cycle_kernel_lock()                     do { } while(0)
#define reacquire_kernel_lock(task)             0
#define kernel_locked()                         1

@@ -23,6 +23,15 @@ struct mmc_spi_platform_data {
        /* sense switch on sd cards */
        int (*get_ro)(struct device *);

        /*
         * If board does not use CD interrupts, driver can optimize polling
         * using this function.
         */
        int (*get_cd)(struct device *);

        /* Capabilities to pass into mmc core (e.g. MMC_CAP_NEEDS_POLL). */
        unsigned long caps;

        /* how long to debounce card detect, in msecs */
        u16 detect_delay;

@@ -42,7 +42,8 @@ struct rpc_clnt {

        unsigned int            cl_softrtry : 1,/* soft timeouts */
                                cl_discrtry : 1,/* disconnect before retry */
                                cl_autobind : 1;/* use getport() */
                                cl_autobind : 1,/* use getport() */
                                cl_chatty   : 1;/* be verbose */

        struct rpc_rtt *        cl_rtt;         /* RTO estimator data */
        const struct rpc_timeout *cl_timeout;   /* Timeout strategy */
@@ -114,6 +115,7 @@ struct rpc_create_args {
#define RPC_CLNT_CREATE_NONPRIVPORT     (1UL << 3)
#define RPC_CLNT_CREATE_NOPING          (1UL << 4)
#define RPC_CLNT_CREATE_DISCRTRY        (1UL << 5)
#define RPC_CLNT_CREATE_QUIET           (1UL << 6)

struct rpc_clnt *rpc_create(struct rpc_create_args *args);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
@@ -123,6 +125,9 @@ void rpc_shutdown_client(struct rpc_clnt *);
void            rpc_release_client(struct rpc_clnt *);

int             rpcb_register(u32, u32, int, unsigned short, int *);
int             rpcb_v4_register(const u32 program, const u32 version,
                                 const struct sockaddr *address,
                                 const char *netid, int *result);
int             rpcb_getport_sync(struct sockaddr_in *, u32, u32, int);
void            rpcb_getport_async(struct rpc_task *);

@@ -135,7 +135,6 @@ struct rpc_task_setup {
#define RPC_IS_SWAPPER(t)       ((t)->tk_flags & RPC_TASK_SWAPPER)
#define RPC_DO_ROOTOVERRIDE(t)  ((t)->tk_flags & RPC_TASK_ROOTCREDS)
#define RPC_ASSASSINATED(t)     ((t)->tk_flags & RPC_TASK_KILLED)
#define RPC_DO_CALLBACK(t)      ((t)->tk_callback != NULL)
#define RPC_IS_SOFT(t)          ((t)->tk_flags & RPC_TASK_SOFT)

#define RPC_TASK_RUNNING        0

@@ -86,6 +86,11 @@ typedef int __bitwise suspend_state_t;
 *      that implement @begin(), but platforms implementing @begin() should
 *      also provide a @end() which cleans up transitions aborted before
 *      @enter().
 *
 * @recover: Recover the platform from a suspend failure.
 *      Called by the PM core if the suspending of devices fails.
 *      This callback is optional and should only be implemented by platforms
 *      which require special recovery actions in that situation.
 */
struct platform_suspend_ops {
        int (*valid)(suspend_state_t state);
@@ -94,6 +99,7 @@ struct platform_suspend_ops {
        int (*enter)(suspend_state_t state);
        void (*finish)(void);
        void (*end)(void);
        void (*recover)(void);
};

#ifdef CONFIG_SUSPEND
@@ -149,7 +155,7 @@ extern void mark_free_pages(struct zone *zone);
 * The methods in this structure allow a platform to carry out special
 * operations required by it during a hibernation transition.
 *
 * All the methods below must be implemented.
 * All the methods below, except for @recover(), must be implemented.
 *
 * @begin: Tell the platform driver that we're starting hibernation.
 *      Called right after shrinking memory and before freezing devices.
@@ -189,6 +195,11 @@ extern void mark_free_pages(struct zone *zone);
 * @restore_cleanup: Clean up after a failing image restoration.
 *      Called right after the nonboot CPUs have been enabled and before
 *      thawing devices (runs with IRQs on).
 *
 * @recover: Recover the platform from a failure to suspend devices.
 *      Called by the PM core if the suspending of devices during hibernation
 *      fails.  This callback is optional and should only be implemented by
 *      platforms which require special recovery actions in that situation.
 */
struct platform_hibernation_ops {
        int (*begin)(void);
@@ -200,6 +211,7 @@ struct platform_hibernation_ops {
        void (*leave)(void);
        int (*pre_restore)(void);
        void (*restore_cleanup)(void);
        void (*recover)(void);
};

#ifdef CONFIG_HIBERNATION

@@ -179,4 +179,17 @@ void arch_update_cpu_topology(void);
#endif
#endif /* CONFIG_NUMA */

#ifndef topology_physical_package_id
#define topology_physical_package_id(cpu)       ((void)(cpu), -1)
#endif
#ifndef topology_core_id
#define topology_core_id(cpu)                   ((void)(cpu), 0)
#endif
#ifndef topology_thread_siblings
#define topology_thread_siblings(cpu)           cpumask_of_cpu(cpu)
#endif
#ifndef topology_core_siblings
#define topology_core_siblings(cpu)             cpumask_of_cpu(cpu)
#endif

#endif /* _LINUX_TOPOLOGY_H */

@@ -63,6 +63,7 @@ struct writeback_control {
        unsigned for_writepages:1;      /* This is a writepages() call */
        unsigned range_cyclic:1;        /* range_start is cyclic */
        unsigned more_io:1;             /* more io to be dispatched */
        unsigned range_cont:1;
};

/*
@@ -105,6 +106,8 @@ extern int vm_highmem_is_dirtyable;
extern int block_dump;
extern int laptop_mode;

extern unsigned long determine_dirtyable_memory(void);

extern int dirty_ratio_handler(struct ctl_table *table, int write,
                struct file *filp, void __user *buffer, size_t *lenp,
                loff_t *ppos);