Merge branch 'percpu-for-linus' into percpu-for-next

Conflicts:
	arch/sparc/kernel/smp_64.c
	arch/x86/kernel/cpu/perf_counter.c
	arch/x86/kernel/setup_percpu.c
	drivers/cpufreq/cpufreq_ondemand.c
	mm/percpu.c

Conflicts in the core and arch percpu code are mostly from commit
ed78e1e078dd44249f88b1dd8c76dafb39567161, which replaced many uses of
num_possible_cpus() with nr_cpu_ids.  As the for-next branch has moved
all the first chunk allocators into mm/percpu.c, those changes are moved
from the arch code to mm/percpu.c.

Signed-off-by: Tejun Heo <tj@kernel.org>
Author: Tejun Heo
Date:   2009-08-14 14:41:02 +09:00

1985 changed files with 49463 additions and 28163 deletions


@@ -229,9 +229,14 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi)
(1 << BDI_async_congested));
}
void clear_bdi_congested(struct backing_dev_info *bdi, int rw);
void set_bdi_congested(struct backing_dev_info *bdi, int rw);
long congestion_wait(int rw, long timeout);
enum {
BLK_RW_ASYNC = 0,
BLK_RW_SYNC = 1,
};
void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
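
As a side note on the rw -> sync rename above, a minimal hedged sketch of a
caller backing off on writeback congestion; bdi_write_congested() and the
HZ/50 timeout are assumptions for illustration, not part of this diff:

static void example_writeback_backoff(struct backing_dev_info *bdi)
{
	/* wait up to ~20ms for async (writeback) congestion to clear */
	if (bdi_write_congested(bdi))
		congestion_wait(BLK_RW_ASYNC, HZ / 50);
}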


@@ -70,11 +70,6 @@ enum rq_cmd_type_bits {
REQ_TYPE_ATA_PC,
};
enum {
BLK_RW_ASYNC = 0,
BLK_RW_SYNC = 1,
};
/*
* For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
* sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
@@ -723,6 +718,7 @@ struct rq_map_data {
int nr_entries;
unsigned long offset;
int null_mapped;
int from_user;
};
struct req_iterator {
@@ -779,18 +775,18 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
* congested queues, and wake up anyone who was waiting for requests to be
* put back.
*/
static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
clear_bdi_congested(&q->backing_dev_info, rw);
clear_bdi_congested(&q->backing_dev_info, sync);
}
/*
* A queue has just entered congestion. Flag that in the queue's VM-visible
* state flags and increment the global counter of congested queues.
*/
static inline void blk_set_queue_congested(struct request_queue *q, int rw)
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
set_bdi_congested(&q->backing_dev_info, rw);
set_bdi_congested(&q->backing_dev_info, sync);
}
extern void blk_start_queue(struct request_queue *q);
@@ -917,6 +913,7 @@ extern void blk_queue_logical_block_size(struct request_queue *, unsigned short)
extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_alignment_offset(struct request_queue *q,
unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);


@@ -140,29 +140,6 @@ void cb710_dump_regs(struct cb710_chip *chip, unsigned dump);
#include <linux/highmem.h>
#include <linux/scatterlist.h>
/**
* cb710_sg_miter_stop_writing - stop mapping iteration after writing
* @miter: sg mapping iter to be stopped
*
* Description:
* Stops mapping iterator @miter. @miter should have been
* started using sg_miter_start(). A stopped iteration can be
* resumed by calling sg_miter_next() on it. This is useful when
* resources (kmap) need to be released during iteration.
*
* This is a convenience wrapper that will be optimized out for arches
* that don't need flush_kernel_dcache_page().
*
* Context:
* IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
*/
static inline void cb710_sg_miter_stop_writing(struct sg_mapping_iter *miter)
{
if (miter->page)
flush_kernel_dcache_page(miter->page);
sg_miter_stop(miter);
}
/*
* 32-bit PIO mapping sg iterator
*
@@ -171,12 +148,12 @@ static inline void cb710_sg_miter_stop_writing(struct sg_mapping_iter *miter)
* without DMA support).
*
* Best-case reading (transfer from device):
* sg_miter_start();
* sg_miter_start(, SG_MITER_TO_SG);
* cb710_sg_dwiter_write_from_io();
* cb710_sg_miter_stop_writing();
* sg_miter_stop();
*
* Best-case writing (transfer to device):
* sg_miter_start();
* sg_miter_start(, SG_MITER_FROM_SG);
* cb710_sg_dwiter_read_to_io();
* sg_miter_stop();
*/
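
For context, a hedged sketch of the miter loop the comment above alludes to,
using the new SG_MITER_TO_SG flag in place of the removed wrapper;
fifo_read() is a hypothetical device helper:

static void example_read_into_sg(struct scatterlist *sgl, unsigned int nents,
				 void (*fifo_read)(void *buf, size_t len))
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);
	while (sg_miter_next(&miter))
		fifo_read(miter.addr, miter.length);	/* fill each mapped chunk */
	sg_miter_stop(&miter);	/* flushes mapped pages back to the sg list */
}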


@@ -179,14 +179,11 @@ struct cgroup {
*/
struct list_head release_list;
/* pids_mutex protects the fields below */
/* pids_mutex protects pids_list and cached pid arrays. */
struct rw_semaphore pids_mutex;
/* Array of process ids in the cgroup */
pid_t *tasks_pids;
/* How many files are using the current tasks_pids array */
int pids_use_count;
/* Length of the current tasks_pids array */
int pids_length;
/* Linked list of struct cgroup_pids */
struct list_head pids_list;
/* For RCU-protected deletion */
struct rcu_head rcu_head;
@@ -365,6 +362,23 @@ int cgroup_task_count(const struct cgroup *cgrp);
/* Return true if cgrp is a descendant of the task's cgroup */
int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task);
/*
* When the subsys has to access css and may add a permanent refcnt to css,
* it should take care of races with rmdir(). The following set of
* functions is for stopping/restarting rmdir if necessary.
* Because these will call css_get/put, "css" should be an alive css.
*
* cgroup_exclude_rmdir();
* ...do some jobs which may access arbitrary empty cgroup
* cgroup_release_and_wakeup_rmdir();
*
* When someone removes a cgroup while cgroup_exclude_rmdir() holds it,
* it sleeps and cgroup_release_and_wakeup_rmdir() will wake it up.
*/
void cgroup_exclude_rmdir(struct cgroup_subsys_state *css);
void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css);
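
A minimal sketch expanding the usage pattern from the comment above; css is
assumed to be a live cgroup_subsys_state the subsystem already holds:

static void example_access_cgroup(struct cgroup_subsys_state *css)
{
	cgroup_exclude_rmdir(css);
	/* ... do work that may access an arbitrary (even empty) cgroup ... */
	cgroup_release_and_wakeup_rmdir(css);
}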
/*
* Control Group subsystem type.
* See Documentation/cgroups/cgroups.txt for details


@@ -143,12 +143,3 @@ extern void clockevents_notify(unsigned long reason, void *arg);
#endif
#endif
#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern ktime_t clockevents_get_next_event(int cpu);
#else
static inline ktime_t clockevents_get_next_event(int cpu)
{
return (ktime_t) { .tv64 = KTIME_MAX };
}
#endif


@@ -293,7 +293,12 @@ static inline int clocksource_enable(struct clocksource *cs)
if (cs->enable)
ret = cs->enable(cs);
/* save mult_orig on enable */
/*
* The frequency may have changed while the clocksource
* was disabled. If so the code in ->enable() must update
* the mult value to reflect the new frequency. Make sure
* mult_orig follows this change.
*/
cs->mult_orig = cs->mult;
return ret;
@@ -309,6 +314,13 @@ static inline int clocksource_enable(struct clocksource *cs)
*/
static inline void clocksource_disable(struct clocksource *cs)
{
/*
* Save mult_orig in mult so clocksource_enable() can
* restore the value regardless if ->enable() updates
* the value of mult or not.
*/
cs->mult = cs->mult_orig;
if (cs->disable)
cs->disable(cs);
}


@@ -89,7 +89,6 @@ struct vc_data {
unsigned int vc_need_wrap : 1;
unsigned int vc_can_do_color : 1;
unsigned int vc_report_mouse : 2;
unsigned int vc_kmalloced : 1;
unsigned char vc_utf : 1; /* Unicode UTF-8 encoding */
unsigned char vc_utf_count;
int vc_utf_char;


@@ -3,7 +3,6 @@
#ifdef CONFIG_CRASH_DUMP
#include <linux/kexec.h>
#include <linux/smp_lock.h>
#include <linux/device.h>
#include <linux/proc_fs.h>


@@ -1,31 +1,37 @@
#ifndef DECOMPRESS_GENERIC_H
#define DECOMPRESS_GENERIC_H
/* Minimal chunksize to be read.
*Bzip2 prefers at least 4096
*Lzma prefers 0x10000 */
#define COMPR_IOBUF_SIZE 4096
typedef int (*decompress_fn) (unsigned char *inbuf, int len,
int(*fill)(void*, unsigned int),
int(*writebb)(void*, unsigned int),
unsigned char *output,
int(*flush)(void*, unsigned int),
unsigned char *outbuf,
int *posp,
void(*error)(char *x));
/* inbuf - input buffer
*len - len of pre-read data in inbuf
*fill - function to fill inbuf if empty
*writebb - function to write out outbug
*fill - function to fill inbuf when empty
*flush - function to write out outbuf
*outbuf - output buffer
*posp - if non-null, input position (number of bytes read) will be
* returned here
*
*If len != 0, the inbuf is initialized (with as much data), and fill
*should not be called
*If len = 0, the inbuf is allocated, but empty. Its size is IOBUF_SIZE
*fill should be called (repeatedly...) to read data, at most IOBUF_SIZE
*If len != 0, inbuf should contain all the necessary input data, and fill
*should be NULL
*If len = 0, inbuf can be NULL, in which case the decompressor will allocate
*the input buffer. If inbuf != NULL it must be at least XXX_IOBUF_SIZE bytes.
*fill will be called (repeatedly...) to read data, at most XXX_IOBUF_SIZE
*bytes should be read per call. Replace XXX with the appropriate decompressor
*name, i.e. LZMA_IOBUF_SIZE.
*
*If flush = NULL, outbuf must be large enough to buffer all the expected
*output. If flush != NULL, the output buffer will be allocated by the
*decompressor (outbuf = NULL), and the flush function will be called to
*flush the output buffer at the appropriate time (decompressor and stream
*dependent).
*/
/* Utility routine to detect the decompression method */
decompress_fn decompress_method(const unsigned char *inbuf, int len,
const char **name);
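
A hedged sketch of driving this API over an in-memory image, relying on the
flush == NULL semantics documented above; the helper names and error
handling are illustrative only:

static void example_error(char *msg)
{
	/* report the decompressor's error string somewhere appropriate */
}

static int example_decompress(unsigned char *in, int in_len,
			      unsigned char *out)
{
	const char *name;
	decompress_fn fn = decompress_method(in, in_len, &name);

	if (!fn)
		return -1;	/* unrecognized compression format */
	/* flush == NULL: out must be large enough for the whole output */
	return fn(in, in_len, NULL, NULL, out, NULL, example_error);
}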


@@ -84,7 +84,7 @@ typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
struct dm_dev *dev,
sector_t physical_start,
sector_t start, sector_t len,
void *data);
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
@@ -104,7 +104,7 @@ void dm_error(const char *message);
* Combine device limits.
*/
int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
sector_t start, void *data);
sector_t start, sector_t len, void *data);
struct dm_dev {
struct block_device *bdev;


@@ -25,8 +25,6 @@
#include <asm/atomic.h>
#include <asm/device.h>
#define BUS_ID_SIZE 20
struct device;
struct device_private;
struct device_driver;


@@ -122,9 +122,10 @@ static inline void elf_core_copy_kernel_regs(elf_gregset_t *elfregs, struct pt_r
static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
{
#ifdef ELF_CORE_COPY_TASK_REGS
#if defined (ELF_CORE_COPY_TASK_REGS)
return ELF_CORE_COPY_TASK_REGS(t, elfregs);
#elif defined (task_pt_regs)
elf_core_copy_regs(elfregs, task_pt_regs(t));
#endif
return 0;
}


@@ -874,7 +874,7 @@ struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result,
int create, int extend_disksize);
int create);
extern struct inode *ext3_iget(struct super_block *, unsigned long);
extern int ext3_write_inode (struct inode *, int);


@@ -127,6 +127,7 @@ struct fw_card {
struct delayed_work work;
int bm_retries;
int bm_generation;
__be32 bm_transaction_data[2];
bool broadcast_channel_allocated;
u32 broadcast_channel;


@@ -0,0 +1,47 @@
#ifndef _FLEX_ARRAY_H
#define _FLEX_ARRAY_H
#include <linux/types.h>
#include <asm/page.h>
#define FLEX_ARRAY_PART_SIZE PAGE_SIZE
#define FLEX_ARRAY_BASE_SIZE PAGE_SIZE
struct flex_array_part;
/*
* This is meant to replace cases where an array-like
* structure has gotten too big to fit into kmalloc()
* and the developer is getting tempted to use
* vmalloc().
*/
struct flex_array {
union {
struct {
int element_size;
int total_nr_elements;
struct flex_array_part *parts[0];
};
/*
* This little trick makes sure that
* sizeof(flex_array) == PAGE_SIZE
*/
char padding[FLEX_ARRAY_BASE_SIZE];
};
};
#define FLEX_ARRAY_INIT(size, total) { { {\
.element_size = (size), \
.total_nr_elements = (total), \
} } }
struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags);
int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags);
void flex_array_free(struct flex_array *fa);
void flex_array_free_parts(struct flex_array *fa);
int flex_array_put(struct flex_array *fa, int element_nr, void *src,
gfp_t flags);
void *flex_array_get(struct flex_array *fa, int element_nr);
#endif /* _FLEX_ARRAY_H */
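
A hedged usage sketch of the flex_array API declared above; the element
type, index, and sizes are assumptions for illustration:

struct example_elem {
	int id;
};

static int example_flex_array(void)
{
	struct flex_array *fa;
	struct example_elem e = { .id = 42 }, *p;
	int err;

	fa = flex_array_alloc(sizeof(e), 128, GFP_KERNEL);
	if (!fa)
		return -ENOMEM;
	err = flex_array_put(fa, 7, &e, GFP_KERNEL);	/* may allocate a part */
	if (!err) {
		p = flex_array_get(fa, 7);
		/* ... use p->id ... */
	}
	flex_array_free(fa);	/* frees the parts and the base structure */
	return err;
}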


@@ -1946,6 +1946,7 @@ extern void putname(const char *name);
extern int register_blkdev(unsigned int, const char *);
extern void unregister_blkdev(unsigned int, const char *);
extern struct block_device *bdget(dev_t);
extern struct block_device *bdgrab(struct block_device *bdev);
extern void bd_set_size(struct block_device *, loff_t size);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
@@ -2136,7 +2137,7 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
extern struct inode * inode_init_always(struct super_block *, struct inode *);
extern int inode_init_always(struct super_block *, struct inode *);
extern void inode_init_once(struct inode *);
extern void inode_add_to_lists(struct super_block *, struct inode *);
extern void iput(struct inode *);
@@ -2163,6 +2164,7 @@ extern void __iget(struct inode * inode);
extern void iget_failed(struct inode *);
extern void clear_inode(struct inode *);
extern void destroy_inode(struct inode *);
extern void __destroy_inode(struct inode *);
extern struct inode *new_inode(struct super_block *);
extern int should_remove_suid(struct dentry *);
extern int file_remove_suid(struct file *);


@@ -352,7 +352,7 @@ extern void fsnotify_unmount_inodes(struct list_head *list);
/* put here because inotify does some weird stuff when destroying watches */
extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
void *data, int data_is, const char *name,
u32 cookie);
u32 cookie, gfp_t gfp);
#else


@@ -89,7 +89,9 @@ enum print_line_t {
TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */
};
void tracing_generic_entry_update(struct trace_entry *entry,
unsigned long flags,
int pc);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(int type, unsigned long len,
unsigned long flags, int pc);
@@ -119,11 +121,9 @@ struct ftrace_event_call {
void *filter;
void *mod;
#ifdef CONFIG_EVENT_PROFILE
atomic_t profile_count;
int (*profile_enable)(struct ftrace_event_call *);
void (*profile_disable)(struct ftrace_event_call *);
#endif
atomic_t profile_count;
int (*profile_enable)(struct ftrace_event_call *);
void (*profile_disable)(struct ftrace_event_call *);
};
#define MAX_FILTER_PRED 32


@@ -2,7 +2,9 @@
#define LINUX_HARDIRQ_H
#include <linux/preempt.h>
#ifdef CONFIG_PREEMPT
#include <linux/smp_lock.h>
#endif
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <asm/hardirq.h>


@@ -448,7 +448,7 @@ extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
{
if (likely(!timer->start_pid))
if (likely(!timer->start_site))
return;
timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
timer->function, timer->start_comm, 0);


@@ -78,6 +78,7 @@
#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */
#define ETH_P_TIPC 0x88CA /* TIPC */
#define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */
#define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */
#define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */
#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */


@@ -82,7 +82,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
#define IN_DEV_RPFILTER(in_dev) IN_DEV_ANDCONF((in_dev), RP_FILTER)
#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
ACCEPT_SOURCE_ROUTE)
#define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY)


@@ -183,5 +183,8 @@ extern struct cred init_cred;
LIST_HEAD_INIT(cpu_timers[2]), \
}
/* Attach to the init_task data structure for proper alignment */
#define __init_task_data __attribute__((__section__(".data.init_task")))
#endif


@@ -0,0 +1,66 @@
#ifndef _MATRIX_KEYPAD_H
#define _MATRIX_KEYPAD_H
#include <linux/types.h>
#include <linux/input.h>
#define MATRIX_MAX_ROWS 16
#define MATRIX_MAX_COLS 16
#define KEY(row, col, val) ((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\
(((col) & (MATRIX_MAX_COLS - 1)) << 16) |\
(val & 0xffff))
#define KEY_ROW(k) (((k) >> 24) & 0xff)
#define KEY_COL(k) (((k) >> 16) & 0xff)
#define KEY_VAL(k) ((k) & 0xffff)
#define MATRIX_SCAN_CODE(row, col, row_shift) (((row) << (row_shift)) + (col))
/**
* struct matrix_keymap_data - keymap for matrix keyboards
* @keymap: pointer to array of uint32 values encoded with KEY() macro
* representing keymap
* @keymap_size: number of entries (initialized) in this keymap
*
* This structure is supposed to be used by platform code to supply
* keymaps to drivers that implement matrix-like keypads/keyboards.
*/
struct matrix_keymap_data {
const uint32_t *keymap;
unsigned int keymap_size;
};
/**
* struct matrix_keypad_platform_data - platform-dependent keypad data
* @keymap_data: pointer to &matrix_keymap_data
* @row_gpios: pointer to array of gpio numbers representing rows
* @col_gpios: pointer to array of gpio numbers representing columns
* @num_row_gpios: actual number of row gpios used by device
* @num_col_gpios: actual number of col gpios used by device
* @col_scan_delay_us: delay, measured in microseconds, that is
* needed before we can scan the keypad after activating a column gpio
* @debounce_ms: debounce interval in milliseconds
*
* This structure represents platform-specific data that is used by the
* matrix_keypad driver to perform proper initialization.
*/
struct matrix_keypad_platform_data {
const struct matrix_keymap_data *keymap_data;
const unsigned int *row_gpios;
const unsigned int *col_gpios;
unsigned int num_row_gpios;
unsigned int num_col_gpios;
unsigned int col_scan_delay_us;
/* key debounce interval in milli-second */
unsigned int debounce_ms;
bool active_low;
bool wakeup;
};
#endif /* _MATRIX_KEYPAD_H */
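
A hedged sketch of board code feeding this structure to the matrix_keypad
driver; the GPIO numbers, key codes, and 2x2 geometry are placeholders:

static const uint32_t example_keymap[] = {
	KEY(0, 0, KEY_A), KEY(0, 1, KEY_B),
	KEY(1, 0, KEY_C), KEY(1, 1, KEY_D),
};

static const struct matrix_keymap_data example_keymap_data = {
	.keymap		= example_keymap,
	.keymap_size	= ARRAY_SIZE(example_keymap),
};

static const unsigned int example_row_gpios[] = { 10, 11 };
static const unsigned int example_col_gpios[] = { 20, 21 };

static struct matrix_keypad_platform_data example_keypad_pdata = {
	.keymap_data	= &example_keymap_data,
	.row_gpios	= example_row_gpios,
	.col_gpios	= example_col_gpios,
	.num_row_gpios	= ARRAY_SIZE(example_row_gpios),
	.num_col_gpios	= ARRAY_SIZE(example_col_gpios),
	.debounce_ms	= 20,
};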


@@ -14,6 +14,7 @@
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
@@ -64,11 +65,13 @@
* IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
* IRQTF_DIED - handler thread died
* IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
* IRQTF_AFFINITY - irq thread is requested to adjust affinity
*/
enum {
IRQTF_RUNTHREAD,
IRQTF_DIED,
IRQTF_WARNED,
IRQTF_AFFINITY,
};
typedef irqreturn_t (*irq_handler_t)(int, void *);
@@ -517,6 +520,31 @@ extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data);
struct tasklet_hrtimer {
struct hrtimer timer;
struct tasklet_struct tasklet;
enum hrtimer_restart (*function)(struct hrtimer *);
};
extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
enum hrtimer_restart (*function)(struct hrtimer *),
clockid_t which_clock, enum hrtimer_mode mode);
static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
const enum hrtimer_mode mode)
{
return hrtimer_start(&ttimer->timer, time, mode);
}
static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
hrtimer_cancel(&ttimer->timer);
tasklet_kill(&ttimer->tasklet);
}
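
A hedged sketch of the new tasklet_hrtimer helpers just added; the callback
body and the 10ms relative timeout are assumptions:

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
	/* runs from tasklet (softirq) context via the wrapper above */
	return HRTIMER_NORESTART;
}

static struct tasklet_hrtimer example_th;

static void example_arm_timer(void)
{
	tasklet_hrtimer_init(&example_th, example_timer_fn,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tasklet_hrtimer_start(&example_th,
			      ktime_set(0, 10 * NSEC_PER_MSEC),
			      HRTIMER_MODE_REL);
}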
/*
* Autoprobing for irqs:
*


@@ -92,7 +92,7 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
* a race).
*/
if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
atomic_long_inc(&ioc->refcount);
atomic_inc(&ioc->nr_tasks);
return ioc;
}


@@ -27,6 +27,7 @@ extern void kmemleak_init(void);
extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
gfp_t gfp);
extern void kmemleak_free(const void *ptr);
extern void kmemleak_free_part(const void *ptr, size_t size);
extern void kmemleak_padding(const void *ptr, unsigned long offset,
size_t size);
extern void kmemleak_not_leak(const void *ptr);
@@ -71,6 +72,9 @@ static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
static inline void kmemleak_free(const void *ptr)
{
}
static inline void kmemleak_free_part(const void *ptr, size_t size)
{
}
static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
{
}


@@ -110,6 +110,7 @@ struct kvm_memory_slot {
struct kvm_kernel_irq_routing_entry {
u32 gsi;
u32 type;
int (*set)(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int level);
union {


@@ -1,5 +1,7 @@
/* Things the lguest guest needs to know. Note: like all lguest interfaces,
* this is subject to wild and random change between versions. */
/*
* Things the lguest guest needs to know. Note: like all lguest interfaces,
* this is subject to wild and random change between versions.
*/
#ifndef _LINUX_LGUEST_H
#define _LINUX_LGUEST_H
@@ -11,32 +13,41 @@
#define LG_CLOCK_MIN_DELTA 100UL
#define LG_CLOCK_MAX_DELTA ULONG_MAX
/*G:032 The second method of communicating with the Host is via "struct
/*G:031
* The second method of communicating with the Host is via "struct
* lguest_data". Once the Guest's initialization hypercall tells the Host where
* this is, the Guest and Host both publish information in it. :*/
struct lguest_data
{
/* 512 == enabled (same as eflags in normal hardware). The Guest
* changes interrupts so often that a hypercall is too slow. */
* this is, the Guest and Host both publish information in it.
:*/
struct lguest_data {
/*
* 512 == enabled (same as eflags in normal hardware). The Guest
* changes interrupts so often that a hypercall is too slow.
*/
unsigned int irq_enabled;
/* Fine-grained interrupt disabling by the Guest */
DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS);
/* The Host writes the virtual address of the last page fault here,
/*
* The Host writes the virtual address of the last page fault here,
* which saves the Guest a hypercall. CR2 is the native register where
* this address would normally be found. */
* this address would normally be found.
*/
unsigned long cr2;
/* Wallclock time set by the Host. */
struct timespec time;
/* Interrupt pending set by the Host. The Guest should do a hypercall
* if it re-enables interrupts and sees this set (to X86_EFLAGS_IF). */
/*
* Interrupt pending set by the Host. The Guest should do a hypercall
* if it re-enables interrupts and sees this set (to X86_EFLAGS_IF).
*/
int irq_pending;
/* Async hypercall ring. Instead of directly making hypercalls, we can
/*
* Async hypercall ring. Instead of directly making hypercalls, we can
* place them in here for processing the next time the Host wants.
* This batching can be quite efficient. */
* This batching can be quite efficient.
*/
/* 0xFF == done (set by Host), 0 == pending (set by Guest). */
u8 hcall_status[LHCALL_RING_SIZE];


@@ -29,8 +29,10 @@ struct lguest_device_desc {
__u8 type;
/* The number of virtqueues (first in config array) */
__u8 num_vq;
/* The number of bytes of feature bits. Multiply by 2: one for host
* features and one for Guest acknowledgements. */
/*
* The number of bytes of feature bits. Multiply by 2: one for host
* features and one for Guest acknowledgements.
*/
__u8 feature_len;
/* The number of bytes of the config array after virtqueues. */
__u8 config_len;
@@ -39,8 +41,10 @@ struct lguest_device_desc {
__u8 config[0];
};
/*D:135 This is how we expect the device configuration field for a virtqueue
* to be laid out in config space. */
/*D:135
* This is how we expect the device configuration field for a virtqueue
* to be laid out in config space.
*/
struct lguest_vqconfig {
/* The number of entries in the virtio_ring */
__u16 num;
@@ -61,7 +65,9 @@ enum lguest_req
LHREQ_EVENTFD, /* + address, fd. */
};
/* The alignment to use between consumer and producer parts of vring.
* x86 pagesize for historical reasons. */
/*
* The alignment to use between consumer and producer parts of vring.
* x86 pagesize for historical reasons.
*/
#define LGUEST_VRING_ALIGN 4096
#endif /* _LINUX_LGUEST_LAUNCHER */


@@ -385,6 +385,7 @@ enum {
not multiple of 16 bytes */
ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */
ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
@@ -588,6 +589,7 @@ struct ata_device {
#endif
/* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */
u64 n_sectors; /* size of device, if ATA */
u64 n_native_sectors; /* native size, if ATA */
unsigned int class; /* ATA_DEV_xxx */
unsigned long unpark_deadline;


@@ -21,6 +21,15 @@
#define __page_aligned_data __section(.data.page_aligned) __aligned(PAGE_SIZE)
#define __page_aligned_bss __section(.bss.page_aligned) __aligned(PAGE_SIZE)
/*
* For assembly routines.
*
* Note when using these that you must specify the appropriate
* alignment directives yourself
*/
#define __PAGE_ALIGNED_DATA .section ".data.page_aligned", "aw"
#define __PAGE_ALIGNED_BSS .section ".bss.page_aligned", "aw"
/*
* This is used by architectures to keep arguments on the stack
* untouched by the compiler by keeping them live until the end.


@@ -2,10 +2,9 @@
#define _NAMESPACE_H_
#ifdef __KERNEL__
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/nsproxy.h>
#include <linux/path.h>
#include <linux/seq_file.h>
#include <linux/wait.h>
struct mnt_namespace {
atomic_t count;
@@ -28,14 +27,6 @@ extern struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt);
extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
struct fs_struct *);
extern void put_mnt_ns(struct mnt_namespace *ns);
static inline void exit_mnt_ns(struct task_struct *p)
{
struct mnt_namespace *ns = p->nsproxy->mnt_ns;
if (ns)
put_mnt_ns(ns);
}
static inline void get_mnt_ns(struct mnt_namespace *ns)
{
atomic_inc(&ns->count);


@@ -251,7 +251,7 @@ struct mtd_info {
static inline struct mtd_info *dev_to_mtd(struct device *dev)
{
return dev ? container_of(dev, struct mtd_info, dev) : NULL;
return dev ? dev_get_drvdata(dev) : NULL;
}
static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)


@@ -47,6 +47,8 @@ struct mtd_partition {
#define MTDPART_SIZ_FULL (0)
struct mtd_info;
int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);


@@ -473,7 +473,6 @@ extern int nfs_writepages(struct address_space *, struct writeback_control *);
extern int nfs_flush_incompatible(struct file *file, struct page *page);
extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
extern void nfs_writedata_release(void *);
/*
* Try to write back everything synchronously (but check the
@@ -488,7 +487,6 @@ extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
extern int nfs_commit_inode(struct inode *, int);
extern struct nfs_write_data *nfs_commitdata_alloc(void);
extern void nfs_commit_free(struct nfs_write_data *wdata);
extern void nfs_commitdata_release(void *wdata);
#else
static inline int
nfs_commit_inode(struct inode *inode, int how)
@@ -507,6 +505,7 @@ nfs_have_writebacks(struct inode *inode)
* Allocate nfs_write_data structures
*/
extern struct nfs_write_data *nfs_writedata_alloc(unsigned int npages);
extern void nfs_writedata_free(struct nfs_write_data *);
/*
* linux/fs/nfs/read.c
@@ -515,7 +514,6 @@ extern int nfs_readpage(struct file *, struct page *);
extern int nfs_readpages(struct file *, struct address_space *,
struct list_head *, unsigned);
extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *);
extern void nfs_readdata_release(void *data);
extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
struct page *);
@@ -523,6 +521,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
* Allocate nfs_read_data structures
*/
extern struct nfs_read_data *nfs_readdata_alloc(unsigned int npages);
extern void nfs_readdata_free(struct nfs_read_data *);
/*
* linux/fs/nfs3proc.c


@@ -82,6 +82,12 @@
* to generate slightly worse code. So use a simple one-line #define
* for node_isset(), instead of wrapping an inline inside a macro, the
* way we do the other calls.
*
* NODEMASK_SCRATCH
* When doing above logical AND, OR, XOR, Remap operations the callers tend to
* need temporary nodemask_t's on the stack. But if NODES_SHIFT is large,
* nodemask_t's consume too much stack space. NODEMASK_SCRATCH is a helper
* for such situations. See below and CPUMASK_ALLOC also.
*/
#include <linux/kernel.h>
@@ -473,4 +479,26 @@ static inline int num_node_state(enum node_states state)
#define for_each_node(node) for_each_node_state(node, N_POSSIBLE)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
/*
* For nodemask scratch area. (See CPUMASK_ALLOC() in cpumask.h)
*/
#if NODES_SHIFT > 8 /* nodemask_t > 64 bytes */
#define NODEMASK_ALLOC(x, m) struct x *m = kmalloc(sizeof(*m), GFP_KERNEL)
#define NODEMASK_FREE(m) kfree(m)
#else
#define NODEMASK_ALLOC(x, m) struct x _m, *m = &_m
#define NODEMASK_FREE(m)
#endif
/* An example structure for using NODEMASK_ALLOC, used in mempolicy. */
struct nodemask_scratch {
nodemask_t mask1;
nodemask_t mask2;
};
#define NODEMASK_SCRATCH(x) NODEMASK_ALLOC(nodemask_scratch, x)
#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x)
#endif /* __LINUX_NODEMASK_H */
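
A hedged sketch of NODEMASK_SCRATCH for the stack-pressure case described
above; the masks being combined are placeholders (the NULL check only
matters in the kmalloc variant):

static int example_first_common_node(const nodemask_t *a, const nodemask_t *b)
{
	NODEMASK_SCRATCH(scratch);
	int node;

	if (!scratch)
		return -ENOMEM;
	nodes_and(scratch->mask1, *a, *b);	/* temporary mask off the stack */
	node = first_node(scratch->mask1);
	NODEMASK_SCRATCH_FREE(scratch);
	return node;
}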


@@ -18,5 +18,8 @@ extern struct phy_device *of_phy_connect(struct net_device *dev,
struct device_node *phy_np,
void (*hndlr)(struct net_device *),
u32 flags, phy_interface_t iface);
extern struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
void (*hndlr)(struct net_device *),
phy_interface_t iface);
#endif /* __LINUX_OF_MDIO_H */


@@ -1145,7 +1145,7 @@ static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
/* If you want to know what to call your pci_dev, ask this function.
* Again, it's a wrapper around the generic device.
*/
static inline const char *pci_name(struct pci_dev *pdev)
static inline const char *pci_name(const struct pci_dev *pdev)
{
return dev_name(&pdev->dev);
}


@@ -115,26 +115,44 @@ enum perf_counter_sample_format {
PERF_SAMPLE_TID = 1U << 1,
PERF_SAMPLE_TIME = 1U << 2,
PERF_SAMPLE_ADDR = 1U << 3,
PERF_SAMPLE_GROUP = 1U << 4,
PERF_SAMPLE_READ = 1U << 4,
PERF_SAMPLE_CALLCHAIN = 1U << 5,
PERF_SAMPLE_ID = 1U << 6,
PERF_SAMPLE_CPU = 1U << 7,
PERF_SAMPLE_PERIOD = 1U << 8,
PERF_SAMPLE_STREAM_ID = 1U << 9,
PERF_SAMPLE_RAW = 1U << 10,
PERF_SAMPLE_MAX = 1U << 9, /* non-ABI */
PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */
};
/*
* Bits that can be set in attr.read_format to request that
* reads on the counter should return the indicated quantities,
* in increasing order of bit value, after the counter value.
* The format of the data returned by read() on a perf counter fd,
* as specified by attr.read_format:
*
* struct read_format {
* { u64 value;
* { u64 time_enabled; } && PERF_FORMAT_ENABLED
* { u64 time_running; } && PERF_FORMAT_RUNNING
* { u64 id; } && PERF_FORMAT_ID
* } && !PERF_FORMAT_GROUP
*
* { u64 nr;
* { u64 time_enabled; } && PERF_FORMAT_ENABLED
* { u64 time_running; } && PERF_FORMAT_RUNNING
* { u64 value;
* { u64 id; } && PERF_FORMAT_ID
* } cntr[nr];
* } && PERF_FORMAT_GROUP
* };
*/
enum perf_counter_read_format {
PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
PERF_FORMAT_ID = 1U << 2,
PERF_FORMAT_GROUP = 1U << 3,
PERF_FORMAT_MAX = 1U << 3, /* non-ABI */
PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
};
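
A hedged userspace sketch of parsing read() data for a single (non-group)
counter opened with PERF_FORMAT_TOTAL_TIME_ENABLED and
PERF_FORMAT_TOTAL_TIME_RUNNING, matching the !PERF_FORMAT_GROUP layout
documented above; the scaling step is the usual multiplexing correction,
not part of this diff:

struct example_read_format {
	__u64 value;
	__u64 time_enabled;
	__u64 time_running;
};

static int example_read_scaled(int fd, __u64 *scaled)
{
	struct example_read_format rf;

	if (read(fd, &rf, sizeof(rf)) != sizeof(rf))
		return -1;
	/* scale for the time the counter was scheduled out */
	*scaled = rf.time_running ?
		rf.value * rf.time_enabled / rf.time_running : rf.value;
	return 0;
}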
#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
@@ -180,8 +198,9 @@ struct perf_counter_attr {
freq : 1, /* use freq, not period */
inherit_stat : 1, /* per task counts */
enable_on_exec : 1, /* next exec enables */
task : 1, /* trace fork/exit */
__reserved_1 : 51;
__reserved_1 : 50;
__u32 wakeup_events; /* wakeup every n events */
__u32 __reserved_2;
@@ -310,18 +329,18 @@ enum perf_event_type {
/*
* struct {
* struct perf_event_header header;
* u64 time;
* u64 id;
* u64 sample_period;
* u32 pid, ppid;
* u32 tid, ptid;
* };
*/
PERF_EVENT_PERIOD = 4,
PERF_EVENT_EXIT = 4,
/*
* struct {
* struct perf_event_header header;
* u64 time;
* u64 id;
* u64 stream_id;
* };
*/
PERF_EVENT_THROTTLE = 5,
@@ -331,6 +350,7 @@ enum perf_event_type {
* struct {
* struct perf_event_header header;
* u32 pid, ppid;
* u32 tid, ptid;
* };
*/
PERF_EVENT_FORK = 7,
@@ -339,10 +359,8 @@ enum perf_event_type {
* struct {
* struct perf_event_header header;
* u32 pid, tid;
* u64 value;
* { u64 time_enabled; } && PERF_FORMAT_ENABLED
* { u64 time_running; } && PERF_FORMAT_RUNNING
* { u64 parent_id; } && PERF_FORMAT_ID
*
* struct read_format values;
* };
*/
PERF_EVENT_READ = 8,
@@ -356,14 +374,28 @@ enum perf_event_type {
* { u64 time; } && PERF_SAMPLE_TIME
* { u64 addr; } && PERF_SAMPLE_ADDR
* { u64 id; } && PERF_SAMPLE_ID
* { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
* { u32 cpu, res; } && PERF_SAMPLE_CPU
* { u64 period; } && PERF_SAMPLE_PERIOD
*
* { u64 nr;
* { u64 id, val; } cnt[nr]; } && PERF_SAMPLE_GROUP
* { struct read_format values; } && PERF_SAMPLE_READ
*
* { u64 nr,
* u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
*
* #
* # The RAW record below is opaque data wrt the ABI
* #
* # That is, the ABI doesn't make any promises wrt
* # the stability of its content, it may vary depending
* # on event, hardware, kernel version and phase of
* # the moon.
* #
* # In other words, PERF_SAMPLE_RAW contents are not an ABI.
* #
*
* { u32 size;
* char data[size];}&& PERF_SAMPLE_RAW
* };
*/
PERF_EVENT_SAMPLE = 9,
@@ -409,6 +441,11 @@ struct perf_callchain_entry {
__u64 ip[PERF_MAX_STACK_DEPTH];
};
struct perf_raw_record {
u32 size;
void *data;
};
struct task_struct;
/**
@@ -677,10 +714,13 @@ struct perf_sample_data {
struct pt_regs *regs;
u64 addr;
u64 period;
struct perf_raw_record *raw;
};
extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
struct perf_sample_data *data);
extern void perf_counter_output(struct perf_counter *counter, int nmi,
struct perf_sample_data *data);
/*
* Return 1 for a software counter, 0 for a hardware counter


@@ -40,7 +40,10 @@ enum {
* Security-relevant compatibility flags that must be
* cleared upon setuid or setgid exec:
*/
#define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC|ADDR_NO_RANDOMIZE)
#define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
ADDR_NO_RANDOMIZE | \
ADDR_COMPAT_LAYOUT | \
MMAP_PAGE_ZERO)
/*
* Personality types.


@@ -22,6 +22,8 @@
#ifndef _PPS_H_
#define _PPS_H_
#include <linux/types.h>
#define PPS_VERSION "5.3.6"
#define PPS_MAX_SOURCES 16 /* should be enough... */


@@ -7,7 +7,6 @@
#ifndef _LINUX_QUOTAOPS_
#define _LINUX_QUOTAOPS_
#include <linux/smp_lock.h>
#include <linux/fs.h>
static inline struct quota_info *sb_dqopt(struct super_block *sb)


@@ -99,7 +99,6 @@ enum rfkill_user_states {
#undef RFKILL_STATE_UNBLOCKED
#undef RFKILL_STATE_HARD_BLOCKED
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -225,7 +224,7 @@ void rfkill_destroy(struct rfkill *rfkill);
* should be blocked) so that drivers need not keep track of the soft
* block state -- which they might not be able to.
*/
bool __must_check rfkill_set_hw_state(struct rfkill *rfkill, bool blocked);
bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked);
/**
* rfkill_set_sw_state - Set the internal rfkill software block state


@@ -242,6 +242,8 @@ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
*/
#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */
#define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */
#define SG_MITER_FROM_SG (1 << 2) /* nop */
struct sg_mapping_iter {
/* the following three fields can be accessed directly */


@@ -209,7 +209,7 @@ extern unsigned long long time_sync_thresh;
((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0)
(task->flags & PF_FREEZING) == 0)
#define __set_task_state(tsk, state_value) \
do { (tsk)->state = (state_value); } while (0)
@@ -498,6 +498,15 @@ struct task_cputime {
.sum_exec_runtime = 0, \
}
/*
* Disable preemption until the scheduler is running.
* Reset by start_kernel()->sched_init()->init_idle().
*
* We include PREEMPT_ACTIVE to avoid cond_resched() from working
* before the scheduler is active -- see should_resched().
*/
#define INIT_PREEMPT_COUNT (1 + PREEMPT_ACTIVE)
/**
* struct thread_group_cputimer - thread group interval timer counts
* @cputime: thread group interval timers.
@@ -1671,6 +1680,7 @@ extern cputime_t task_gtime(struct task_struct *p);
#define PF_MEMALLOC 0x00000800 /* Allocating memory */
#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
#define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
#define PF_FROZEN 0x00010000 /* frozen for system suspend */
#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */


@@ -1342,12 +1342,12 @@ static inline int skb_network_offset(const struct sk_buff *skb)
* shifting the start of the packet by 2 bytes. Drivers should do this
* with:
*
* skb_reserve(NET_IP_ALIGN);
* skb_reserve(skb, NET_IP_ALIGN);
*
* The downside to this alignment of the IP header is that the DMA is now
* unaligned. On some architectures the cost of an unaligned DMA is high
* and this cost outweighs the gains made by aligning the IP header.
*
*
* Since this trade off varies between architectures, we allow NET_IP_ALIGN
* to be overridden.
*/


@@ -11,6 +11,7 @@
#include <linux/workqueue.h>
#include <linux/kobject.h>
#include <linux/kmemtrace.h>
#include <linux/kmemleak.h>
enum stat_item {
ALLOC_FASTPATH, /* Allocation from cpu slab */
@@ -233,6 +234,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
unsigned int order = get_order(size);
void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
kmemleak_alloc(ret, size, 1, flags);
trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
return ret;


@@ -132,6 +132,11 @@ do { \
#endif /*__raw_spin_is_contended*/
#endif
/* The lock does not imply full memory barrier. */
#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif
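
A hedged sketch of the pattern this hook enables: a full barrier right after
lock acquisition, free on architectures where the lock already implies one;
the data being examined is left abstract:

static void example_lock_and_fence(spinlock_t *lock)
{
	spin_lock(lock);
	smp_mb__after_lock();	/* no-op where ARCH_HAS_SMP_MB_AFTER_LOCK */
	/* loads below cannot be reordered before the lock is taken */
	spin_unlock(lock);
}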
/**
* spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.


@@ -12,7 +12,6 @@
#include <linux/uio.h>
#include <asm/byteorder.h>
#include <linux/scatterlist.h>
#include <linux/smp_lock.h>
/*
* Buffer adjustment


@@ -321,6 +321,8 @@ asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese,
siginfo_t __user *uinfo,
const struct timespec __user *uts,
size_t sigsetsize);
asmlinkage long sys_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig,
siginfo_t __user *uinfo);
asmlinkage long sys_kill(int pid, int sig);
asmlinkage long sys_tgkill(int tgid, int pid, int sig);
asmlinkage long sys_tkill(int pid, int sig);


@@ -14,6 +14,8 @@
#ifndef _LINUX_SYSRQ_H
#define _LINUX_SYSRQ_H
#include <linux/errno.h>
struct pt_regs;
struct tty_struct;


@@ -394,6 +394,7 @@ extern void __do_SAK(struct tty_struct *tty);
extern void disassociate_ctty(int priv);
extern void no_tty(void);
extern void tty_flip_buffer_push(struct tty_struct *tty);
extern void tty_flush_to_ldisc(struct tty_struct *tty);
extern void tty_buffer_free_all(struct tty_struct *tty);
extern void tty_buffer_flush(struct tty_struct *tty);
extern void tty_buffer_init(struct tty_struct *tty);


@@ -144,7 +144,7 @@ struct tty_ldisc_ops {
struct tty_ldisc {
struct tty_ldisc_ops *ops;
int refcount;
atomic_t users;
};
#define TTY_LDISC_MAGIC 0x5403


@@ -19,15 +19,6 @@ struct iovec
__kernel_size_t iov_len; /* Must be size_t (1003.1g) */
};
#ifdef __KERNEL__
struct kvec {
void *iov_base; /* and that should *never* hold a userland pointer */
size_t iov_len;
};
#endif
/*
* UIO_MAXIOV shall be at least 16 1003.1g (5.4.1.1)
*/
@@ -35,6 +26,13 @@ struct kvec {
#define UIO_FASTIOV 8
#define UIO_MAXIOV 1024
#ifdef __KERNEL__
struct kvec {
void *iov_base; /* and that should *never* hold a userland pointer */
size_t iov_len;
};
/*
* Total number of bytes covered by an iovec.
*
@@ -53,5 +51,6 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
}
unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
#endif
#endif


@@ -888,8 +888,6 @@ struct usb_driver {
* struct usb_device_driver - identifies USB device driver to usbcore
* @name: The driver name should be unique among USB drivers,
* and should normally be the same as the module name.
* @nodename: Callback to provide a naming hint for a possible
* device node to create.
* @probe: Called to see if the driver is willing to manage a particular
* device. If it is, probe returns zero and uses dev_set_drvdata()
* to associate driver-specific data with the device. If unwilling
@@ -924,6 +922,8 @@ extern struct bus_type usb_bus_type;
/**
* struct usb_class_driver - identifies a USB driver that wants to use the USB major number
* @name: the usb class device name for this driver. Will show up in sysfs.
* @nodename: Callback to provide a naming hint for a possible
* device node to create.
* @fops: pointer to the struct file_operations of this driver.
* @minor_base: the start of the minor range for this driver.
*
@@ -1046,6 +1046,8 @@ typedef void (*usb_complete_t)(struct urb *);
* the device driver is saying that it provided this DMA address,
* which the host controller driver should use in preference to the
* transfer_buffer.
* @sg: scatter gather buffer list
* @num_sgs: number of entries in the sg list
* @transfer_buffer_length: How big is transfer_buffer. The transfer may
* be broken up into chunks according to the current maximum packet
* size for the endpoint, which is a function of the configuration


@@ -1,177 +0,0 @@
/*
* Intel Langwell USB OTG transceiver driver
* Copyright (C) 2008, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#ifndef __LANGWELL_OTG_H__
#define __LANGWELL_OTG_H__
/* notify transceiver driver about OTG events */
extern void langwell_update_transceiver(void);
/* HCD register bus driver */
extern int langwell_register_host(struct pci_driver *host_driver);
/* HCD unregister bus driver */
extern void langwell_unregister_host(struct pci_driver *host_driver);
/* DCD register bus driver */
extern int langwell_register_peripheral(struct pci_driver *client_driver);
/* DCD unregister bus driver */
extern void langwell_unregister_peripheral(struct pci_driver *client_driver);
/* No silent failure, output warning message */
extern void langwell_otg_nsf_msg(unsigned long message);
#define CI_USBCMD 0x30
# define USBCMD_RST BIT(1)
# define USBCMD_RS BIT(0)
#define CI_USBSTS 0x34
# define USBSTS_SLI BIT(8)
# define USBSTS_URI BIT(6)
# define USBSTS_PCI BIT(2)
#define CI_PORTSC1 0x74
# define PORTSC_PP BIT(12)
# define PORTSC_LS (BIT(11) | BIT(10))
# define PORTSC_SUSP BIT(7)
# define PORTSC_CCS BIT(0)
#define CI_HOSTPC1 0xb4
# define HOSTPC1_PHCD BIT(22)
#define CI_OTGSC 0xf4
# define OTGSC_DPIE BIT(30)
# define OTGSC_1MSE BIT(29)
# define OTGSC_BSEIE BIT(28)
# define OTGSC_BSVIE BIT(27)
# define OTGSC_ASVIE BIT(26)
# define OTGSC_AVVIE BIT(25)
# define OTGSC_IDIE BIT(24)
# define OTGSC_DPIS BIT(22)
# define OTGSC_1MSS BIT(21)
# define OTGSC_BSEIS BIT(20)
# define OTGSC_BSVIS BIT(19)
# define OTGSC_ASVIS BIT(18)
# define OTGSC_AVVIS BIT(17)
# define OTGSC_IDIS BIT(16)
# define OTGSC_DPS BIT(14)
# define OTGSC_1MST BIT(13)
# define OTGSC_BSE BIT(12)
# define OTGSC_BSV BIT(11)
# define OTGSC_ASV BIT(10)
# define OTGSC_AVV BIT(9)
# define OTGSC_ID BIT(8)
# define OTGSC_HABA BIT(7)
# define OTGSC_HADP BIT(6)
# define OTGSC_IDPU BIT(5)
# define OTGSC_DP BIT(4)
# define OTGSC_OT BIT(3)
# define OTGSC_HAAR BIT(2)
# define OTGSC_VC BIT(1)
# define OTGSC_VD BIT(0)
# define OTGSC_INTEN_MASK (0x7f << 24)
# define OTGSC_INTSTS_MASK (0x7f << 16)
#define CI_USBMODE 0xf8
# define USBMODE_CM (BIT(1) | BIT(0))
# define USBMODE_IDLE 0
# define USBMODE_DEVICE 0x2
# define USBMODE_HOST 0x3
#define INTR_DUMMY_MASK (USBSTS_SLI | USBSTS_URI | USBSTS_PCI)
struct otg_hsm {
/* Input */
int a_bus_resume;
int a_bus_suspend;
int a_conn;
int a_sess_vld;
int a_srp_det;
int a_vbus_vld;
int b_bus_resume;
int b_bus_suspend;
int b_conn;
int b_se0_srp;
int b_sess_end;
int b_sess_vld;
int id;
/* Internal variables */
int a_set_b_hnp_en;
int b_srp_done;
int b_hnp_enable;
/* Timeout indicator for timers */
int a_wait_vrise_tmout;
int a_wait_bcon_tmout;
int a_aidl_bdis_tmout;
int b_ase0_brst_tmout;
int b_bus_suspend_tmout;
int b_srp_res_tmout;
/* Informative variables */
int a_bus_drop;
int a_bus_req;
int a_clr_err;
int a_suspend_req;
int b_bus_req;
/* Output */
int drv_vbus;
int loc_conn;
int loc_sof;
/* Others */
int b_bus_suspend_vld;
};
#define TA_WAIT_VRISE 100
#define TA_WAIT_BCON 30000
#define TA_AIDL_BDIS 15000
#define TB_ASE0_BRST 5000
#define TB_SE0_SRP 2
#define TB_SRP_RES 100
#define TB_BUS_SUSPEND 500
struct langwell_otg_timer {
unsigned long expires; /* Number of count increase to timeout */
unsigned long count; /* Tick counter */
void (*function)(unsigned long); /* Timeout function */
unsigned long data; /* Data passed to function */
struct list_head list;
};
struct langwell_otg {
struct otg_transceiver otg;
struct otg_hsm hsm;
void __iomem *regs;
unsigned region;
struct pci_driver *host_ops;
struct pci_driver *client_ops;
struct pci_dev *pdev;
struct work_struct work;
struct workqueue_struct *qwork;
spinlock_t lock;
spinlock_t wq_lock;
};
static inline struct langwell_otg *otg_to_langwell(struct otg_transceiver *otg)
{
return container_of(otg, struct langwell_otg, otg);
}
#ifdef DEBUG
#define otg_dbg(fmt, args...) \
printk(KERN_DEBUG fmt , ## args)
#else
#define otg_dbg(fmt, args...) \
do { } while (0)
#endif /* DEBUG */
#endif /* __LANGWELL_OTG_H__ */


@@ -317,7 +317,8 @@ extern int usb_serial_generic_register(int debug);
extern void usb_serial_generic_deregister(void);
extern void usb_serial_generic_resubmit_read_urb(struct usb_serial_port *port,
gfp_t mem_flags);
extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port,
extern int usb_serial_handle_sysrq_char(struct tty_struct *tty,
struct usb_serial_port *port,
unsigned int ch);
extern int usb_serial_handle_break(struct usb_serial_port *port);


@@ -318,6 +318,8 @@ struct v4l2_pix_format {
/* see http://www.siliconimaging.com/RGB%20Bayer.htm */
#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */
/*
* 10bit raw bayer, expanded to 16 bits
* xxxxrrrrrrrrrrxxxxgggggggggg xxxxggggggggggxxxxbbbbbbbbbb...
@@ -336,6 +338,7 @@ struct v4l2_pix_format {
/* Vendor-specific formats */
#define V4L2_PIX_FMT_WNVA v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */
#define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S', '9', '1', '0') /* SN9C10x compression */
#define V4L2_PIX_FMT_SN9C20X_I420 v4l2_fourcc('S', '9', '2', '0') /* SN9C20x YUV 4:2:0 */
#define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P', 'W', 'C', '1') /* pwc older webcam */
#define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P', 'W', 'C', '2') /* pwc newer webcam */
#define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E', '6', '2', '5') /* ET61X251 compression */


@@ -20,8 +20,7 @@
#define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */
struct virtio_blk_config
{
struct virtio_blk_config {
/* The capacity (in 512-byte sectors). */
__u64 capacity;
/* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */
@@ -50,8 +49,7 @@ struct virtio_blk_config
#define VIRTIO_BLK_T_BARRIER 0x80000000
/* This is the first element of the read scatter-gather list. */
struct virtio_blk_outhdr
{
struct virtio_blk_outhdr {
/* VIRTIO_BLK_T* */
__u32 type;
/* io priority. */


@@ -79,8 +79,7 @@
* the dev->feature bits if it wants.
*/
typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops
{
struct virtio_config_ops {
void (*get)(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len);
void (*set)(struct virtio_device *vdev, unsigned offset,


@@ -27,11 +27,11 @@
#define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */
#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */
#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */
#define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */
#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
struct virtio_net_config
{
struct virtio_net_config {
/* The config defining mac address (if VIRTIO_NET_F_MAC) */
__u8 mac[6];
/* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
@@ -40,8 +40,7 @@ struct virtio_net_config
/* This is the first element of the scatter-gather list. If you don't
* specify GSO or CSUM features, you can simply ignore the header. */
struct virtio_net_hdr
{
struct virtio_net_hdr {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset
__u8 flags;
#define VIRTIO_NET_HDR_GSO_NONE 0 // Not a GSO frame
@@ -81,14 +80,19 @@ typedef __u8 virtio_net_ctrl_ack;
#define VIRTIO_NET_ERR 1
/*
* Control the RX mode, ie. promiscuous and allmulti. PROMISC and
* ALLMULTI commands require an "out" sg entry containing a 1 byte
* state value, zero = disable, non-zero = enable. These commands
* are supported with the VIRTIO_NET_F_CTRL_RX feature.
* Control the RX mode, ie. promiscuous, allmulti, etc...
* All commands require an "out" sg entry containing a 1 byte
* state value, zero = disable, non-zero = enable. Commands
* 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature.
* Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA.
*/
#define VIRTIO_NET_CTRL_RX 0
#define VIRTIO_NET_CTRL_RX_PROMISC 0
#define VIRTIO_NET_CTRL_RX_ALLMULTI 1
#define VIRTIO_NET_CTRL_RX_ALLUNI 2
#define VIRTIO_NET_CTRL_RX_NOMULTI 3
#define VIRTIO_NET_CTRL_RX_NOUNI 4
#define VIRTIO_NET_CTRL_RX_NOBCAST 5
/*
* Control the MAC filter table.


@@ -30,8 +30,7 @@
#define VIRTIO_RING_F_INDIRECT_DESC 28
/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
struct vring_desc
{
struct vring_desc {
/* Address (guest-physical). */
__u64 addr;
/* Length. */
@@ -42,24 +41,21 @@ struct vring_desc
__u16 next;
};
struct vring_avail
{
struct vring_avail {
__u16 flags;
__u16 idx;
__u16 ring[];
};
/* u32 is used here for ids for padding reasons. */
struct vring_used_elem
{
struct vring_used_elem {
/* Index of start of used descriptor chain. */
__u32 id;
/* Total length of the descriptor chain which was used (written to) */
__u32 len;
};
struct vring_used
{
struct vring_used {
__u16 flags;
__u16 idx;
struct vring_used_elem ring[];


@@ -77,7 +77,14 @@ struct task_struct;
#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
{ .flags = word, .bit_nr = bit, }
extern void init_waitqueue_head(wait_queue_head_t *q);
extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *);
#define init_waitqueue_head(q) \
do { \
static struct lock_class_key __key; \
\
__init_waitqueue_head((q), &__key); \
} while (0)
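
A hedged sketch of what the macro buys: every init_waitqueue_head() call
site gets its own static lockdep class key, so even dynamically allocated
wait queues stay distinguishable to lockdep; the containing object is an
assumption:

struct example_obj {
	wait_queue_head_t wq;
};

static struct example_obj *example_obj_new(gfp_t gfp)
{
	struct example_obj *o = kmalloc(sizeof(*o), gfp);

	if (o)
		init_waitqueue_head(&o->wq);	/* one lockdep key per call site */
	return o;
}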
#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \