Merge branch 'linus' into sched/core

Pick up the autogroups fix and other fixes.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar
2012-12-07 12:15:33 +01:00
1121 changed files with 13359 additions and 8083 deletions


@@ -15,6 +15,7 @@ struct pt_regs;
#define BUILD_BUG_ON_NOT_POWER_OF_2(n)
#define BUILD_BUG_ON_ZERO(e) (0)
#define BUILD_BUG_ON_NULL(e) ((void*)0)
#define BUILD_BUG_ON_INVALID(e) (0)
#define BUILD_BUG_ON(condition)
#define BUILD_BUG() (0)
#else /* __CHECKER__ */


@@ -335,8 +335,8 @@ const char *__clk_get_name(struct clk *clk);
struct clk_hw *__clk_get_hw(struct clk *clk);
u8 __clk_get_num_parents(struct clk *clk);
struct clk *__clk_get_parent(struct clk *clk);
inline int __clk_get_enable_count(struct clk *clk);
inline int __clk_get_prepare_count(struct clk *clk);
int __clk_get_enable_count(struct clk *clk);
int __clk_get_prepare_count(struct clk *clk);
unsigned long __clk_get_rate(struct clk *clk);
unsigned long __clk_get_flags(struct clk *clk);
int __clk_is_enabled(struct clk *clk);


@@ -61,7 +61,7 @@ int __dynamic_netdev_dbg(struct _ddebug *descriptor,
const char *fmt, ...);
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
static struct _ddebug __used __aligned(8) \
static struct _ddebug __aligned(8) \
__attribute__((section("__verbose"))) name = { \
.modname = KBUILD_MODNAME, \
.function = __func__, \


@@ -68,7 +68,7 @@ enum extcon_cable_name {
EXTCON_VIDEO_OUT,
EXTCON_MECHANICAL,
};
extern const char *extcon_cable_name[];
extern const char extcon_cable_name[][CABLE_NAME_MAX + 1];
struct extcon_cable;


@@ -462,8 +462,6 @@ struct block_device {
int bd_fsfreeze_count;
/* Mutex for freeze */
struct mutex bd_fsfreeze_mutex;
/* A semaphore that prevents I/O while block size is being changed */
struct percpu_rw_semaphore bd_block_size_semaphore;
};
/*
@@ -2049,7 +2047,6 @@ extern void unregister_blkdev(unsigned int, const char *);
extern struct block_device *bdget(dev_t);
extern struct block_device *bdgrab(struct block_device *bdev);
extern void bd_set_size(struct block_device *, loff_t size);
extern sector_t blkdev_max_block(struct block_device *bdev);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
extern void invalidate_bdev(struct block_device *);
@@ -2379,8 +2376,6 @@ extern int generic_segment_checks(const struct iovec *iov,
unsigned long *nr_segs, size_t *count, int access_flags);
/* fs/block_dev.c */
extern ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos);
extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos);
extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,


@@ -30,9 +30,9 @@ struct vm_area_struct;
#define ___GFP_HARDWALL 0x20000u
#define ___GFP_THISNODE 0x40000u
#define ___GFP_RECLAIMABLE 0x80000u
#define ___GFP_NOTRACK 0x200000u
#define ___GFP_OTHER_NODE 0x800000u
#define ___GFP_WRITE 0x1000000u
#define ___GFP_NOTRACK 0x100000u
#define ___GFP_OTHER_NODE 0x200000u
#define ___GFP_WRITE 0x400000u
/*
* GFP bitmasks..
@@ -94,7 +94,7 @@ struct vm_area_struct;
*/
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
#define __GFP_BITS_SHIFT 23 /* Room for N __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/* This equals 0, but use constants in case they ever change */

include/linux/hashtable.h (new file, 192 lines)

@@ -0,0 +1,192 @@
/*
* Statically sized hash table implementation
* (C) 2012 Sasha Levin <levinsasha928@gmail.com>
*/
#ifndef _LINUX_HASHTABLE_H
#define _LINUX_HASHTABLE_H
#include <linux/list.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/rculist.h>
#define DEFINE_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)] = \
{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
#define DECLARE_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)]
#define HASH_SIZE(name) (ARRAY_SIZE(name))
#define HASH_BITS(name) ilog2(HASH_SIZE(name))
/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
#define hash_min(val, bits) \
(sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
{
unsigned int i;
for (i = 0; i < sz; i++)
INIT_HLIST_HEAD(&ht[i]);
}
/**
* hash_init - initialize a hash table
* @hashtable: hashtable to be initialized
*
* Calculates the size of the hashtable from the given parameter.
*
* This has to be a macro since HASH_BITS() will not work on pointers since
* it calculates the size during preprocessing.
*/
#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
/**
* hash_add - add an object to a hashtable
* @hashtable: hashtable to add to
* @node: the &struct hlist_node of the object to be added
* @key: the key of the object to be added
*/
#define hash_add(hashtable, node, key) \
hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
/**
* hash_add_rcu - add an object to a rcu enabled hashtable
* @hashtable: hashtable to add to
* @node: the &struct hlist_node of the object to be added
* @key: the key of the object to be added
*/
#define hash_add_rcu(hashtable, node, key) \
hlist_add_head_rcu(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
/**
* hash_hashed - check whether an object is in any hashtable
* @node: the &struct hlist_node of the object to be checked
*/
static inline bool hash_hashed(struct hlist_node *node)
{
return !hlist_unhashed(node);
}
static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
{
unsigned int i;
for (i = 0; i < sz; i++)
if (!hlist_empty(&ht[i]))
return false;
return true;
}
/**
* hash_empty - check whether a hashtable is empty
* @hashtable: hashtable to check
*
* This has to be a macro since HASH_BITS() will not work on pointers since
* it calculates the size during preprocessing.
*/
#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
/**
* hash_del - remove an object from a hashtable
* @node: &struct hlist_node of the object to remove
*/
static inline void hash_del(struct hlist_node *node)
{
hlist_del_init(node);
}
/**
* hash_del_rcu - remove an object from a rcu enabled hashtable
* @node: &struct hlist_node of the object to remove
*/
static inline void hash_del_rcu(struct hlist_node *node)
{
hlist_del_init_rcu(node);
}
/**
* hash_for_each - iterate over a hashtable
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @node: the &struct list_head to use as a loop cursor for each entry
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each(name, bkt, node, obj, member) \
for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
hlist_for_each_entry(obj, node, &name[bkt], member)
/**
* hash_for_each_rcu - iterate over a rcu enabled hashtable
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @node: the &struct list_head to use as a loop cursor for each entry
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each_rcu(name, bkt, node, obj, member) \
for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
hlist_for_each_entry_rcu(obj, node, &name[bkt], member)
/**
* hash_for_each_safe - iterate over a hashtable safe against removal of
* hash entry
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @node: the &struct list_head to use as a loop cursor for each entry
* @tmp: a &struct used for temporary storage
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each_safe(name, bkt, node, tmp, obj, member) \
for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
hlist_for_each_entry_safe(obj, node, tmp, &name[bkt], member)
/**
* hash_for_each_possible - iterate over all possible objects hashing to the
* same bucket
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @node: the &struct list_head to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible(name, obj, node, member, key) \
hlist_for_each_entry(obj, node, &name[hash_min(key, HASH_BITS(name))], member)
/**
* hash_for_each_possible_rcu - iterate over all possible objects hashing to the
* same bucket in an rcu enabled hashtable
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @node: the &struct list_head to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible_rcu(name, obj, node, member, key) \
hlist_for_each_entry_rcu(obj, node, &name[hash_min(key, HASH_BITS(name))], member)
/**
* hash_for_each_possible_safe - iterate over all possible objects hashing to the
* same bucket safe against removals
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @node: the &struct list_head to use as a loop cursor for each entry
* @tmp: a &struct used for temporary storage
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible_safe(name, obj, node, tmp, member, key) \
hlist_for_each_entry_safe(obj, node, tmp, \
&name[hash_min(key, HASH_BITS(name))], member)
#endif
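Since this header is new, a short usage sketch may help. Everything below (struct object, object_table, the key values) is hypothetical and exercises only the macros defined above, using the 2012-era iterator signatures that still take an explicit struct hlist_node cursor.

/* Hypothetical example, not part of this commit: a small object cache
 * keyed by an integer id. DEFINE_HASHTABLE(name, 4) creates 16 buckets.
 */
struct object {
	int id;
	struct hlist_node node;
};

static DEFINE_HASHTABLE(object_table, 4);

static void object_add(struct object *obj)
{
	hash_add(object_table, &obj->node, obj->id);
}

static struct object *object_find(int id)
{
	struct object *obj;
	struct hlist_node *cursor;

	hash_for_each_possible(object_table, obj, cursor, node, id)
		if (obj->id == id)
			return obj;
	return NULL;
}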


@@ -1,35 +1,8 @@
#ifndef _LINUX_HW_BREAKPOINT_H
#define _LINUX_HW_BREAKPOINT_H
enum {
HW_BREAKPOINT_LEN_1 = 1,
HW_BREAKPOINT_LEN_2 = 2,
HW_BREAKPOINT_LEN_4 = 4,
HW_BREAKPOINT_LEN_8 = 8,
};
enum {
HW_BREAKPOINT_EMPTY = 0,
HW_BREAKPOINT_R = 1,
HW_BREAKPOINT_W = 2,
HW_BREAKPOINT_RW = HW_BREAKPOINT_R | HW_BREAKPOINT_W,
HW_BREAKPOINT_X = 4,
HW_BREAKPOINT_INVALID = HW_BREAKPOINT_RW | HW_BREAKPOINT_X,
};
enum bp_type_idx {
TYPE_INST = 0,
#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
TYPE_DATA = 0,
#else
TYPE_DATA = 1,
#endif
TYPE_MAX
};
#ifdef __KERNEL__
#include <linux/perf_event.h>
#include <uapi/linux/hw_breakpoint.h>
#ifdef CONFIG_HAVE_HW_BREAKPOINT
@@ -151,6 +124,4 @@ static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp)
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* __KERNEL__ */
#endif /* _LINUX_HW_BREAKPOINT_H */
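The breakpoint type and length constants above (now shared with user space via uapi) are normally fed into a perf_event_attr. The sketch below loosely mirrors samples/hw_breakpoint and is only an illustration: hw_breakpoint_init() and register_wide_hw_breakpoint() are assumed from the rest of this header, and watched_var is a made-up symbol.

/* Illustrative only: watch a (hypothetical) kernel variable for writes. */
static int watched_var;
static struct perf_event * __percpu *wp;

static void wp_handler(struct perf_event *bp, struct perf_sample_data *data,
		       struct pt_regs *regs)
{
	pr_info("watched_var was written to\n");
}

static int __init wp_init(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)&watched_var;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
	return IS_ERR((void __force *)wp) ? PTR_ERR((void __force *)wp) : 0;
}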


@@ -34,6 +34,7 @@ struct omap_i2c_bus_platform_data {
u32 clkrate;
u32 rev;
u32 flags;
void (*set_mpu_wkup_lat)(struct device *dev, long set);
};
#endif


@@ -618,4 +618,20 @@ static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
};
#endif
/**
* IIO_DEGREE_TO_RAD() - Convert degree to rad
* @deg: A value in degree
*
* Returns the given value converted from degree to rad
*/
#define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)
/**
* IIO_G_TO_M_S_2() - Convert g to meter / second**2
* @g: A value in g
*
* Returns the given value converted from g to meter / second**2
*/
#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)
#endif /* _INDUSTRIAL_IO_H_ */
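These helpers are usually applied to micro-unit values, which they preserve. A hedged example with made-up scale factors, not taken from this commit:

/* Illustrative only: typical scale factors expressed in micro-units.
 * 0.035 degree (35000 udeg) -> ~611 urad; 61035 ug -> ~598548 um/s^2.
 */
static const u64 gyro_scale_urad  = IIO_DEGREE_TO_RAD(35000ULL);
static const u64 accel_scale_ums2 = IIO_G_TO_M_S_2(61035ULL);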


@@ -701,6 +701,13 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#define COMPACTION_BUILD 0
#endif
/* This helps us to avoid #ifdef CONFIG_SYMBOL_PREFIX */
#ifdef CONFIG_SYMBOL_PREFIX
#define SYMBOL_PREFIX CONFIG_SYMBOL_PREFIX
#else
#define SYMBOL_PREFIX ""
#endif
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD


@@ -42,19 +42,8 @@
*/
#define KVM_MEMSLOT_INVALID (1UL << 16)
/*
* If we support unaligned MMIO, at most one fragment will be split into two:
*/
#ifdef KVM_UNALIGNED_MMIO
# define KVM_EXTRA_MMIO_FRAGMENTS 1
#else
# define KVM_EXTRA_MMIO_FRAGMENTS 0
#endif
#define KVM_USER_MMIO_SIZE 8
#define KVM_MAX_MMIO_FRAGMENTS \
(KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)
/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS 2
/*
* For the normal pfn, the highest 12 bits should be zero,


@@ -57,6 +57,7 @@ int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,


@@ -82,16 +82,6 @@ static inline void mpol_cond_put(struct mempolicy *pol)
__mpol_put(pol);
}
extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
struct mempolicy *frompol);
static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
struct mempolicy *frompol)
{
if (!frompol)
return frompol;
return __mpol_cond_copy(tompol, frompol);
}
extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
@@ -215,12 +205,6 @@ static inline void mpol_cond_put(struct mempolicy *pol)
{
}
static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
struct mempolicy *from)
{
return from;
}
static inline void mpol_get(struct mempolicy *pol)
{
}


@@ -30,7 +30,20 @@
#ifndef __LINUX_MFD_MAX77693_H
#define __LINUX_MFD_MAX77693_H
struct max77693_reg_data {
u8 addr;
u8 data;
};
struct max77693_muic_platform_data {
struct max77693_reg_data *init_data;
int num_init_data;
};
struct max77693_platform_data {
int wakeup;
/* muic data */
struct max77693_muic_platform_data *muic_data;
};
#endif /* __LINUX_MFD_MAX77693_H */
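A board file would hand this platform data to the MFD core. The sketch below is hypothetical (the register address and value are invented) and only populates the structures added above.

/* Hypothetical board data, not part of this commit. */
static struct max77693_reg_data board_muic_init_regs[] = {
	{ .addr = 0x0e, .data = 0x09 },	/* made-up register write */
};

static struct max77693_muic_platform_data board_muic_pdata = {
	.init_data	= board_muic_init_regs,
	.num_init_data	= ARRAY_SIZE(board_muic_init_regs),
};

static struct max77693_platform_data board_max77693_pdata = {
	.wakeup		= 1,
	.muic_data	= &board_muic_pdata,
};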


@@ -1684,9 +1684,5 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif /* CONFIG_DEBUG_PAGEALLOC */
extern void reset_zone_present_pages(void);
extern void fixup_zone_present_pages(int nid, unsigned long start_pfn,
unsigned long end_pfn);
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */


@@ -137,7 +137,7 @@ struct dw_mci {
dma_addr_t sg_dma;
void *sg_cpu;
struct dw_mci_dma_ops *dma_ops;
const struct dw_mci_dma_ops *dma_ops;
#ifdef CONFIG_MMC_DW_IDMAC
unsigned int ring_size;
#else
@@ -162,7 +162,7 @@ struct dw_mci {
u16 data_offset;
struct device *dev;
struct dw_mci_board *pdata;
struct dw_mci_drv_data *drv_data;
const struct dw_mci_drv_data *drv_data;
void *priv;
struct clk *biu_clk;
struct clk *ciu_clk;
@@ -186,7 +186,7 @@ struct dw_mci {
struct regulator *vmmc; /* Power regulator */
unsigned long irq_flags; /* IRQ flags */
unsigned int irq;
int irq;
};
/* DMA ops for Internal/External DMAC interface */


@@ -91,6 +91,7 @@ struct sdhci_host {
unsigned int quirks2; /* More deviations from spec. */
#define SDHCI_QUIRK2_HOST_OFF_CARD_ON (1<<0)
#define SDHCI_QUIRK2_HOST_NO_CMD23 (1<<1)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */


@@ -752,7 +752,7 @@ extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
unsigned long size,
enum memmap_context context);
extern void lruvec_init(struct lruvec *lruvec, struct zone *zone);
extern void lruvec_init(struct lruvec *lruvec);
static inline struct zone *lruvec_zone(struct lruvec *lruvec)
{


@@ -28,11 +28,13 @@ static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
#endif
#else /* CONFIG_OF_ADDRESS */
#ifndef of_address_to_resource
static inline int of_address_to_resource(struct device_node *dev, int index,
struct resource *r)
{
return -EINVAL;
}
#endif
static inline struct device_node *of_find_matching_node_by_address(
struct device_node *from,
const struct of_device_id *matches,
@@ -40,10 +42,12 @@ static inline struct device_node *of_find_matching_node_by_address(
{
return NULL;
}
#ifndef of_iomap
static inline void __iomem *of_iomap(struct device_node *device, int index)
{
return NULL;
}
#endif
static inline const __be32 *of_get_address(struct device_node *dev, int index,
u64 *size, unsigned int *flags)
{


@@ -12,34 +12,27 @@ struct percpu_rw_semaphore {
struct mutex mtx;
};
#define light_mb() barrier()
#define heavy_mb() synchronize_sched_expedited()
static inline void percpu_down_read(struct percpu_rw_semaphore *p)
{
rcu_read_lock();
rcu_read_lock_sched();
if (unlikely(p->locked)) {
rcu_read_unlock();
rcu_read_unlock_sched();
mutex_lock(&p->mtx);
this_cpu_inc(*p->counters);
mutex_unlock(&p->mtx);
return;
}
this_cpu_inc(*p->counters);
rcu_read_unlock();
rcu_read_unlock_sched();
light_mb(); /* A, between read of p->locked and read of data, paired with D */
}
static inline void percpu_up_read(struct percpu_rw_semaphore *p)
{
/*
* On X86, write operation in this_cpu_dec serves as a memory unlock
* barrier (i.e. memory accesses may be moved before the write, but
* no memory accesses are moved past the write).
* On other architectures this may not be the case, so we need smp_mb()
* there.
*/
#if defined(CONFIG_X86) && (!defined(CONFIG_X86_PPRO_FENCE) && !defined(CONFIG_X86_OOSTORE))
barrier();
#else
smp_mb();
#endif
light_mb(); /* B, between read of the data and write to p->counter, paired with C */
this_cpu_dec(*p->counters);
}
@@ -58,14 +51,15 @@ static inline void percpu_down_write(struct percpu_rw_semaphore *p)
{
mutex_lock(&p->mtx);
p->locked = true;
synchronize_rcu();
synchronize_sched_expedited(); /* make sure that all readers exit the rcu_read_lock_sched region */
while (__percpu_count(p->counters))
msleep(1);
smp_rmb(); /* paired with smp_mb() in percpu_sem_up_read() */
heavy_mb(); /* C, between read of p->counter and write to data, paired with B */
}
static inline void percpu_up_write(struct percpu_rw_semaphore *p)
{
heavy_mb(); /* D, between write to data and write to p->locked, paired with A */
p->locked = false;
mutex_unlock(&p->mtx);
}
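To see why the barrier pairing above matters, here is a hedged usage sketch: readers take the cheap per-CPU path while the writer pays for synchronize_sched_expedited(). The init step and the protected data are assumptions, not part of this hunk.

/* Illustrative only; cfg_sem is assumed to have been set up with the
 * header's percpu_init_rwsem() helper before first use.
 */
static struct percpu_rw_semaphore cfg_sem;
static int cfg_value;

static int cfg_read(void)
{
	int v;

	percpu_down_read(&cfg_sem);	/* fast path: per-CPU count + light_mb() */
	v = cfg_value;
	percpu_up_read(&cfg_sem);
	return v;
}

static void cfg_write(int v)
{
	percpu_down_write(&cfg_sem);	/* blocks new readers, waits out old ones */
	cfg_value = v;
	percpu_up_write(&cfg_sem);
}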


@@ -803,12 +803,16 @@ static inline void perf_event_task_tick(void) { }
do { \
static struct notifier_block fn##_nb __cpuinitdata = \
{ .notifier_call = fn, .priority = CPU_PRI_PERF }; \
unsigned long cpu = smp_processor_id(); \
unsigned long flags; \
fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
(void *)(unsigned long)smp_processor_id()); \
(void *)(unsigned long)cpu); \
local_irq_save(flags); \
fn(&fn##_nb, (unsigned long)CPU_STARTING, \
(void *)(unsigned long)smp_processor_id()); \
(void *)(unsigned long)cpu); \
local_irq_restore(flags); \
fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
(void *)(unsigned long)smp_processor_id()); \
(void *)(unsigned long)cpu); \
register_cpu_notifier(&fn##_nb); \
} while (0)
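If the surrounding context is read correctly, this is the perf_cpu_notifier() macro, which now runs the CPU_STARTING step with interrupts disabled and a stable CPU id. A hedged usage sketch with an invented callback name:

/* Hypothetical PMU setup: the same callback sees CPU_UP_PREPARE,
 * CPU_STARTING and CPU_ONLINE for the current CPU, then later hotplug events.
 */
static int my_pmu_cpu_notify(struct notifier_block *nb,
			     unsigned long action, void *hcpu)
{
	return NOTIFY_OK;
}

static int __init my_pmu_init(void)
{
	perf_cpu_notifier(my_pmu_cpu_notify);
	return 0;
}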


@@ -0,0 +1,31 @@
/*
* omap_ocp2scp.h -- ocp2scp header file
*
* Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Author: Kishon Vijay Abraham I <kishon@ti.com>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __DRIVERS_OMAP_OCP2SCP_H
#define __DRIVERS_OMAP_OCP2SCP_H
struct omap_ocp2scp_dev {
const char *drv_name;
struct resource *res;
};
struct omap_ocp2scp_platform_data {
int dev_cnt;
struct omap_ocp2scp_dev **devices;
};
#endif /* __DRIVERS_OMAP_OCP2SCP_H */
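The platform data above is filled in by SoC code. A hedged sketch follows; the device name, addresses and counts are illustrative and not taken from this commit.

/* Illustrative only: one child device sitting behind the OCP2SCP module. */
static struct resource usb2_phy_res[] = {
	{
		.start	= 0x4a0ad080,	/* made-up addresses */
		.end	= 0x4a0ad0ff,
		.flags	= IORESOURCE_MEM,
	},
};

static struct omap_ocp2scp_dev usb2_phy_dev = {
	.drv_name	= "omap-usb2",
	.res		= usb2_phy_res,
};

static struct omap_ocp2scp_dev *ocp2scp_devs[] = {
	&usb2_phy_dev,
};

static struct omap_ocp2scp_platform_data ocp2scp_pdata = {
	.dev_cnt	= ARRAY_SIZE(ocp2scp_devs),
	.devices	= ocp2scp_devs,
};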


@@ -54,7 +54,8 @@ struct ptp_clock_request {
* clock operations
*
* @adjfreq: Adjusts the frequency of the hardware clock.
* parameter delta: Desired period change in parts per billion.
* parameter delta: Desired frequency offset from nominal frequency
* in parts per billion
*
* @adjtime: Shifts the time of the hardware clock.
* parameter delta: Desired change in nanoseconds.
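A brief worked illustration of the clarified units (numbers invented, not from this commit):

/* A clock measured to run 12.5 ppm slow needs delta = +12500 ppb;
 * one measured 2 ppm fast needs delta = -2000 ppb.
 */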


@@ -1,2 +0,0 @@
header-y += md_p.h
header-y += md_u.h


@@ -1,301 +0,0 @@
/*
md_p.h : physical layout of Linux RAID devices
Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
You should have received a copy of the GNU General Public License
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _MD_P_H
#define _MD_P_H
#include <linux/types.h>
/*
* RAID superblock.
*
* The RAID superblock maintains some statistics on each RAID configuration.
* Each real device in the RAID set contains it near the end of the device.
* Some of the ideas are copied from the ext2fs implementation.
*
* We currently use 4096 bytes as follows:
*
* word offset function
*
* 0 - 31 Constant generic RAID device information.
* 32 - 63 Generic state information.
* 64 - 127 Personality specific information.
* 128 - 511 12 32-words descriptors of the disks in the raid set.
* 512 - 911 Reserved.
* 912 - 1023 Disk specific descriptor.
*/
/*
* If x is the real device size in bytes, we return an apparent size of:
*
* y = (x & ~(MD_RESERVED_BYTES - 1)) - MD_RESERVED_BYTES
*
* and place the 4kB superblock at offset y.
*/
#define MD_RESERVED_BYTES (64 * 1024)
#define MD_RESERVED_SECTORS (MD_RESERVED_BYTES / 512)
#define MD_NEW_SIZE_SECTORS(x) ((x & ~(MD_RESERVED_SECTORS - 1)) - MD_RESERVED_SECTORS)
#define MD_SB_BYTES 4096
#define MD_SB_WORDS (MD_SB_BYTES / 4)
#define MD_SB_SECTORS (MD_SB_BYTES / 512)
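/*
 * Worked example (values invented, added for illustration only): for a
 * component device of 1000000 sectors,
 *
 *   MD_NEW_SIZE_SECTORS(1000000)
 *     = (1000000 & ~(MD_RESERVED_SECTORS - 1)) - MD_RESERVED_SECTORS
 *     = (1000000 & ~127) - 128
 *     = 999936 - 128
 *     = 999808
 *
 * so the array uses 999808 sectors of the device and the 4 KiB v0.90
 * superblock starts at sector 999808, 64 KiB below the aligned end.
 */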
/*
* The following are counted in 32-bit words
*/
#define MD_SB_GENERIC_OFFSET 0
#define MD_SB_PERSONALITY_OFFSET 64
#define MD_SB_DISKS_OFFSET 128
#define MD_SB_DESCRIPTOR_OFFSET 992
#define MD_SB_GENERIC_CONSTANT_WORDS 32
#define MD_SB_GENERIC_STATE_WORDS 32
#define MD_SB_GENERIC_WORDS (MD_SB_GENERIC_CONSTANT_WORDS + MD_SB_GENERIC_STATE_WORDS)
#define MD_SB_PERSONALITY_WORDS 64
#define MD_SB_DESCRIPTOR_WORDS 32
#define MD_SB_DISKS 27
#define MD_SB_DISKS_WORDS (MD_SB_DISKS*MD_SB_DESCRIPTOR_WORDS)
#define MD_SB_RESERVED_WORDS (1024 - MD_SB_GENERIC_WORDS - MD_SB_PERSONALITY_WORDS - MD_SB_DISKS_WORDS - MD_SB_DESCRIPTOR_WORDS)
#define MD_SB_EQUAL_WORDS (MD_SB_GENERIC_WORDS + MD_SB_PERSONALITY_WORDS + MD_SB_DISKS_WORDS)
/*
* Device "operational" state bits
*/
#define MD_DISK_FAULTY 0 /* disk is faulty / operational */
#define MD_DISK_ACTIVE 1 /* disk is running or spare disk */
#define MD_DISK_SYNC 2 /* disk is in sync with the raid set */
#define MD_DISK_REMOVED 3 /* disk has been removed from the raid set */
#define MD_DISK_WRITEMOSTLY 9 /* disk is "write-mostly" in RAID1 config.
* read requests will only be sent here in
* dire need
*/
typedef struct mdp_device_descriptor_s {
__u32 number; /* 0 Device number in the entire set */
__u32 major; /* 1 Device major number */
__u32 minor; /* 2 Device minor number */
__u32 raid_disk; /* 3 The role of the device in the raid set */
__u32 state; /* 4 Operational state */
__u32 reserved[MD_SB_DESCRIPTOR_WORDS - 5];
} mdp_disk_t;
#define MD_SB_MAGIC 0xa92b4efc
/*
* Superblock state bits
*/
#define MD_SB_CLEAN 0
#define MD_SB_ERRORS 1
#define MD_SB_BITMAP_PRESENT 8 /* bitmap may be present nearby */
/*
* Notes:
* - if an array is being reshaped (restriped) in order to change the
* number of active devices in the array, 'raid_disks' will be
* the larger of the old and new numbers. 'delta_disks' will
* be the "new - old". So if +ve, raid_disks is the new value, and
* "raid_disks-delta_disks" is the old. If -ve, raid_disks is the
* old value and "raid_disks+delta_disks" is the new (smaller) value.
*/
typedef struct mdp_superblock_s {
/*
* Constant generic information
*/
__u32 md_magic; /* 0 MD identifier */
__u32 major_version; /* 1 major version to which the set conforms */
__u32 minor_version; /* 2 minor version ... */
__u32 patch_version; /* 3 patchlevel version ... */
__u32 gvalid_words; /* 4 Number of used words in this section */
__u32 set_uuid0; /* 5 Raid set identifier */
__u32 ctime; /* 6 Creation time */
__u32 level; /* 7 Raid personality */
__u32 size; /* 8 Apparent size of each individual disk */
__u32 nr_disks; /* 9 total disks in the raid set */
__u32 raid_disks; /* 10 disks in a fully functional raid set */
__u32 md_minor; /* 11 preferred MD minor device number */
__u32 not_persistent; /* 12 does it have a persistent superblock */
__u32 set_uuid1; /* 13 Raid set identifier #2 */
__u32 set_uuid2; /* 14 Raid set identifier #3 */
__u32 set_uuid3; /* 15 Raid set identifier #4 */
__u32 gstate_creserved[MD_SB_GENERIC_CONSTANT_WORDS - 16];
/*
* Generic state information
*/
__u32 utime; /* 0 Superblock update time */
__u32 state; /* 1 State bits (clean, ...) */
__u32 active_disks; /* 2 Number of currently active disks */
__u32 working_disks; /* 3 Number of working disks */
__u32 failed_disks; /* 4 Number of failed disks */
__u32 spare_disks; /* 5 Number of spare disks */
__u32 sb_csum; /* 6 checksum of the whole superblock */
#ifdef __BIG_ENDIAN
__u32 events_hi; /* 7 high-order of superblock update count */
__u32 events_lo; /* 8 low-order of superblock update count */
__u32 cp_events_hi; /* 9 high-order of checkpoint update count */
__u32 cp_events_lo; /* 10 low-order of checkpoint update count */
#else
__u32 events_lo; /* 7 low-order of superblock update count */
__u32 events_hi; /* 8 high-order of superblock update count */
__u32 cp_events_lo; /* 9 low-order of checkpoint update count */
__u32 cp_events_hi; /* 10 high-order of checkpoint update count */
#endif
__u32 recovery_cp; /* 11 recovery checkpoint sector count */
/* These are only valid for minor_version > 90 */
__u64 reshape_position; /* 12,13 next address in array-space for reshape */
__u32 new_level; /* 14 new level we are reshaping to */
__u32 delta_disks; /* 15 change in number of raid_disks */
__u32 new_layout; /* 16 new layout */
__u32 new_chunk; /* 17 new chunk size (bytes) */
__u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 18];
/*
* Personality information
*/
__u32 layout; /* 0 the array's physical layout */
__u32 chunk_size; /* 1 chunk size in bytes */
__u32 root_pv; /* 2 LV root PV */
__u32 root_block; /* 3 LV root block */
__u32 pstate_reserved[MD_SB_PERSONALITY_WORDS - 4];
/*
* Disks information
*/
mdp_disk_t disks[MD_SB_DISKS];
/*
* Reserved
*/
__u32 reserved[MD_SB_RESERVED_WORDS];
/*
* Active descriptor
*/
mdp_disk_t this_disk;
} mdp_super_t;
static inline __u64 md_event(mdp_super_t *sb) {
__u64 ev = sb->events_hi;
return (ev<<32)| sb->events_lo;
}
#define MD_SUPERBLOCK_1_TIME_SEC_MASK ((1ULL<<40) - 1)
/*
* The version-1 superblock :
* All numeric fields are little-endian.
*
* total size: 256 bytes plus 2 per device.
* 1K allows 384 devices.
*/
struct mdp_superblock_1 {
/* constant array information - 128 bytes */
__le32 magic; /* MD_SB_MAGIC: 0xa92b4efc - little endian */
__le32 major_version; /* 1 */
__le32 feature_map; /* bit 0 set if 'bitmap_offset' is meaningful */
__le32 pad0; /* always set to 0 when writing */
__u8 set_uuid[16]; /* user-space generated. */
char set_name[32]; /* set and interpreted by user-space */
__le64 ctime; /* lo 40 bits are seconds, top 24 are microseconds or 0*/
__le32 level; /* -4 (multipath), -1 (linear), 0,1,4,5 */
__le32 layout; /* only for raid5 and raid10 currently */
__le64 size; /* used size of component devices, in 512byte sectors */
__le32 chunksize; /* in 512byte sectors */
__le32 raid_disks;
__le32 bitmap_offset; /* sectors after start of superblock that bitmap starts
* NOTE: signed, so bitmap can be before superblock
* only meaningful if feature_map[0] is set.
*/
/* These are only valid with feature bit '4' */
__le32 new_level; /* new level we are reshaping to */
__le64 reshape_position; /* next address in array-space for reshape */
__le32 delta_disks; /* change in number of raid_disks */
__le32 new_layout; /* new layout */
__le32 new_chunk; /* new chunk size (512byte sectors) */
__le32 new_offset; /* signed number to add to data_offset in new
* layout. 0 == no-change. This can be
* different on each device in the array.
*/
/* constant this-device information - 64 bytes */
__le64 data_offset; /* sector start of data, often 0 */
__le64 data_size; /* sectors in this device that can be used for data */
__le64 super_offset; /* sector start of this superblock */
__le64 recovery_offset;/* sectors before this offset (from data_offset) have been recovered */
__le32 dev_number; /* permanent identifier of this device - not role in raid */
__le32 cnt_corrected_read; /* number of read errors that were corrected by re-writing */
__u8 device_uuid[16]; /* user-space settable, ignored by kernel */
__u8 devflags; /* per-device flags. Only one defined...*/
#define WriteMostly1 1 /* mask for writemostly flag in above */
/* Bad block log. If there are any bad blocks the feature flag is set.
* If offset and size are non-zero, that space is reserved and available
*/
__u8 bblog_shift; /* shift from sectors to block size */
__le16 bblog_size; /* number of sectors reserved for list */
__le32 bblog_offset; /* sector offset from superblock to bblog,
* signed - not unsigned */
/* array state information - 64 bytes */
__le64 utime; /* 40 bits second, 24 bits microseconds */
__le64 events; /* incremented when superblock updated */
__le64 resync_offset; /* data before this offset (from data_offset) known to be in sync */
__le32 sb_csum; /* checksum up to devs[max_dev] */
__le32 max_dev; /* size of devs[] array to consider */
__u8 pad3[64-32]; /* set to 0 when writing */
/* device state information. Indexed by dev_number.
* 2 bytes per device
* Note there are no per-device state flags. State information is rolled
* into the 'roles' value. If a device is spare or faulty, then it doesn't
* have a meaningful role.
*/
__le16 dev_roles[0]; /* role in array, or 0xffff for a spare, or 0xfffe for faulty */
};
/* feature_map bits */
#define MD_FEATURE_BITMAP_OFFSET 1
#define MD_FEATURE_RECOVERY_OFFSET 2 /* recovery_offset is present and
* must be honoured
*/
#define MD_FEATURE_RESHAPE_ACTIVE 4
#define MD_FEATURE_BAD_BLOCKS 8 /* badblock list is not empty */
#define MD_FEATURE_REPLACEMENT 16 /* This device is replacing an
* active device with same 'role'.
* 'recovery_offset' is also set.
*/
#define MD_FEATURE_RESHAPE_BACKWARDS 32 /* Reshape doesn't change number
* of devices, but is going
* backwards anyway.
*/
#define MD_FEATURE_NEW_OFFSET 64 /* new_offset must be honoured */
#define MD_FEATURE_ALL (MD_FEATURE_BITMAP_OFFSET \
|MD_FEATURE_RECOVERY_OFFSET \
|MD_FEATURE_RESHAPE_ACTIVE \
|MD_FEATURE_BAD_BLOCKS \
|MD_FEATURE_REPLACEMENT \
|MD_FEATURE_RESHAPE_BACKWARDS \
|MD_FEATURE_NEW_OFFSET \
)
#endif


@@ -11,149 +11,10 @@
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _MD_U_H
#define _MD_U_H
/*
* Different major versions are not compatible.
* Different minor versions are only downward compatible.
* Different patchlevel versions are downward and upward compatible.
*/
#define MD_MAJOR_VERSION 0
#define MD_MINOR_VERSION 90
/*
* MD_PATCHLEVEL_VERSION indicates kernel functionality.
* >=1 means different superblock formats are selectable using SET_ARRAY_INFO
* and major_version/minor_version accordingly
* >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT
* in the super status byte
* >=3 means that bitmap superblock version 4 is supported, which uses
* little-endian representation rather than host-endian
*/
#define MD_PATCHLEVEL_VERSION 3
#include <uapi/linux/raid/md_u.h>
/* ioctls */
/* status */
#define RAID_VERSION _IOR (MD_MAJOR, 0x10, mdu_version_t)
#define GET_ARRAY_INFO _IOR (MD_MAJOR, 0x11, mdu_array_info_t)
#define GET_DISK_INFO _IOR (MD_MAJOR, 0x12, mdu_disk_info_t)
#define PRINT_RAID_DEBUG _IO (MD_MAJOR, 0x13)
#define RAID_AUTORUN _IO (MD_MAJOR, 0x14)
#define GET_BITMAP_FILE _IOR (MD_MAJOR, 0x15, mdu_bitmap_file_t)
/* configuration */
#define CLEAR_ARRAY _IO (MD_MAJOR, 0x20)
#define ADD_NEW_DISK _IOW (MD_MAJOR, 0x21, mdu_disk_info_t)
#define HOT_REMOVE_DISK _IO (MD_MAJOR, 0x22)
#define SET_ARRAY_INFO _IOW (MD_MAJOR, 0x23, mdu_array_info_t)
#define SET_DISK_INFO _IO (MD_MAJOR, 0x24)
#define WRITE_RAID_INFO _IO (MD_MAJOR, 0x25)
#define UNPROTECT_ARRAY _IO (MD_MAJOR, 0x26)
#define PROTECT_ARRAY _IO (MD_MAJOR, 0x27)
#define HOT_ADD_DISK _IO (MD_MAJOR, 0x28)
#define SET_DISK_FAULTY _IO (MD_MAJOR, 0x29)
#define HOT_GENERATE_ERROR _IO (MD_MAJOR, 0x2a)
#define SET_BITMAP_FILE _IOW (MD_MAJOR, 0x2b, int)
/* usage */
#define RUN_ARRAY _IOW (MD_MAJOR, 0x30, mdu_param_t)
/* 0x31 was START_ARRAY */
#define STOP_ARRAY _IO (MD_MAJOR, 0x32)
#define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33)
#define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34)
/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6
#ifdef __KERNEL__
extern int mdp_major;
#endif
typedef struct mdu_version_s {
int major;
int minor;
int patchlevel;
} mdu_version_t;
typedef struct mdu_array_info_s {
/*
* Generic constant information
*/
int major_version;
int minor_version;
int patch_version;
int ctime;
int level;
int size;
int nr_disks;
int raid_disks;
int md_minor;
int not_persistent;
/*
* Generic state information
*/
int utime; /* 0 Superblock update time */
int state; /* 1 State bits (clean, ...) */
int active_disks; /* 2 Number of currently active disks */
int working_disks; /* 3 Number of working disks */
int failed_disks; /* 4 Number of failed disks */
int spare_disks; /* 5 Number of spare disks */
/*
* Personality information
*/
int layout; /* 0 the array's physical layout */
int chunk_size; /* 1 chunk size in bytes */
} mdu_array_info_t;
/* non-obvious values for 'level' */
#define LEVEL_MULTIPATH (-4)
#define LEVEL_LINEAR (-1)
#define LEVEL_FAULTY (-5)
/* we need a value for 'no level specified' and 0
* means 'raid0', so we need something else. This is
* for internal use only
*/
#define LEVEL_NONE (-1000000)
typedef struct mdu_disk_info_s {
/*
* configuration/status of one particular disk
*/
int number;
int major;
int minor;
int raid_disk;
int state;
} mdu_disk_info_t;
typedef struct mdu_start_info_s {
/*
* configuration/status of one particular disk
*/
int major;
int minor;
int raid_disk;
int state;
} mdu_start_info_t;
typedef struct mdu_bitmap_file_s
{
char pathname[4096];
} mdu_bitmap_file_t;
typedef struct mdu_param_s
{
int personality; /* 1,2,3,4 */
int chunk_size; /* in bytes */
int max_fault; /* unused for now */
} mdu_param_t;
#endif


@@ -24,6 +24,7 @@
#ifndef _LINUX_RBTREE_AUGMENTED_H
#define _LINUX_RBTREE_AUGMENTED_H
#include <linux/compiler.h>
#include <linux/rbtree.h>
/*


@@ -275,9 +275,11 @@ struct rio_id_table {
* struct rio_net - RIO network info
* @node: Node in global list of RIO networks
* @devices: List of devices in this network
* @switches: List of switches in this network
* @mports: List of master ports accessing this network
* @hport: Default port for accessing this network
* @id: RIO network ID
* @destid_table: destID allocation table
*/
struct rio_net {
struct list_head node; /* node in list of networks */


@@ -46,8 +46,9 @@ struct ads7846_platform_data {
u16 debounce_rep; /* additional consecutive good readings
* required after the first two */
int gpio_pendown; /* the GPIO used to decide the pendown
* state if get_pendown_state == NULL
*/
* state if get_pendown_state == NULL */
int gpio_pendown_debounce; /* platform specific debounce time for
* the gpio_pendown */
int (*get_pendown_state)(void);
int (*filter_init) (const struct ads7846_platform_data *pdata,
void **filter_data);


@@ -3,8 +3,6 @@
*
* Copyright (C) 2009-2010 Nokia Corporation
*
* Contact: Aaro Koskinen <aaro.koskinen@nokia.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or


@@ -191,7 +191,8 @@ struct tcp_sock {
u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */
early_retrans_delayed:1, /* Delayed ER timer installed */
syn_data:1, /* SYN includes data */
syn_fastopen:1; /* SYN includes Fast Open option */
syn_fastopen:1, /* SYN includes Fast Open option */
syn_data_acked:1;/* data in SYN is acked by SYN-ACK */
/* RTT measurement */
u32 srtt; /* smoothed round trip time << 3 */