Merge 3.8-rc5 into staging-next

This resolves a merge issue with an iio driver and the zram code.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Committed by Greg Kroah-Hartman, 2013-01-25 21:25:02 -08:00
1884 changed files with 18715 additions and 10347 deletions


@@ -297,10 +297,12 @@ enum {
ATA_LOG_SATA_NCQ = 0x10,
ATA_LOG_SATA_ID_DEV_DATA = 0x30,
ATA_LOG_SATA_SETTINGS = 0x08,
-ATA_LOG_DEVSLP_MDAT = 0x30,
+ATA_LOG_DEVSLP_OFFSET = 0x30,
+ATA_LOG_DEVSLP_SIZE = 0x08,
+ATA_LOG_DEVSLP_MDAT = 0x00,
ATA_LOG_DEVSLP_MDAT_MASK = 0x1F,
-ATA_LOG_DEVSLP_DETO = 0x31,
-ATA_LOG_DEVSLP_VALID = 0x37,
+ATA_LOG_DEVSLP_DETO = 0x01,
+ATA_LOG_DEVSLP_VALID = 0x07,
ATA_LOG_DEVSLP_VALID_MASK = 0x80,
/* READ/WRITE LONG (obsolete) */


@@ -15,12 +15,12 @@ struct pata_platform_info {
unsigned int irq_flags;
};
-extern int __devinit __pata_platform_probe(struct device *dev,
-struct resource *io_res,
-struct resource *ctl_res,
-struct resource *irq_res,
-unsigned int ioport_shift,
-int __pio_mask);
+extern int __pata_platform_probe(struct device *dev,
+struct resource *io_res,
+struct resource *ctl_res,
+struct resource *irq_res,
+unsigned int ioport_shift,
+int __pio_mask);
/*
* Marvell SATA private data


@@ -24,6 +24,7 @@
#define _LINUX_AUDIT_H_
#include <linux/sched.h>
+#include <linux/ptrace.h>
#include <uapi/linux/audit.h>
struct audit_sig_info {
@@ -157,7 +158,8 @@ void audit_core_dumps(long signr);
static inline void audit_seccomp(unsigned long syscall, long signr, int code)
{
-if (unlikely(!audit_dummy_context()))
+/* Force a record to be reported if a signal was delivered. */
+if (signr || unlikely(!audit_dummy_context()))
__audit_seccomp(syscall, signr, code);
}


@@ -92,7 +92,7 @@ struct bcma_drv_gmac_cmn {
#define gmac_cmn_write32(gc, offset, val) bcma_write32((gc)->core, offset, val)
#ifdef CONFIG_BCMA_DRIVER_GMAC_CMN
-extern void __devinit bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc);
+extern void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc);
#else
static inline void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc) { }
#endif


@@ -214,7 +214,7 @@ struct bcma_drv_pci {
#define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val)
#define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val)
-extern void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc);
+extern void bcma_core_pci_init(struct bcma_drv_pci *pc);
extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
struct bcma_device *core, bool enable);
extern void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend);


@@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *mask,
-bool sync, bool *contended, struct page **page);
+bool sync, bool *contended);
extern int compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order);
@@ -75,7 +75,7 @@ static inline bool compaction_restarting(struct zone *zone, int order)
#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *nodemask,
-bool sync, bool *contended, struct page **page)
+bool sync, bool *contended)
{
return COMPACT_CONTINUE;
}


@@ -13,9 +13,11 @@
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/slab.h>
+#include <linux/kref.h>
/**
* struct cpu_rmap - CPU affinity reverse-map
+* @refcount: kref for object
* @size: Number of objects to be reverse-mapped
* @used: Number of objects added
* @obj: Pointer to array of object pointers
@@ -23,6 +25,7 @@
* based on affinity masks
*/
struct cpu_rmap {
+struct kref refcount;
u16 size, used;
void **obj;
struct {
@@ -33,15 +36,7 @@ struct cpu_rmap {
#define CPU_RMAP_DIST_INF 0xffff
extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags);
-/**
-* free_cpu_rmap - free CPU affinity reverse-map
-* @rmap: Reverse-map allocated with alloc_cpu_rmap(), or %NULL
-*/
-static inline void free_cpu_rmap(struct cpu_rmap *rmap)
-{
-kfree(rmap);
-}
+extern int cpu_rmap_put(struct cpu_rmap *rmap);
extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj);
extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
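
The hunk above swaps the open-coded free_cpu_rmap() for kref-based reference counting via cpu_rmap_put(). A minimal usage sketch, illustrative only and not part of this diff (the queue count and function names are made up):

#include <linux/cpu_rmap.h>
#include <linux/gfp.h>

static struct cpu_rmap *example_setup_rmap(unsigned int nr_queues)
{
	/* Allocation is unchanged; the rmap now starts out with one reference. */
	return alloc_cpu_rmap(nr_queues, GFP_KERNEL);
}

static void example_teardown_rmap(struct cpu_rmap *rmap)
{
	/*
	 * Was: free_cpu_rmap(rmap).  The owner now drops its reference and
	 * the object is freed once every other holder has done the same.
	 */
	cpu_rmap_put(rmap);
}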


@@ -126,9 +126,9 @@ struct cpuidle_driver {
struct module *owner;
int refcnt;
unsigned int power_specified:1;
/* set to 1 to use the core cpuidle time keeping (for all states). */
unsigned int en_core_tk_irqen:1;
/* states array must be ordered in decreasing power consumption */
struct cpuidle_state states[CPUIDLE_STATE_MAX];
int state_count;
int safe_state_index;


@@ -128,7 +128,6 @@ struct cred {
struct key *process_keyring; /* keyring private to this process */
struct key *thread_keyring; /* keyring private to this thread */
struct key *request_key_auth; /* assumed request_key authority */
-struct thread_group_cred *tgcred; /* thread-group shared credentials */
#endif
#ifdef CONFIG_SECURITY
void *security; /* subjective LSM security */


@@ -93,14 +93,6 @@
#define __exit __section(.exit.text) __exitused __cold notrace
-/* Used for HOTPLUG, but that is always enabled now, so just make them noops */
-#define __devinit
-#define __devinitdata
-#define __devinitconst
-#define __devexit
-#define __devexitdata
-#define __devexitconst
/* Used for HOTPLUG_CPU */
#define __cpuinit __section(.cpuinit.text) __cold notrace
#define __cpuinitdata __section(.cpuinit.data)
@@ -337,18 +329,6 @@ void __init parse_early_options(char *cmdline);
#define __INITRODATA_OR_MODULE __INITRODATA
#endif /*CONFIG_MODULES*/
-/* Functions marked as __devexit may be discarded at kernel link time, depending
-on config options. Newer versions of binutils detect references from
-retained sections to discarded sections and flag an error. Pointers to
-__devexit functions must use __devexit_p(function_name), the wrapper will
-insert either the function_name or NULL, depending on the config options.
-*/
-#if defined(MODULE) || defined(CONFIG_HOTPLUG)
-#define __devexit_p(x) x
-#else
-#define __devexit_p(x) NULL
-#endif
#ifdef MODULE
#define __exit_p(x) x
#else


@@ -268,11 +268,6 @@ struct irq_affinity_notify {
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
-static inline void irq_run_affinity_notifiers(void)
-{
-flush_scheduled_work();
-}
#else /* CONFIG_SMP */
static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)


@@ -24,6 +24,7 @@ struct ipc_ids {
unsigned short seq_max;
struct rw_semaphore rw_mutex;
struct idr ipcs_idr;
+int next_id;
};
struct ipc_namespace {


@@ -652,8 +652,8 @@ struct ata_device {
u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
};
-/* Identify Device Data Log (30h), SATA Settings (page 08h) */
-u8 sata_settings[ATA_SECT_SIZE];
+/* DEVSLP Timing Variables from Identify Device Data Log */
+u8 devslp_timing[ATA_LOG_DEVSLP_SIZE];
/* error history */
int spdn_cnt;


@@ -524,14 +524,17 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
+# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i)
# else
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
+# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i) lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i) do { } while (0)
+# define rwsem_acquire_nest(l, s, t, n, i) do { } while (0)
# define rwsem_acquire_read(l, s, t, i) do { } while (0)
# define rwsem_release(l, n, i) do { } while (0)
#endif


@@ -455,7 +455,6 @@ void put_pages_list(struct list_head *pages);
void split_page(struct page *page, unsigned int order);
int split_free_page(struct page *page);
-int capture_free_page(struct page *page, int alloc_order, int migratetype);
/*
* Compound pages have a destructor function. Provide a


@@ -503,14 +503,6 @@ struct zone {
* rarely used fields:
*/
const char *name;
-#ifdef CONFIG_MEMORY_ISOLATION
-/*
-* the number of MIGRATE_ISOLATE *pageblock*.
-* We need this for free page counting. Look at zone_watermark_ok_safe.
-* It's protected by zone->lock
-*/
-int nr_pageblock_isolate;
-#endif
} ____cacheline_internodealigned_in_smp;
typedef enum {


@@ -199,11 +199,11 @@ struct module_use {
struct module *source, *target;
};
-enum module_state
-{
-MODULE_STATE_LIVE,
-MODULE_STATE_COMING,
-MODULE_STATE_GOING,
+enum module_state {
+MODULE_STATE_LIVE, /* Normal state. */
+MODULE_STATE_COMING, /* Full formed, running module_init. */
+MODULE_STATE_GOING, /* Going away. */
+MODULE_STATE_UNFORMED, /* Still setting it up. */
};
/**


@@ -34,7 +34,9 @@ struct msg_queue {
/* Helper routines for sys_msgsnd and sys_msgrcv */
extern long do_msgsnd(int msqid, long mtype, void __user *mtext,
size_t msgsz, int msgflg);
-extern long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
-size_t msgsz, long msgtyp, int msgflg);
+extern long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
+int msgflg,
+long (*msg_fill)(void __user *, struct msg_msg *,
+size_t));
#endif /* _LINUX_MSG_H */


@@ -2,6 +2,7 @@
#define _LINUX_NAMEI_H
#include <linux/dcache.h>
+#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/path.h>


@@ -60,6 +60,9 @@ struct wireless_dev;
#define SET_ETHTOOL_OPS(netdev,ops) \
( (netdev)->ethtool_ops = (ops) )
+extern void netdev_set_default_ethtool_ops(struct net_device *dev,
+const struct ethtool_ops *ops);
/* hardware address assignment types */
#define NET_ADDR_PERM 0 /* address is permanent (default) */
#define NET_ADDR_RANDOM 1 /* address is generated randomly */
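
The two added lines above declare netdev_set_default_ethtool_ops(). As a hedged illustration (not part of this diff; the ops table and setup function are hypothetical), a protocol layer could install fallback ethtool_ops that are intended to apply only when the driver has not supplied its own:

#include <linux/netdevice.h>
#include <linux/ethtool.h>

/* Hypothetical fallback table, for illustration only. */
static const struct ethtool_ops example_default_ethtool_ops = {
	.get_link = ethtool_op_get_link,
};

static void example_setup(struct net_device *dev)
{
	/* A driver-supplied ethtool_ops table is expected to take precedence. */
	netdev_set_default_ethtool_ops(dev, &example_default_ethtool_ops);
}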


@@ -0,0 +1,41 @@
/*
* Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#include <linux/errno.h>
#ifdef CONFIG_IRAM_ALLOC
int __init iram_init(unsigned long base, unsigned long size);
void __iomem *iram_alloc(unsigned int size, unsigned long *dma_addr);
void iram_free(unsigned long dma_addr, unsigned int size);
#else
static inline int __init iram_init(unsigned long base, unsigned long size)
{
return -ENOMEM;
}
static inline void __iomem *iram_alloc(unsigned int size, unsigned long *dma_addr)
{
return NULL;
}
static inline void iram_free(unsigned long base, unsigned long size) {}
#endif
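
The new header above only declares the IRAM allocator; a short usage sketch follows (illustrative only — the buffer size, function names, and error handling are invented for the example):

#include <linux/errno.h>
#include <linux/sizes.h>
/* plus the iram_* declarations from the header shown above */

static void __iomem *example_buf;
static unsigned long example_dma;

static int example_grab_iram(void)
{
	example_buf = iram_alloc(SZ_4K, &example_dma);
	if (!example_buf)	/* also what the !CONFIG_IRAM_ALLOC stub returns */
		return -ENOMEM;
	return 0;
}

static void example_release_iram(void)
{
	iram_free(example_dma, SZ_4K);
}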


@@ -46,9 +46,8 @@ struct persistent_ram_zone {
size_t old_log_size;
};
-struct persistent_ram_zone * __devinit persistent_ram_new(phys_addr_t start,
-size_t size, u32 sig,
-int ecc_size);
+struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+u32 sig, int ecc_size);
void persistent_ram_free(struct persistent_ram_zone *prz);
void persistent_ram_zap(struct persistent_ram_zone *prz);


@@ -45,7 +45,6 @@ extern long arch_ptrace(struct task_struct *child, long request,
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern void ptrace_disable(struct task_struct *);
-extern int ptrace_check_attach(struct task_struct *task, bool ignore_state);
extern int ptrace_request(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
extern void ptrace_notify(int exit_code);


@@ -123,9 +123,9 @@ __rb_change_child(struct rb_node *old, struct rb_node *new,
extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
-static __always_inline void
-rb_erase_augmented(struct rb_node *node, struct rb_root *root,
-const struct rb_augment_callbacks *augment)
+static __always_inline struct rb_node *
+__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+const struct rb_augment_callbacks *augment)
{
struct rb_node *child = node->rb_right, *tmp = node->rb_left;
struct rb_node *parent, *rebalance;
@@ -217,6 +217,14 @@ rb_erase_augmented(struct rb_node *node, struct rb_root *root,
}
augment->propagate(tmp, NULL);
+return rebalance;
+}
+static __always_inline void
+rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+const struct rb_augment_callbacks *augment)
+{
+struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
if (rebalance)
__rb_erase_color(rebalance, root, augment->rotate);
}


@@ -125,8 +125,17 @@ extern void downgrade_write(struct rw_semaphore *sem);
*/
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
+extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
+# define down_write_nest_lock(sem, nest_lock) \
+do { \
+typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
+_down_write_nest_lock(sem, &(nest_lock)->dep_map); \
+} while (0);
#else
# define down_read_nested(sem, subclass) down_read(sem)
+# define down_write_nest_lock(sem, nest_lock) down_write(sem)
# define down_write_nested(sem, subclass) down_write(sem)
#endif
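
The additions above introduce down_write_nest_lock(), which lets a caller tell lockdep that many same-class rwsems are written while an outer lock is already held. A minimal sketch of the annotation pattern (hypothetical structures, not taken from this diff):

#include <linux/rwsem.h>
#include <linux/list.h>

/* Hypothetical structures, for illustration only. */
struct example_outer {
	struct rw_semaphore outer_sem;	/* held for write by the caller */
	struct list_head inners;
};

struct example_inner {
	struct rw_semaphore sem;
	struct list_head node;
};

/* Caller already holds ctx->outer_sem for write. */
static void example_lock_all_inner(struct example_outer *ctx)
{
	struct example_inner *i;

	/*
	 * Without the nest_lock annotation, lockdep would flag the repeated
	 * write acquisition of the same lock class as a possible deadlock.
	 */
	list_for_each_entry(i, &ctx->inners, node)
		down_write_nest_lock(&i->sem, &ctx->outer_sem);
}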


@@ -1810,6 +1810,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
#define PF_MEMALLOC 0x00000800 /* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
+#define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */
#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
#define PF_FROZEN 0x00010000 /* frozen for system suspend */
#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
@@ -2713,7 +2714,16 @@ static inline void thread_group_cputime_init(struct signal_struct *sig)
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
-extern void signal_wake_up(struct task_struct *t, int resume_stopped);
+extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
+static inline void signal_wake_up(struct task_struct *t, bool resume)
+{
+signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
+}
+static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
+{
+signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
+}
/*
* Wrappers for p->thread_info->cpu access. No-op on UP.