Merge tag 'v3.18-rc7' into drm-next

This fixes a bunch of conflicts prior to merging the i915 tree.

Linux 3.18-rc7

Conflicts:
	drivers/gpu/drm/exynos/exynos_drm_drv.c
	drivers/gpu/drm/i915/i915_drv.c
	drivers/gpu/drm/i915/intel_pm.c
	drivers/gpu/drm/tegra/dc.c
Dave Airlie
2014-12-02 10:58:33 +10:00
590 changed files with 5475 additions and 2568 deletions


@@ -18,8 +18,11 @@
  * position @h. For example
  * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
  */
-#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
-#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#define GENMASK(h, l) \
+	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#define GENMASK_ULL(h, l) \
+	(((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
 extern unsigned int __sw_hweight8(unsigned int w);
 extern unsigned int __sw_hweight16(unsigned int w);
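For illustration only, not part of the diff above: the old GENMASK() shifts a fixed-width constant by (h) - (l) + 1 bits, which is undefined for full-width masks such as GENMASK(31, 0); the new form never shifts by more than BITS_PER_LONG - 1. A minimal userspace restatement (BITS_PER_LONG derived from sizeof(long) here):

#include <stdio.h>

#define BITS_PER_LONG ((int)(8 * sizeof(long)))
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
	/* Full-width mask: the old "U32_C(1) << 32" form overflows here. */
	printf("GENMASK(31, 0) = 0x%lx\n", GENMASK(31, 0)); /* 0xffffffff */
	printf("GENMASK(7, 4)  = 0x%lx\n", GENMASK(7, 4));  /* 0xf0 */
	return 0;
}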


@@ -46,6 +46,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat,
 extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
 extern unsigned long free_all_bootmem(void);
+extern void reset_node_managed_pages(pg_data_t *pgdat);
 extern void reset_all_zones_managed_pages(void);
 extern void free_bootmem_node(pg_data_t *pgdat,


@@ -99,6 +99,12 @@ inval_skb:
 	return 1;
 }
+static inline bool can_is_canfd_skb(const struct sk_buff *skb)
+{
+	/* the CAN specific type of skb is identified by its data length */
+	return skb->len == CANFD_MTU;
+}
 /* get data length from can_dlc with sanitized can_dlc */
 u8 can_dlc2len(u8 can_dlc);
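For illustration only, not part of the diff above: the same length-based distinction is what userspace SocketCAN relies on, where the number of bytes returned by read() identifies the frame type. A sketch, assuming a raw CAN socket with the CAN_RAW_FD_FRAMES option enabled:

#include <stdio.h>
#include <unistd.h>
#include <linux/can.h>

/* Read one frame and report whether it is CAN FD or classic CAN. */
static void handle_frame(int sock)
{
	struct canfd_frame frame;	/* large enough for either frame type */
	ssize_t n = read(sock, &frame, sizeof(frame));

	if (n == CANFD_MTU)
		printf("CAN FD frame, %d data bytes\n", frame.len);
	else if (n == CAN_MTU)
		printf("classic CAN frame, %d data bytes\n", frame.len);
}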


@@ -352,7 +352,6 @@ struct clk_divider {
 #define CLK_DIVIDER_READ_ONLY BIT(5)
 extern const struct clk_ops clk_divider_ops;
-extern const struct clk_ops clk_divider_ro_ops;
 struct clk *clk_register_divider(struct device *dev, const char *name,
 		const char *parent_name, unsigned long flags,
 		void __iomem *reg, u8 shift, u8 width,


@@ -72,7 +72,7 @@ struct iio_event_data {
 #define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
-#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF)
+#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F)
 #define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)


@@ -242,7 +242,7 @@ static inline void in_dev_put(struct in_device *idev)
 static __inline__ __be32 inet_make_mask(int logmask)
 {
 	if (logmask)
-		return htonl(~((1<<(32-logmask))-1));
+		return htonl(~((1U<<(32-logmask))-1));
 	return 0;
 }
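For illustration only, not part of the diff above: a userspace restatement of the same mask computation. The 1U suffix matters for the /1 case, where an unsuffixed 1 << 31 would shift into the sign bit of a signed int:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Build a network-byte-order netmask from a prefix length. */
static uint32_t make_mask(int logmask)
{
	if (logmask)
		return htonl(~((1U << (32 - logmask)) - 1));
	return 0;
}

int main(void)
{
	/* /24 -> 255.255.255.0, /1 -> 128.0.0.0 (the case 1U makes well defined) */
	printf("/24 mask = 0x%08x\n", (unsigned int)ntohl(make_mask(24)));
	printf("/1  mask = 0x%08x\n", (unsigned int)ntohl(make_mask(1)));
	return 0;
}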


@@ -77,11 +77,6 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
 	return kstat_cpu(cpu).irqs_sum;
 }
-/*
- * Lock/unlock the current runqueue - to extract task statistics:
- */
-extern unsigned long long task_delta_exec(struct task_struct *);
 extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
 extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
 extern void account_steal_time(cputime_t);


@@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
-bool kvm_is_mmio_pfn(pfn_t pfn);
+bool kvm_is_reserved_pfn(pfn_t pfn);
 struct kvm_irq_ack_notifier {
 	struct hlist_node link;


@@ -330,6 +330,13 @@ enum max77693_irq_source {
 	MAX77693_IRQ_GROUP_NR,
 };
+#define SRC_IRQ_CHARGER BIT(0)
+#define SRC_IRQ_TOP BIT(1)
+#define SRC_IRQ_FLASH BIT(2)
+#define SRC_IRQ_MUIC BIT(3)
+#define SRC_IRQ_ALL (SRC_IRQ_CHARGER | SRC_IRQ_TOP \
+		| SRC_IRQ_FLASH | SRC_IRQ_MUIC)
 #define LED_IRQ_FLED2_OPEN BIT(0)
 #define LED_IRQ_FLED2_SHORT BIT(1)
 #define LED_IRQ_FLED1_OPEN BIT(2)


@@ -431,6 +431,15 @@ struct zone {
 	 */
 	int nr_migrate_reserve_block;
+#ifdef CONFIG_MEMORY_ISOLATION
+	/*
+	 * Number of isolated pageblock. It is used to solve incorrect
+	 * freepage counting problem due to racy retrieving migratetype
+	 * of pageblock. Protected by zone->lock.
+	 */
+	unsigned long nr_isolate_pageblock;
+#endif
 #ifdef CONFIG_MEMORY_HOTPLUG
 	/* see spanned/present_pages for more description */
 	seqlock_t span_seqlock;


@@ -1224,11 +1224,22 @@ struct nfs41_free_stateid_res {
 	unsigned int status;
 };
+static inline void
+nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
+{
+	kfree(cinfo->buckets);
+}
 #else
 struct pnfs_ds_commit_info {
 };
+static inline void
+nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
+{
+}
 #endif /* CONFIG_NFS_V4_1 */
 #ifdef CONFIG_NFS_V4_2


@@ -2,6 +2,10 @@
 #define __LINUX_PAGEISOLATION_H
 #ifdef CONFIG_MEMORY_ISOLATION
+static inline bool has_isolate_pageblock(struct zone *zone)
+{
+	return zone->nr_isolate_pageblock;
+}
 static inline bool is_migrate_isolate_page(struct page *page)
 {
 	return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
@@ -11,6 +15,10 @@ static inline bool is_migrate_isolate(int migratetype)
 	return migratetype == MIGRATE_ISOLATE;
 }
 #else
+static inline bool has_isolate_pageblock(struct zone *zone)
+{
+	return false;
+}
 static inline bool is_migrate_isolate_page(struct page *page)
 {
 	return false;

@@ -331,6 +331,7 @@ struct pci_dev {
 	unsigned int is_added:1;
 	unsigned int is_busmaster:1; /* device is busmaster */
 	unsigned int no_msi:1; /* device may not use msi */
+	unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */
 	unsigned int block_cfg_access:1; /* config space access is blocked */
 	unsigned int broken_parity_status:1; /* Device generates false positive parity */
 	unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
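For illustration only, not part of the diff above: a hypothetical fixup (placeholder vendor/device IDs) showing how a quirk would set the new flag for a device whose MSI capability cannot address memory above 4 GiB:

#include <linux/pci.h>

/* Hypothetical quirk: restrict this device to 32-bit MSI message addresses. */
static void quirk_msi_32bit_only(struct pci_dev *pdev)
{
	pdev->no_64bit_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_msi_32bit_only);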


@@ -133,7 +133,13 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
 	/* paired with smp_store_release() in percpu_ref_reinit() */
 	smp_read_barrier_depends();
-	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
+	/*
+	 * Theoretically, the following could test just ATOMIC; however,
+	 * then we'd have to mask off DEAD separately as DEAD may be
+	 * visible without ATOMIC if we race with percpu_ref_kill(). DEAD
+	 * implies ATOMIC anyway. Test them together.
+	 */
+	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
 		return false;
 	*percpu_countp = (unsigned long __percpu *)percpu_ptr;


@@ -72,8 +72,10 @@ struct generic_pm_domain {
 	bool max_off_time_changed;
 	bool cached_power_down_ok;
 	struct gpd_cpuidle_data *cpuidle_data;
-	void (*attach_dev)(struct device *dev);
-	void (*detach_dev)(struct device *dev);
+	int (*attach_dev)(struct generic_pm_domain *domain,
+			  struct device *dev);
+	void (*detach_dev)(struct generic_pm_domain *domain,
+			   struct device *dev);
 };
 static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -104,7 +106,7 @@ struct generic_pm_domain_data {
 	struct notifier_block nb;
 	struct mutex lock;
 	unsigned int refcount;
-	bool need_restore;
+	int need_restore;
 };
 #ifdef CONFIG_PM_GENERIC_DOMAINS
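For illustration only, not part of the diff above: a hypothetical provider sketch (the foo_* names are placeholders) showing how a platform power-domain driver fills in the new callback prototypes, which now receive the generic_pm_domain as well as the device and let attach_dev return an error code:

#include <linux/device.h>
#include <linux/pm_domain.h>

static int foo_pd_attach_dev(struct generic_pm_domain *domain,
			     struct device *dev)
{
	dev_dbg(dev, "attached to power domain %s\n", domain->name);
	return 0;	/* attach_dev can now propagate failures */
}

static void foo_pd_detach_dev(struct generic_pm_domain *domain,
			      struct device *dev)
{
	dev_dbg(dev, "detached from power domain %s\n", domain->name);
}

static struct generic_pm_domain foo_pd = {
	.name		= "foo-pd",
	.attach_dev	= foo_pd_attach_dev,
	.detach_dev	= foo_pd_detach_dev,
};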


@@ -253,9 +253,6 @@ struct charger_manager {
 	struct device *dev;
 	struct charger_desc *desc;
-	struct power_supply *fuel_gauge;
-	struct power_supply **charger_stat;
 #ifdef CONFIG_THERMAL
 	struct thermal_zone_device *tzd_batt;
 #endif


@@ -200,6 +200,12 @@ struct power_supply {
 	void (*external_power_changed)(struct power_supply *psy);
 	void (*set_charged)(struct power_supply *psy);
+	/*
+	 * Set if thermal zone should not be created for this power supply.
+	 * For example for virtual supplies forwarding calls to actual
+	 * sensors or other supplies.
+	 */
+	bool no_thermal;
 	/* For APM emulation, think legacy userspace. */
 	int use_for_apm;


@@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
 	__ring_buffer_alloc((size), (flags), &__key); \
 })
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu);
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
 int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
 			  struct file *filp, poll_table *poll_table);


@@ -256,7 +256,7 @@ struct ucred {
 #define MSG_EOF MSG_FIN
 #define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
-#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exit for file
+#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exec for file
 					   descriptor received through
 					   SCM_RIGHTS */
 #if defined(CONFIG_COMPAT)
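For illustration only, not part of the diff above: a hypothetical helper showing what the corrected comment describes. Passing MSG_CMSG_CLOEXEC to recvmsg() makes the kernel set the close-on-exec flag on file descriptors it installs for SCM_RIGHTS control data:

#include <string.h>
#include <sys/uio.h>
#include <sys/socket.h>

/* Receive one file descriptor over a connected AF_UNIX socket; -1 on error. */
static int recv_fd(int sock)
{
	char data;
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	union {
		struct cmsghdr align;
		char buf[CMSG_SPACE(sizeof(int))];
	} u;
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = u.buf,
		.msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg;
	int fd = -1;

	/* The received descriptor gets FD_CLOEXEC atomically. */
	if (recvmsg(sock, &msg, MSG_CMSG_CLOEXEC) < 0)
		return -1;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
			memcpy(&fd, CMSG_DATA(cmsg), sizeof(fd));
			break;
		}
	}
	return fd;
}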