Merge commit 'origin/master' into next
@@ -52,6 +52,7 @@ header-y += const.h
 header-y += cgroupstats.h
 header-y += cramfs_fs.h
 header-y += cycx_cfm.h
+header-y += dcbnl.h
 header-y += dlmconstants.h
 header-y += dlm_device.h
 header-y += dlm_netlink.h

@@ -89,6 +89,8 @@ enum {
         ATA_ID_DLF = 128,
         ATA_ID_CSFO = 129,
         ATA_ID_CFA_POWER = 160,
         ATA_ID_CFA_KEY_MGMT = 162,
         ATA_ID_CFA_MODES = 163,
         ATA_ID_ROT_SPEED = 217,
         ATA_ID_PIO4 = (1 << 1),

@@ -171,8 +171,6 @@ struct bio {
 #define BIO_RW_FAILFAST_TRANSPORT 8
 #define BIO_RW_FAILFAST_DRIVER 9

 #define BIO_RW_SYNC (BIO_RW_SYNCIO | BIO_RW_UNPLUG)

 #define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1 << (flag)))

 /*

@@ -708,6 +708,8 @@ struct req_iterator {
 };

 /* This should not be used directly - use rq_for_each_segment */
 #define for_each_bio(_bio) \
         for (; _bio; _bio = _bio->bi_next)
 #define __rq_for_each_bio(_bio, rq) \
         if ((rq->bio)) \
                 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

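For orientation, a minimal sketch (not part of this commit) of how a block driver might walk a request with the __rq_for_each_bio() helper shown above; the function name is hypothetical.

#include <linux/blkdev.h>

/* Hypothetical helper: count the bios chained onto a request. */
static unsigned int example_count_bios(struct request *rq)
{
        struct bio *bio;
        unsigned int n = 0;

        __rq_for_each_bio(bio, rq)      /* expands to the if/for pair above */
                n++;

        return n;
}
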
@@ -15,6 +15,7 @@ enum blktrace_cat {
         BLK_TC_WRITE = 1 << 1,          /* writes */
         BLK_TC_BARRIER = 1 << 2,        /* barrier */
         BLK_TC_SYNC = 1 << 3,           /* sync IO */
+        BLK_TC_SYNCIO = BLK_TC_SYNC,
         BLK_TC_QUEUE = 1 << 4,          /* queueing/merging */
         BLK_TC_REQUEUE = 1 << 5,        /* requeueing */
         BLK_TC_ISSUE = 1 << 6,          /* issue */

@@ -234,7 +234,6 @@ struct cpufreq_driver {
         int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg);
         int (*resume) (struct cpufreq_policy *policy);
         struct freq_attr **attr;
-        bool hide_interface;
 };

 /* flags */

@@ -20,10 +20,12 @@
 #ifndef __LINUX_DCBNL_H__
 #define __LINUX_DCBNL_H__

+#include <linux/types.h>
+
 #define DCB_PROTO_VERSION 1

 struct dcbmsg {
-        unsigned char dcb_family;
+        __u8 dcb_family;
         __u8 cmd;
         __u16 dcb_pad;
 };

@@ -147,6 +147,8 @@ extern void put_driver(struct device_driver *drv);
 extern struct device_driver *driver_find(const char *name,
                                          struct bus_type *bus);
 extern int driver_probe_done(void);
+extern int wait_for_device_probe(void);


 /* sysfs interface for exporting driver attributes */

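A minimal sketch (not part of this commit) of the usual caller pattern for wait_for_device_probe(): a late initcall that lets boot-time probing settle before looking up devices. The initcall name is hypothetical.

#include <linux/device.h>
#include <linux/init.h>

static int __init example_late_setup(void)
{
        /* Block until the probes queued so far have completed. */
        wait_for_device_probe();

        /* ... now it is safe to look up devices registered during boot ... */
        return 0;
}
late_initcall(example_late_setup);
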
@@ -97,7 +97,6 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

 /**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
- * @refcount: local_t used for open-coded "bigref" counting
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
@@ -114,13 +113,11 @@ struct dma_chan_percpu {
 * @cookie: last cookie value returned to client
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
- * @refcount: kref, used in "bigref" slow-mode
- * @slow_ref: indicates that the DMA channel is free
- * @rcu: the DMA channel's RCU head
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 * @client-count: how many clients are using this channel
 * @table_count: number of appearances in the mem-to-mem allocation table
 * @private: private data for certain client-channel associations
 */
 struct dma_chan {
         struct dma_device *device;
@@ -134,6 +131,7 @@ struct dma_chan {
         struct dma_chan_percpu *local;
         int client_count;
         int table_count;
         void *private;
 };

 /**
@@ -211,8 +209,6 @@ struct dma_async_tx_descriptor {
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
- * @refcount: reference count
- * @done: IO completion struct
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
@@ -225,6 +221,7 @@ struct dma_async_tx_descriptor {
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_terminate_all: terminate all pending operations
 * @device_is_tx_complete: poll for transaction completion
 * @device_issue_pending: push pending transactions to hardware
 */
 struct dma_device {

@@ -1,7 +1,7 @@
 /*
 * include/linux/firmware-map.h:
 *  Copyright (C) 2008 SUSE LINUX Products GmbH
- *  by Bernhard Walle <bwalle@suse.de>
+ *  by Bernhard Walle <bernhard.walle@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by

@@ -54,24 +54,30 @@ struct inodes_stat_t {
 #define MAY_ACCESS 16
 #define MAY_OPEN 32

 /*
 * flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond
 * to O_WRONLY and O_RDWR via the strange trick in __dentry_open()
 */

 /* file is open for reading */
 #define FMODE_READ ((__force fmode_t)1)
 /* file is open for writing */
 #define FMODE_WRITE ((__force fmode_t)2)
 /* file is seekable */
 #define FMODE_LSEEK ((__force fmode_t)4)
-/* file can be accessed using pread/pwrite */
+/* file can be accessed using pread */
 #define FMODE_PREAD ((__force fmode_t)8)
-#define FMODE_PWRITE FMODE_PREAD /* These go hand in hand */
+/* file can be accessed using pwrite */
+#define FMODE_PWRITE ((__force fmode_t)16)
 /* File is opened for execution with sys_execve / sys_uselib */
-#define FMODE_EXEC ((__force fmode_t)16)
+#define FMODE_EXEC ((__force fmode_t)32)
 /* File is opened with O_NDELAY (only set for block devices) */
-#define FMODE_NDELAY ((__force fmode_t)32)
+#define FMODE_NDELAY ((__force fmode_t)64)
 /* File is opened with O_EXCL (only set for block devices) */
-#define FMODE_EXCL ((__force fmode_t)64)
+#define FMODE_EXCL ((__force fmode_t)128)
 /* File is opened using open(.., 3, ..) and is writeable only for ioctls
    (specialy hack for floppy.c) */
-#define FMODE_WRITE_IOCTL ((__force fmode_t)128)
+#define FMODE_WRITE_IOCTL ((__force fmode_t)256)

 /*
 * Don't update ctime and mtime.
@@ -87,10 +93,10 @@ struct inodes_stat_t {
 #define WRITE 1
 #define READA 2 /* read-ahead - don't block if no resources */
 #define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */
-#define READ_SYNC (READ | (1 << BIO_RW_SYNC))
+#define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
 #define READ_META (READ | (1 << BIO_RW_META))
-#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNC))
-#define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNC))
+#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
+#define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
 #define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER))
 #define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD)
 #define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER))

@@ -511,7 +511,6 @@ struct hd_driveid {
         unsigned short words69_70[2]; /* reserved words 69-70
                                        * future command overlap and queuing
                                        */
-        /* HDIO_GET_IDENTITY currently returns only words 0 through 70 */
         unsigned short words71_74[4]; /* reserved words 71-74
                                        * for IDENTIFY PACKET DEVICE command
                                        */

@@ -33,7 +33,7 @@
 */
 #define I2C_RETRIES 0x0701 /* number of times a device address should
                               be polled when not acknowledging */
-#define I2C_TIMEOUT 0x0702 /* set timeout in jiffies - call with int */
+#define I2C_TIMEOUT 0x0702 /* set timeout in units of 10 ms */

 /* NOTE: Slave address is 7 or 10 bits, but 10-bit addresses
 * are NOT supported! (due to code brokenness)

@@ -361,7 +361,7 @@ struct i2c_adapter {
         struct mutex bus_lock;
         struct mutex clist_lock;

-        int timeout;
+        int timeout; /* in jiffies */
         int retries;
         struct device dev; /* the adapter device */

@@ -663,7 +663,7 @@ typedef struct ide_drive_s ide_drive_t;
 #define to_ide_device(dev) container_of(dev, ide_drive_t, gendev)

 #define to_ide_drv(obj, cont_type) \
-        container_of(obj, struct cont_type, kref)
+        container_of(obj, struct cont_type, dev)

 #define ide_drv_g(disk, cont_type) \
         container_of((disk)->private_data, struct cont_type, driver)
@@ -866,6 +866,7 @@ struct ide_host {
         unsigned int n_ports;
         struct device *dev[2];
         unsigned int (*init_chipset)(struct pci_dev *);
         irq_handler_t irq_handler;
         unsigned long host_flags;
         void *host_priv;
         ide_hwif_t *cur_port; /* for hosts requiring serialization */

@@ -210,6 +210,7 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)

         /* Move the mac addresses to the beginning of the new header. */
         memmove(skb->data, skb->data + VLAN_HLEN, 2 * VLAN_ETH_ALEN);
+        skb->mac_header -= VLAN_HLEN;

         /* first, the ethernet type */
         veth->h_vlan_proto = htons(ETH_P_8021Q);

@@ -194,6 +194,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 /* FSTS_REG */
 #define DMA_FSTS_PPF ((u32)2)
 #define DMA_FSTS_PFO ((u32)1)
+#define DMA_FSTS_IQE (1 << 4)
 #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)

 /* FRCD_REG, 32 bits access */
@@ -328,7 +329,7 @@ extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                           unsigned int size_order, u64 type,
                           int non_present_entry_flush);

-extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
+extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

 extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
 extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);

@@ -30,11 +30,14 @@
 * See Documentation/io_mapping.txt
 */

-/* this struct isn't actually defined anywhere */
-struct io_mapping;
-
 #ifdef CONFIG_HAVE_ATOMIC_IOMAP

+struct io_mapping {
+        resource_size_t base;
+        unsigned long size;
+        pgprot_t prot;
+};
+
 /*
 * For small address space machines, mapping large objects
 * into the kernel virtual space isn't practical. Where
@@ -43,23 +46,40 @@ struct io_mapping;
 */

 static inline struct io_mapping *
-io_mapping_create_wc(unsigned long base, unsigned long size)
+io_mapping_create_wc(resource_size_t base, unsigned long size)
 {
-        return (struct io_mapping *) base;
+        struct io_mapping *iomap;
+
+        if (!is_io_mapping_possible(base, size))
+                return NULL;
+
+        iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
+        if (!iomap)
+                return NULL;
+
+        iomap->base = base;
+        iomap->size = size;
+        iomap->prot = pgprot_writecombine(__pgprot(__PAGE_KERNEL));
+        return iomap;
 }

 static inline void
 io_mapping_free(struct io_mapping *mapping)
 {
         kfree(mapping);
 }

 /* Atomic map/unmap */
 static inline void *
 io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset)
 {
-        offset += (unsigned long) mapping;
-        return iomap_atomic_prot_pfn(offset >> PAGE_SHIFT, KM_USER0,
-                                     __pgprot(__PAGE_KERNEL_WC));
+        resource_size_t phys_addr;
+        unsigned long pfn;
+
+        BUG_ON(offset >= mapping->size);
+        phys_addr = mapping->base + offset;
+        pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
+        return iomap_atomic_prot_pfn(pfn, KM_USER0, mapping->prot);
 }

 static inline void
@@ -71,8 +91,12 @@ io_mapping_unmap_atomic(void *vaddr)
 static inline void *
 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 {
-        offset += (unsigned long) mapping;
-        return ioremap_wc(offset, PAGE_SIZE);
+        resource_size_t phys_addr;
+
+        BUG_ON(offset >= mapping->size);
+        phys_addr = mapping->base + offset;
+
+        return ioremap_wc(phys_addr, PAGE_SIZE);
 }

 static inline void
@@ -83,9 +107,12 @@ io_mapping_unmap(void *vaddr)

 #else

+/* this struct isn't actually defined anywhere */
+struct io_mapping;
+
 /* Create the io_mapping object*/
 static inline struct io_mapping *
-io_mapping_create_wc(unsigned long base, unsigned long size)
+io_mapping_create_wc(resource_size_t base, unsigned long size)
 {
         return (struct io_mapping *) ioremap_wc(base, size);
 }

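A minimal usage sketch (not part of this commit) of the io_mapping helpers as they appear above; the BAR base and length are hypothetical and error handling is reduced to the bare minimum.

#include <linux/errno.h>
#include <linux/io-mapping.h>

static int example_use_aperture(resource_size_t bar_base, unsigned long bar_len)
{
        struct io_mapping *map;
        void *vaddr;

        map = io_mapping_create_wc(bar_base, bar_len);
        if (!map)
                return -ENOMEM;

        /* Temporarily map one page at offset 0 of the aperture. */
        vaddr = io_mapping_map_atomic_wc(map, 0);
        /* ... write-combined access through vaddr ... */
        io_mapping_unmap_atomic(vaddr);

        io_mapping_free(map);
        return 0;
}
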
@@ -275,7 +275,7 @@ enum {
         * advised to wait only for the following duration before
         * doing SRST.
         */
-        ATA_TMOUT_PMP_SRST_WAIT = 1000,
+        ATA_TMOUT_PMP_SRST_WAIT = 5000,

         /* ATA bus states */
         BUS_UNKNOWN = 0,
@@ -530,6 +530,7 @@ struct ata_queued_cmd {
         unsigned long flags; /* ATA_QCFLAG_xxx */
         unsigned int tag;
         unsigned int n_elem;
+        unsigned int orig_n_elem;

         int dma_dir;

@@ -750,7 +751,8 @@ struct ata_port {
         acpi_handle acpi_handle;
         struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */
 #endif
-        u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */
+        /* owned by EH */
+        u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
 };

 /* The following initializer overrides a method to NULL whether one of

@@ -1041,10 +1041,23 @@ extern void free_bootmem_with_active_regions(int nid,
 typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
 extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
 extern void sparse_memory_present_with_active_regions(int nid);
-#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
-extern int early_pfn_to_nid(unsigned long pfn);
-#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

+#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
+    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
+static inline int __early_pfn_to_nid(unsigned long pfn)
+{
+        return 0;
+}
+#else
+/* please see mm/page_alloc.c */
+extern int __meminit early_pfn_to_nid(unsigned long pfn);
+#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
+/* there is a per-arch backend function. */
+extern int __meminit __early_pfn_to_nid(unsigned long pfn);
+#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+#endif
+
 extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long,
                              unsigned long, enum memmap_context);
@@ -1159,6 +1172,7 @@ extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);

 /* mm/page-writeback.c */
 int write_one_page(struct page *page, int wait);
+void task_dirty_inc(struct task_struct *tsk);

 /* readahead.c */
 #define VM_MAX_READAHEAD 128 /* kbytes */

@@ -1071,7 +1071,7 @@ void sparse_init(void);
 #endif /* CONFIG_SPARSEMEM */

 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
-#define early_pfn_in_nid(pfn, nid) (early_pfn_to_nid(pfn) == (nid))
+bool early_pfn_in_nid(unsigned long pfn, int nid);
 #else
 #define early_pfn_in_nid(pfn, nid) (1)
 #endif

@@ -1079,6 +1079,7 @@ extern void synchronize_net(void);
 extern int register_netdevice_notifier(struct notifier_block *nb);
 extern int unregister_netdevice_notifier(struct notifier_block *nb);
 extern int init_dummy_netdev(struct net_device *dev);
+extern void netdev_resync_ops(struct net_device *dev);

 extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 extern struct net_device *dev_get_by_index(struct net *net, int ifindex);

@@ -2,7 +2,7 @@
 #define _XT_NFLOG_TARGET

 #define XT_NFLOG_DEFAULT_GROUP 0x1
-#define XT_NFLOG_DEFAULT_THRESHOLD 1
+#define XT_NFLOG_DEFAULT_THRESHOLD 0

 #define XT_NFLOG_MASK 0x0

@@ -1445,6 +1445,7 @@
 #define PCI_DEVICE_ID_DIGI_DF_M_E 0x0071
 #define PCI_DEVICE_ID_DIGI_DF_M_IOM2_A 0x0072
 #define PCI_DEVICE_ID_DIGI_DF_M_A 0x0073
+#define PCI_DEVICE_ID_DIGI_NEO_8 0x00B1
 #define PCI_DEVICE_ID_NEO_2DB9 0x00C8
 #define PCI_DEVICE_ID_NEO_2DB9PRI 0x00C9
 #define PCI_DEVICE_ID_NEO_2RJ45 0x00CA
@@ -2330,6 +2331,9 @@
 #define PCI_DEVICE_ID_INTEL_82378 0x0484
 #define PCI_DEVICE_ID_INTEL_I960 0x0960
 #define PCI_DEVICE_ID_INTEL_I960RM 0x0962
+#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062
+#define PCI_DEVICE_ID_INTEL_82573E_SOL 0x1085
+#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108F
 #define PCI_DEVICE_ID_INTEL_82815_MC 0x1130
 #define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132
 #define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221

@@ -381,10 +381,12 @@ struct dev_pm_info {

 #ifdef CONFIG_PM_SLEEP
 extern void device_pm_lock(void);
+extern int sysdev_resume(void);
 extern void device_power_up(pm_message_t state);
 extern void device_resume(pm_message_t state);

 extern void device_pm_unlock(void);
+extern int sysdev_suspend(pm_message_t state);
 extern int device_power_down(pm_message_t state);
 extern int device_suspend(pm_message_t state);
 extern int device_prepare_suspend(pm_message_t state);

@@ -181,4 +181,10 @@ extern long rcu_batches_completed_bh(void);
 #define rcu_enter_nohz() do { } while (0)
 #define rcu_exit_nohz() do { } while (0)

+/* A context switch is a grace period for rcuclassic. */
+static inline int rcu_blocking_is_gp(void)
+{
+        return num_online_cpus() == 1;
+}
+
 #endif /* __LINUX_RCUCLASSIC_H */

@@ -52,6 +52,9 @@ struct rcu_head {
         void (*func)(struct rcu_head *head);
 };

+/* Internal to kernel, but needed by rcupreempt.h. */
+extern int rcu_scheduler_active;
+
 #if defined(CONFIG_CLASSIC_RCU)
 #include <linux/rcuclassic.h>
 #elif defined(CONFIG_TREE_RCU)
@@ -265,6 +268,7 @@ extern void rcu_barrier_sched(void);

 /* Internal to kernel */
 extern void rcu_init(void);
+extern void rcu_scheduler_starting(void);
 extern int rcu_needs_cpu(int cpu);

 #endif /* __LINUX_RCUPDATE_H */

@@ -142,4 +142,19 @@ static inline void rcu_exit_nohz(void)
 #define rcu_exit_nohz() do { } while (0)
 #endif /* CONFIG_NO_HZ */

+/*
+ * A context switch is a grace period for rcupreempt synchronize_rcu()
+ * only during early boot, before the scheduler has been initialized.
+ * So, how the heck do we get a context switch? Well, if the caller
+ * invokes synchronize_rcu(), they are willing to accept a context
+ * switch, so we simply pretend that one happened.
+ *
+ * After boot, there might be a blocked or preempted task in an RCU
+ * read-side critical section, so we cannot then take the fastpath.
+ */
+static inline int rcu_blocking_is_gp(void)
+{
+        return num_online_cpus() == 1 && !rcu_scheduler_active;
+}
+
 #endif /* __LINUX_RCUPREEMPT_H */

@@ -326,4 +326,10 @@ static inline void rcu_exit_nohz(void)
 }
 #endif /* CONFIG_NO_HZ */

+/* A context switch is a grace period for rcutree. */
+static inline int rcu_blocking_is_gp(void)
+{
+        return num_online_cpus() == 1;
+}
+
 #endif /* __LINUX_RCUTREE_H */

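The rcu_blocking_is_gp() helpers added above exist so that blocking RCU primitives can skip the grace-period machinery when a context switch already suffices; a hedged sketch of that caller pattern (the function name is hypothetical):

void example_synchronize(void)
{
        /*
         * On a single CPU (and, for the preempt flavor, before the scheduler
         * starts) a context switch is already a grace period, so there is
         * nothing more to wait for.
         */
        if (rcu_blocking_is_gp())
                return;

        /* ... otherwise queue a callback and wait for a full grace period ... */
}
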
@@ -2291,9 +2291,13 @@ extern long sched_group_rt_runtime(struct task_group *tg);
 extern int sched_group_set_rt_period(struct task_group *tg,
                                      long rt_period_us);
 extern long sched_group_rt_period(struct task_group *tg);
+extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
 #endif

+extern int task_can_switch_user(struct user_struct *up,
+                                struct task_struct *tsk);
+
 #ifdef CONFIG_TASK_XACCT
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {

@@ -19,6 +19,7 @@ struct seq_file {
         size_t from;
         size_t count;
         loff_t index;
+        loff_t read_pos;
         u64 version;
         struct mutex lock;
         const struct seq_operations *op;

@@ -296,6 +296,7 @@ struct uart_port {
 #define UPF_HARDPPS_CD ((__force upf_t) (1 << 11))
 #define UPF_LOW_LATENCY ((__force upf_t) (1 << 13))
 #define UPF_BUGGY_UART ((__force upf_t) (1 << 14))
+#define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15))
 #define UPF_MAGIC_MULTIPLIER ((__force upf_t) (1 << 16))
 #define UPF_CONS_FLOW ((__force upf_t) (1 << 23))
 #define UPF_SHARE_IRQ ((__force upf_t) (1 << 24))

@@ -212,7 +212,7 @@ static inline void serio_unpin_driver(struct serio *serio)
 #define SERIO_FUJITSU 0x35
 #define SERIO_ZHENHUA 0x36
 #define SERIO_INEXIO 0x37
-#define SERIO_TOUCHIT213 0x37
+#define SERIO_TOUCHIT213 0x38
 #define SERIO_W8001 0x39

 #endif

@@ -416,15 +416,6 @@ extern void skb_over_panic(struct sk_buff *skb, int len,
                            void *here);
 extern void skb_under_panic(struct sk_buff *skb, int len,
                             void *here);
-extern void skb_truesize_bug(struct sk_buff *skb);
-
-static inline void skb_truesize_check(struct sk_buff *skb)
-{
-        int len = sizeof(struct sk_buff) + skb->len;
-
-        if (unlikely((int)skb->truesize < len))
-                skb_truesize_bug(skb);
-}

 extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
                         int getfrag(void *from, char *to, int offset,

@@ -127,6 +127,7 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
 void * __must_check __krealloc(const void *, size_t, gfp_t);
 void * __must_check krealloc(const void *, size_t, gfp_t);
 void kfree(const void *);
+void kzfree(const void *);
 size_t ksize(const void *);

 /*

@@ -83,6 +83,13 @@ extern int spi_bitbang_stop(struct spi_bitbang *spi);
 *      int getmiso(struct spi_device *);
 *      void spidelay(unsigned);
 *
+ * setsck()'s is_on parameter is a zero/nonzero boolean.
+ *
+ * setmosi()'s is_on parameter is a zero/nonzero boolean.
+ *
+ * getmiso() is required to return 0 or 1 only. Any other value is invalid
+ * and will result in improper operation.
+ *
 * A non-inlined routine would call bitbang_txrx_*() routines. The
 * main loop could easily compile down to a handful of instructions,
 * especially if the delay is a NOP (to run at peak speed).

@@ -11,13 +11,21 @@
 /* For O_CLOEXEC and O_NONBLOCK */
 #include <linux/fcntl.h>

-/* Flags for timerfd_settime. */
+/*
+ * CAREFUL: Check include/asm-generic/fcntl.h when defining
+ * new flags, since they might collide with O_* ones. We want
+ * to re-use O_* flags that couldn't possibly have a meaning
+ * from eventfd, in order to leave a free define-space for
+ * shared O_* flags.
+ */
 #define TFD_TIMER_ABSTIME (1 << 0)

-/* Flags for timerfd_create. */
 #define TFD_CLOEXEC O_CLOEXEC
 #define TFD_NONBLOCK O_NONBLOCK

+#define TFD_SHARED_FCNTL_FLAGS (TFD_CLOEXEC | TFD_NONBLOCK)
+/* Flags for timerfd_create. */
+#define TFD_CREATE_FLAGS TFD_SHARED_FCNTL_FLAGS
+/* Flags for timerfd_settime. */
+#define TFD_SETTIME_FLAGS TFD_TIMER_ABSTIME
+
 #endif /* _LINUX_TIMERFD_H */

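For context, the TFD_* values above are the flags userspace passes to timerfd_create()/timerfd_settime(); a minimal userspace sketch (not part of this commit, names hypothetical):

#include <sys/timerfd.h>
#include <time.h>
#include <unistd.h>

/* Arm a non-blocking, close-on-exec timer that fires once after 1 second. */
static int example_timerfd(void)
{
        struct itimerspec its = { .it_value = { .tv_sec = 1 } };
        int fd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK);

        if (fd < 0)
                return -1;

        if (timerfd_settime(fd, 0, &its, NULL) < 0) {
                close(fd);
                return -1;
        }
        return fd;
}
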
@@ -13,6 +13,7 @@ struct user_namespace {
         struct kref kref;
         struct hlist_head uidhash_table[UIDHASH_SZ];
         struct user_struct *creator;
+        struct work_struct destroyer;
 };

 extern struct user_namespace init_user_ns;

@@ -84,6 +84,10 @@ extern struct vm_struct *get_vm_area_caller(unsigned long size,
                                             unsigned long flags, void *caller);
 extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
                                        unsigned long start, unsigned long end);
+extern struct vm_struct *__get_vm_area_caller(unsigned long size,
+                                              unsigned long flags,
+                                              unsigned long start, unsigned long end,
+                                              void *caller);
 extern struct vm_struct *get_vm_area_node(unsigned long size,
                                           unsigned long flags, int node,
                                           gfp_t gfp_mask);