Merge branches 'for-4.2/i2c-hid', 'for-4.2/lenovo', 'for-4.2/plantronics', 'for-4.2/rmi', 'for-4.2/sensor-hub', 'for-4.2/sjoy', 'for-4.2/sony' and 'for-4.2/wacom' into for-linus

Conflicts:
	drivers/hid/wacom_wac.c
8015 changed files with 333542 additions and 173658 deletions


@@ -4,44 +4,6 @@
#include <uapi/linux/a.out.h>
#ifndef __ASSEMBLY__
#if defined (M_OLDSUN2)
#else
#endif
#if defined (M_68010)
#else
#endif
#if defined (M_68020)
#else
#endif
#if defined (M_SPARC)
#else
#endif
#if !defined (N_MAGIC)
#endif
#if !defined (N_BADMAG)
#endif
#if !defined (N_TXTOFF)
#endif
#if !defined (N_DATOFF)
#endif
#if !defined (N_TRELOFF)
#endif
#if !defined (N_DRELOFF)
#endif
#if !defined (N_SYMOFF)
#endif
#if !defined (N_STROFF)
#endif
#if !defined (N_TXTADDR)
#endif
#if defined(vax) || defined(hp300) || defined(pyr)
#endif
#ifdef sony
#endif /* Sony. */
#ifdef is68k
#endif
#if defined(m68k) && defined(PORTAR)
#endif
#ifdef linux
#include <asm/page.h>
#if defined(__i386__) || defined(__mc68000__)
@@ -51,34 +13,5 @@
#endif
#endif
#endif
#ifndef N_DATADDR
#endif
#if !defined (N_BSSADDR)
#endif
#if !defined (N_NLIST_DECLARED)
#endif /* no N_NLIST_DECLARED. */
#if !defined (N_UNDF)
#endif
#if !defined (N_ABS)
#endif
#if !defined (N_TEXT)
#endif
#if !defined (N_DATA)
#endif
#if !defined (N_BSS)
#endif
#if !defined (N_FN)
#endif
#if !defined (N_EXT)
#endif
#if !defined (N_TYPE)
#endif
#if !defined (N_STAB)
#endif
#if !defined (N_RELOCATION_INFO_DECLARED)
#ifdef NS32K
#else
#endif
#endif /* no N_RELOCATION_INFO_DECLARED. */
#endif /*__ASSEMBLY__ */
#endif /* __A_OUT_GNU_H__ */


@@ -53,10 +53,16 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
return adev ? adev->handle : NULL;
}
#define ACPI_COMPANION(dev) ((dev)->acpi_node.companion)
#define ACPI_COMPANION_SET(dev, adev) ACPI_COMPANION(dev) = (adev)
#define ACPI_COMPANION(dev) acpi_node((dev)->fwnode)
#define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \
acpi_fwnode_handle(adev) : NULL)
#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
static inline bool has_acpi_companion(struct device *dev)
{
return is_acpi_node(dev->fwnode);
}
static inline void acpi_preset_companion(struct device *dev,
struct acpi_device *parent, u64 addr)
{
@@ -73,6 +79,7 @@ enum acpi_irq_model_id {
ACPI_IRQ_MODEL_IOAPIC,
ACPI_IRQ_MODEL_IOSAPIC,
ACPI_IRQ_MODEL_PLATFORM,
ACPI_IRQ_MODEL_GIC,
ACPI_IRQ_MODEL_COUNT
};
@@ -146,9 +153,14 @@ void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
void acpi_numa_arch_fixup(void);
#ifndef PHYS_CPUID_INVALID
typedef u32 phys_cpuid_t;
#define PHYS_CPUID_INVALID (phys_cpuid_t)(-1)
#endif
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu);
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu);
int acpi_unmap_cpu(int cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
@@ -471,6 +483,11 @@ static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
return NULL;
}
static inline bool has_acpi_companion(struct device *dev)
{
return false;
}
static inline const char *acpi_dev_name(struct acpi_device *adev)
{
return NULL;
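
For context, a minimal sketch of how a driver consumes the reworked accessors (example_probe() and its message are hypothetical; the companion is now resolved through dev->fwnode):

#include <linux/acpi.h>
#include <linux/device.h>

static int example_probe(struct device *dev)
{
	struct acpi_device *adev;

	if (!has_acpi_companion(dev))	/* dev->fwnode is not an ACPI node */
		return -ENODEV;

	adev = ACPI_COMPANION(dev);	/* looked up via dev->fwnode now */
	dev_info(dev, "ACPI companion: %s\n", acpi_dev_name(adev));
	return 0;
}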

include/linux/acpi_irq.h (new file, +10)

@@ -0,0 +1,10 @@
#ifndef _LINUX_ACPI_IRQ_H
#define _LINUX_ACPI_IRQ_H
#include <linux/irq.h>
#ifndef acpi_irq_init
static inline void acpi_irq_init(void) { }
#endif
#endif /* _LINUX_ACPI_IRQ_H */


@@ -1,86 +1,23 @@
#ifndef __LINUX__AIO_H
#define __LINUX__AIO_H
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/aio_abi.h>
#include <linux/uio.h>
#include <linux/rcupdate.h>
#include <linux/atomic.h>
struct kioctx;
struct kiocb;
struct mm_struct;
#define KIOCB_KEY 0
/*
* We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
* cancelled or completed (this makes a certain amount of sense because
* successful cancellation - io_cancel() - does deliver the completion to
* userspace).
*
* And since most things don't implement kiocb cancellation and we'd really like
* kiocb completion to be lockless when possible, we use ki_cancel to
* synchronize cancellation and completion - we only set it to KIOCB_CANCELLED
* with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel().
*/
#define KIOCB_CANCELLED ((void *) (~0ULL))
typedef int (kiocb_cancel_fn)(struct kiocb *);
struct kiocb {
struct file *ki_filp;
struct kioctx *ki_ctx; /* NULL for sync ops */
kiocb_cancel_fn *ki_cancel;
void *private;
union {
void __user *user;
struct task_struct *tsk;
} ki_obj;
__u64 ki_user_data; /* user's data for completion */
loff_t ki_pos;
size_t ki_nbytes; /* copy of iocb->aio_nbytes */
struct list_head ki_list; /* the aio core uses this
* for cancellation */
/*
* If the aio_resfd field of the userspace iocb is not zero,
* this is the underlying eventfd context to deliver events to.
*/
struct eventfd_ctx *ki_eventfd;
};
static inline bool is_sync_kiocb(struct kiocb *kiocb)
{
return kiocb->ki_ctx == NULL;
}
static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
*kiocb = (struct kiocb) {
.ki_ctx = NULL,
.ki_filp = filp,
.ki_obj.tsk = current,
};
}
/* prototypes */
#ifdef CONFIG_AIO
extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
extern void aio_complete(struct kiocb *iocb, long res, long res2);
struct mm_struct;
extern void exit_aio(struct mm_struct *mm);
extern long do_io_submit(aio_context_t ctx_id, long nr,
struct iocb __user *__user *iocbpp, bool compat);
void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
#else
static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
struct mm_struct;
static inline void exit_aio(struct mm_struct *mm) { }
static inline long do_io_submit(aio_context_t ctx_id, long nr,
struct iocb __user * __user *iocbpp,
@@ -89,11 +26,6 @@ static inline void kiocb_set_cancel_fn(struct kiocb *req,
kiocb_cancel_fn *cancel) { }
#endif /* CONFIG_AIO */
static inline struct kiocb *list_kiocb(struct list_head *h)
{
return list_entry(h, struct kiocb, ki_list);
}
/* for sysctl: */
extern unsigned long aio_nr;
extern unsigned long aio_max_nr;
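
For illustration, a sketch of the cmpxchg() protocol the KIOCB_CANCELLED comment above describes, mirroring kiocb_cancel() in fs/aio.c: cancellation claims the kiocb atomically, so it and completion agree on a single winner without a lock.

static int example_kiocb_cancel(struct kiocb *kiocb)
{
	kiocb_cancel_fn *old, *cancel;

	cancel = ACCESS_ONCE(kiocb->ki_cancel);
	do {
		/* no cancel function, or someone already won the race */
		if (!cancel || cancel == KIOCB_CANCELLED)
			return -EINVAL;

		old = cancel;
		cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
	} while (cancel != old);

	return cancel(kiocb);
}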


@@ -24,16 +24,22 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/arm-cci.h>
struct device_node;
#ifdef CONFIG_ARM_CCI
extern bool cci_probed(void);
#else
static inline bool cci_probed(void) { return false; }
#endif
#ifdef CONFIG_ARM_CCI400_PORT_CTRL
extern int cci_ace_get_port(struct device_node *dn);
extern int cci_disable_port_by_cpu(u64 mpidr);
extern int __cci_control_port_by_device(struct device_node *dn, bool enable);
extern int __cci_control_port_by_index(u32 port, bool enable);
#else
static inline bool cci_probed(void) { return false; }
static inline int cci_ace_get_port(struct device_node *dn)
{
return -ENODEV;
@@ -49,6 +55,7 @@ static inline int __cci_control_port_by_index(u32 port, bool enable)
return -ENODEV;
}
#endif
#define cci_disable_port_by_device(dev) \
__cci_control_port_by_device(dev, false)
#define cci_enable_port_by_device(dev) \


@@ -60,12 +60,15 @@ struct dma_chan_ref {
* dependency chain
* @ASYNC_TX_FENCE: specify that the next operation in the dependency
* chain uses this operation's result as an input
* @ASYNC_TX_PQ_XOR_DST: do not overwrite the syndrome but XOR it with the
* input data. Required for rmw case.
*/
enum async_tx_flags {
ASYNC_TX_XOR_ZERO_DST = (1 << 0),
ASYNC_TX_XOR_DROP_DST = (1 << 1),
ASYNC_TX_ACK = (1 << 2),
ASYNC_TX_FENCE = (1 << 3),
ASYNC_TX_PQ_XOR_DST = (1 << 4),
};
/**


@@ -0,0 +1,34 @@
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef __BCM47XX_NVRAM_H
#define __BCM47XX_NVRAM_H
#include <linux/types.h>
#include <linux/kernel.h>
#ifdef CONFIG_BCM47XX
int bcm47xx_nvram_init_from_mem(u32 base, u32 lim);
int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len);
int bcm47xx_nvram_gpio_pin(const char *name);
#else
static inline int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
{
return -ENOTSUPP;
};
static inline int bcm47xx_nvram_getenv(const char *name, char *val,
size_t val_len)
{
return -ENOTSUPP;
};
static inline int bcm47xx_nvram_gpio_pin(const char *name)
{
return -ENOTSUPP;
};
#endif
#endif /* __BCM47XX_NVRAM_H */
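
A short usage sketch, assuming a CONFIG_BCM47XX kernel; the "boardtype" variable name is only an illustration, and a negative return is taken to mean the variable was not found:

#include <linux/bcm47xx_nvram.h>
#include <linux/printk.h>

static void example_print_boardtype(void)
{
	char buf[32];

	if (bcm47xx_nvram_getenv("boardtype", buf, sizeof(buf)) >= 0)
		pr_info("boardtype: %s\n", buf);
}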


@@ -434,6 +434,27 @@ static inline struct bcma_device *bcma_find_core(struct bcma_bus *bus,
return bcma_find_core_unit(bus, coreid, 0);
}
#ifdef CONFIG_BCMA_HOST_PCI
extern void bcma_host_pci_up(struct bcma_bus *bus);
extern void bcma_host_pci_down(struct bcma_bus *bus);
extern int bcma_host_pci_irq_ctl(struct bcma_bus *bus,
struct bcma_device *core, bool enable);
#else
static inline void bcma_host_pci_up(struct bcma_bus *bus)
{
}
static inline void bcma_host_pci_down(struct bcma_bus *bus)
{
}
static inline int bcma_host_pci_irq_ctl(struct bcma_bus *bus,
struct bcma_device *core, bool enable)
{
if (bus->hosttype == BCMA_HOSTTYPE_PCI)
return -ENOTSUPP;
return 0;
}
#endif
extern bool bcma_core_is_enabled(struct bcma_device *core);
extern void bcma_core_disable(struct bcma_device *core, u32 flags);
extern int bcma_core_enable(struct bcma_device *core, u32 flags);


@@ -663,14 +663,6 @@ struct bcma_drv_cc_b {
#define bcma_cc_maskset32(cc, offset, mask, set) \
bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set))
extern void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
extern void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
extern void bcma_chipco_suspend(struct bcma_drv_cc *cc);
extern void bcma_chipco_resume(struct bcma_drv_cc *cc);
void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks);
extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc);
@@ -690,9 +682,6 @@ u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value);
u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value);
/* PMU support */
extern void bcma_pmu_init(struct bcma_drv_cc *cc);
extern void bcma_pmu_early_init(struct bcma_drv_cc *cc);
extern void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset,
u32 value);
extern void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset,


@@ -91,10 +91,4 @@ struct bcma_drv_gmac_cmn {
#define gmac_cmn_write16(gc, offset, val) bcma_write16((gc)->core, offset, val)
#define gmac_cmn_write32(gc, offset, val) bcma_write32((gc)->core, offset, val)
#ifdef CONFIG_BCMA_DRIVER_GMAC_CMN
extern void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc);
#else
static inline void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc) { }
#endif
#endif /* LINUX_BCMA_DRIVER_GMAC_CMN_H_ */


@@ -39,21 +39,6 @@ struct bcma_drv_mips {
u8 early_setup_done:1;
};
#ifdef CONFIG_BCMA_DRIVER_MIPS
extern void bcma_core_mips_init(struct bcma_drv_mips *mcore);
extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore);
extern unsigned int bcma_core_mips_irq(struct bcma_device *dev);
#else
static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { }
static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { }
static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev)
{
return 0;
}
#endif
extern u32 bcma_cpu_clock(struct bcma_drv_mips *mcore);
#endif /* LINUX_BCMA_DRIVER_MIPS_H_ */


@@ -238,13 +238,13 @@ struct bcma_drv_pci {
#define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val)
#define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val)
extern void bcma_core_pci_early_init(struct bcma_drv_pci *pc);
extern void bcma_core_pci_init(struct bcma_drv_pci *pc);
extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
struct bcma_device *core, bool enable);
extern void bcma_core_pci_up(struct bcma_bus *bus);
extern void bcma_core_pci_down(struct bcma_bus *bus);
#ifdef CONFIG_BCMA_DRIVER_PCI
extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up);
#else
static inline void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
{
}
#endif
extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);


@@ -143,6 +143,8 @@
struct bcma_drv_pcie2 {
struct bcma_device *core;
u16 reqsize;
};
#define pcie2_read16(pcie2, offset) bcma_read16((pcie2)->core, offset)
@@ -153,6 +155,4 @@ struct bcma_drv_pcie2 {
#define pcie2_set32(pcie2, offset, set) bcma_set32((pcie2)->core, offset, set)
#define pcie2_mask32(pcie2, offset, mask) bcma_mask32((pcie2)->core, offset, mask)
void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2);
#endif /* LINUX_BCMA_DRIVER_PCIE2_H_ */


@@ -172,12 +172,8 @@ extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int
extern int bitmap_print_to_pagebuf(bool list, char *buf,
const unsigned long *maskp, int nmaskbits);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
#define BITMAP_LAST_WORD_MASK(nbits) \
( \
((nbits) % BITS_PER_LONG) ? \
(1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \
)
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
#define small_const_nbits(nbits) \
(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
@@ -287,16 +283,16 @@ static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
else
return __bitmap_empty(src, nbits);
return find_first_bit(src, nbits) == nbits;
}
static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
else
return __bitmap_full(src, nbits);
return find_first_zero_bit(src, nbits) == nbits;
}
static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
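
A quick worked check of the new branch-free mask (standalone userspace sketch, assuming 64-bit longs; LAST_WORD_MASK mirrors the BITMAP_LAST_WORD_MASK above): negating nbits modulo the word size replaces the old ternary with a single shift, and the nbits == BITS_PER_LONG case falls out naturally.

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))

int main(void)
{
	printf("%#lx\n", LAST_WORD_MASK(5));	/* -5 & 63 == 59 -> 0x1f */
	printf("%#lx\n", LAST_WORD_MASK(64));	/* -64 & 63 == 0 -> ~0UL */
	return 0;
}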


@@ -218,9 +218,9 @@ static inline unsigned long __ffs64(u64 word)
/**
* find_last_bit - find the last set bit in a memory region
* @addr: The address to start the search at
* @size: The maximum size to search
* @size: The number of bits to search
*
* Returns the bit number of the first set bit, or size.
* Returns the bit number of the last set bit, or size.
*/
extern unsigned long find_last_bit(const unsigned long *addr,
unsigned long size);


@@ -13,7 +13,7 @@ struct blk_mq_cpu_notifier {
};
struct blk_mq_ctxmap {
unsigned int map_size;
unsigned int size;
unsigned int bits_per_word;
struct blk_align_bitmap *map;
};
@@ -164,6 +164,8 @@ enum {
<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q);
void blk_mq_finish_init(struct request_queue *q);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);
@@ -218,6 +220,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
void *priv);
@@ -227,7 +230,7 @@ void blk_mq_freeze_queue_start(struct request_queue *q);
/*
* Driver command data is immediately after the request. So subtract request
* size to get back to the original request.
* size to get back to the original request, add request size to get the PDU.
*/
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
@@ -235,7 +238,7 @@ static inline struct request *blk_mq_rq_from_pdu(void *pdu)
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
return (void *) rq + sizeof(*rq);
return rq + 1;
}
#define queue_for_each_hw_ctx(q, hctx, i) \
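
To illustrate the corrected PDU comment: a driver's per-command data is allocated immediately after struct request, so the two helpers are plain pointer arithmetic and exact inverses. A hypothetical sketch:

struct example_cmd {			/* hypothetical driver PDU */
	int status;
};

static void example(struct request *rq)
{
	struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);	/* rq + 1 */
	struct request *back = blk_mq_rq_from_pdu(cmd);	/* cmd - sizeof(*rq) */

	WARN_ON(back != rq);
}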


@@ -220,7 +220,7 @@ enum rq_flag_bits {
/* This mask is used for both bio and request merge checking */
#define REQ_NOMERGE_FLAGS \
(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_FLUSH_SEQ)
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_THROTTLED (1ULL << __REQ_THROTTLED)


@@ -32,23 +32,19 @@ struct bpf_map {
u32 key_size;
u32 value_size;
u32 max_entries;
struct bpf_map_ops *ops;
const struct bpf_map_ops *ops;
struct work_struct work;
};
struct bpf_map_type_list {
struct list_head list_node;
struct bpf_map_ops *ops;
const struct bpf_map_ops *ops;
enum bpf_map_type type;
};
void bpf_register_map_type(struct bpf_map_type_list *tl);
void bpf_map_put(struct bpf_map *map);
struct bpf_map *bpf_map_get(struct fd f);
/* function argument constraints */
enum bpf_arg_type {
ARG_ANYTHING = 0, /* any argument is ok */
ARG_DONTCARE = 0, /* unused argument in helper function */
/* the following constraints used to prototype
* bpf_map_lookup/update/delete_elem() functions
@@ -62,6 +58,9 @@ enum bpf_arg_type {
*/
ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
ARG_PTR_TO_CTX, /* pointer to context */
ARG_ANYTHING, /* any (initialized) argument is ok */
};
/* type of values returned from helper functions */
@@ -105,41 +104,61 @@ struct bpf_verifier_ops {
* with 'type' (read or write) is allowed
*/
bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
u32 (*convert_ctx_access)(int dst_reg, int src_reg, int ctx_off,
struct bpf_insn *insn);
};
struct bpf_prog_type_list {
struct list_head list_node;
struct bpf_verifier_ops *ops;
const struct bpf_verifier_ops *ops;
enum bpf_prog_type type;
};
void bpf_register_prog_type(struct bpf_prog_type_list *tl);
struct bpf_prog;
struct bpf_prog_aux {
atomic_t refcnt;
bool is_gpl_compatible;
enum bpf_prog_type prog_type;
struct bpf_verifier_ops *ops;
struct bpf_map **used_maps;
u32 used_map_cnt;
const struct bpf_verifier_ops *ops;
struct bpf_map **used_maps;
struct bpf_prog *prog;
struct work_struct work;
};
#ifdef CONFIG_BPF_SYSCALL
void bpf_prog_put(struct bpf_prog *prog);
#else
static inline void bpf_prog_put(struct bpf_prog *prog) {}
#endif
void bpf_register_prog_type(struct bpf_prog_type_list *tl);
void bpf_register_map_type(struct bpf_map_type_list *tl);
struct bpf_prog *bpf_prog_get(u32 ufd);
void bpf_prog_put(struct bpf_prog *prog);
struct bpf_map *bpf_map_get(struct fd f);
void bpf_map_put(struct bpf_map *map);
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog *fp, union bpf_attr *attr);
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
#else
static inline void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
}
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_prog_put(struct bpf_prog *prog)
{
}
#endif /* CONFIG_BPF_SYSCALL */
/* verifier prototypes for helper functions called from eBPF programs */
extern struct bpf_func_proto bpf_map_lookup_elem_proto;
extern struct bpf_func_proto bpf_map_update_elem_proto;
extern struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
#endif /* _LINUX_BPF_H */
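
A sketch of what the const-ification means for callers: subsystems can now register static, read-only ops tables. The example_* names are hypothetical; the registration hook is the one declared above.

static bool example_is_valid_access(int off, int size,
				    enum bpf_access_type type)
{
	return false;			/* hypothetical: permit nothing */
}

static const struct bpf_verifier_ops example_ops = {
	.is_valid_access = example_is_valid_access,
};

static struct bpf_prog_type_list example_tl = {
	.ops  = &example_ops,		/* now a const pointer */
	.type = BPF_PROG_TYPE_SOCKET_FILTER,
};

static int __init example_init(void)
{
	bpf_register_prog_type(&example_tl);
	return 0;
}
late_initcall(example_init);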


@@ -11,6 +11,7 @@
#define PHY_ID_BCM5421 0x002060e0
#define PHY_ID_BCM5464 0x002060b0
#define PHY_ID_BCM5461 0x002060c0
#define PHY_ID_BCM54616S 0x03625d10
#define PHY_ID_BCM57780 0x03625d90
#define PHY_ID_BCM7250 0xae025280
@@ -19,6 +20,7 @@
#define PHY_ID_BCM7425 0x03625e60
#define PHY_ID_BCM7429 0x600d8730
#define PHY_ID_BCM7439 0x600d8480
#define PHY_ID_BCM7439_2 0xae025080
#define PHY_ID_BCM7445 0x600d8510
#define PHY_BCM_OUI_MASK 0xfffffc00


@@ -61,6 +61,8 @@ struct can_priv {
char tx_led_trig_name[CAN_LED_NAME_SZ];
struct led_trigger *rx_led_trig;
char rx_led_trig_name[CAN_LED_NAME_SZ];
struct led_trigger *rxtx_led_trig;
char rxtx_led_trig_name[CAN_LED_NAME_SZ];
#endif
};


@@ -21,8 +21,10 @@ enum can_led_event {
#ifdef CONFIG_CAN_LEDS
/* keep space for interface name + "-tx"/"-rx" suffix and null terminator */
#define CAN_LED_NAME_SZ (IFNAMSIZ + 4)
/* keep space for interface name + "-tx"/"-rx"/"-rxtx"
* suffix and null terminator
*/
#define CAN_LED_NAME_SZ (IFNAMSIZ + 6)
void can_led_event(struct net_device *netdev, enum can_led_event event);
void devm_can_led_init(struct net_device *netdev);


@@ -44,16 +44,11 @@ static inline void can_skb_reserve(struct sk_buff *skb)
skb_reserve(skb, sizeof(struct can_skb_priv));
}
static inline void can_skb_destructor(struct sk_buff *skb)
{
sock_put(skb->sk);
}
static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
{
if (sk) {
sock_hold(sk);
skb->destructor = can_skb_destructor;
skb->destructor = sock_efree;
skb->sk = sk;
}
}


@@ -205,6 +205,7 @@ static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
cap_intersect(permitted, __cap_nfsd_set));
}
#ifdef CONFIG_MULTIUSER
extern bool has_capability(struct task_struct *t, int cap);
extern bool has_ns_capability(struct task_struct *t,
struct user_namespace *ns, int cap);
@@ -213,6 +214,34 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
struct user_namespace *ns, int cap);
extern bool capable(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
#else
static inline bool has_capability(struct task_struct *t, int cap)
{
return true;
}
static inline bool has_ns_capability(struct task_struct *t,
struct user_namespace *ns, int cap)
{
return true;
}
static inline bool has_capability_noaudit(struct task_struct *t, int cap)
{
return true;
}
static inline bool has_ns_capability_noaudit(struct task_struct *t,
struct user_namespace *ns, int cap)
{
return true;
}
static inline bool capable(int cap)
{
return true;
}
static inline bool ns_capable(struct user_namespace *ns, int cap)
{
return true;
}
#endif /* CONFIG_MULTIUSER */
extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);


@@ -50,6 +50,19 @@
#define CEPH_FEATURE_MDS_INLINE_DATA (1ULL<<40)
#define CEPH_FEATURE_CRUSH_TUNABLES3 (1ULL<<41)
#define CEPH_FEATURE_OSD_PRIMARY_AFFINITY (1ULL<<41) /* overlap w/ tunables3 */
#define CEPH_FEATURE_MSGR_KEEPALIVE2 (1ULL<<42)
#define CEPH_FEATURE_OSD_POOLRESEND (1ULL<<43)
#define CEPH_FEATURE_ERASURE_CODE_PLUGINS_V2 (1ULL<<44)
#define CEPH_FEATURE_OSD_SET_ALLOC_HINT (1ULL<<45)
#define CEPH_FEATURE_OSD_FADVISE_FLAGS (1ULL<<46)
#define CEPH_FEATURE_OSD_REPOP (1ULL<<46) /* overlap with fadvise */
#define CEPH_FEATURE_OSD_OBJECT_DIGEST (1ULL<<46) /* overlap with fadvise */
#define CEPH_FEATURE_OSD_TRANSACTION_MAY_LAYOUT (1ULL<<46) /* overlap w/ fadvise */
#define CEPH_FEATURE_MDS_QUOTA (1ULL<<47)
#define CEPH_FEATURE_CRUSH_V4 (1ULL<<48) /* straw2 buckets */
#define CEPH_FEATURE_OSD_MIN_SIZE_RECOVERY (1ULL<<49)
// duplicated since it was introduced at the same time as MIN_SIZE_RECOVERY
#define CEPH_FEATURE_OSD_PROXY_FEATURES (1ULL<<49) /* overlap w/ above */
/*
* The introduction of CEPH_FEATURE_OSD_SNAPMAPPER caused the feature
@@ -93,7 +106,8 @@ static inline u64 ceph_sanitize_features(u64 features)
CEPH_FEATURE_EXPORT_PEER | \
CEPH_FEATURE_OSDMAP_ENC | \
CEPH_FEATURE_CRUSH_TUNABLES3 | \
CEPH_FEATURE_OSD_PRIMARY_AFFINITY)
CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
CEPH_FEATURE_CRUSH_V4)
#define CEPH_FEATURES_REQUIRED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR | \


@@ -323,6 +323,7 @@ enum {
CEPH_MDS_OP_MKSNAP = 0x01400,
CEPH_MDS_OP_RMSNAP = 0x01401,
CEPH_MDS_OP_LSSNAP = 0x00402,
CEPH_MDS_OP_RENAMESNAP = 0x01403,
};
extern const char *ceph_mds_op_name(int op);


@@ -7,13 +7,7 @@
#define CEPH_DEFINE_SHOW_FUNC(name) \
static int name##_open(struct inode *inode, struct file *file) \
{ \
struct seq_file *sf; \
int ret; \
\
ret = single_open(file, name, NULL); \
sf = file->private_data; \
sf->private = inode->i_private; \
return ret; \
return single_open(file, name, inode->i_private); \
} \
\
static const struct file_operations name##_fops = { \
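
Callers are unaffected by the simplification; a hypothetical show function illustrates the pattern. single_open() already accepted a private-data argument, which is what allows the shorter body; m->private then carries inode->i_private.

static int example_show(struct seq_file *m, void *p)
{
	seq_printf(m, "value: %d\n", *(int *)m->private);
	return 0;
}

CEPH_DEFINE_SHOW_FUNC(example_show)	/* emits example_show_open and example_show_fops */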


@@ -135,6 +135,7 @@ struct ceph_client {
struct dentry *debugfs_dir;
struct dentry *debugfs_monmap;
struct dentry *debugfs_osdmap;
struct dentry *debugfs_options;
#endif
};
@@ -191,6 +192,7 @@ extern struct ceph_options *ceph_parse_options(char *options,
const char *dev_name, const char *dev_name_end,
int (*parse_extra_token)(char *c, void *private),
void *private);
int ceph_print_client_options(struct seq_file *m, struct ceph_client *client);
extern void ceph_destroy_options(struct ceph_options *opt);
extern int ceph_compare_options(struct ceph_options *new_opt,
struct ceph_client *client);


@@ -175,13 +175,12 @@ static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid)
__u8 version;
if (!ceph_has_room(p, end, 1 + 8 + 4 + 4)) {
pr_warning("incomplete pg encoding");
pr_warn("incomplete pg encoding\n");
return -EINVAL;
}
version = ceph_decode_8(p);
if (version > 1) {
pr_warning("do not understand pg encoding %d > 1",
pr_warn("do not understand pg encoding %d > 1\n",
(int)version);
return -EINVAL;
}


@@ -5,6 +5,10 @@
#include <linux/exportfs.h>
#include <linux/mm.h>
#define CLEANCACHE_NO_POOL -1
#define CLEANCACHE_NO_BACKEND -2
#define CLEANCACHE_NO_BACKEND_SHARED -3
#define CLEANCACHE_KEY_MAX 6
/*
@@ -33,10 +37,9 @@ struct cleancache_ops {
void (*invalidate_fs)(int);
};
extern struct cleancache_ops *
cleancache_register_ops(struct cleancache_ops *ops);
extern int cleancache_register_ops(struct cleancache_ops *ops);
extern void __cleancache_init_fs(struct super_block *);
extern void __cleancache_init_shared_fs(char *, struct super_block *);
extern void __cleancache_init_shared_fs(struct super_block *);
extern int __cleancache_get_page(struct page *);
extern void __cleancache_put_page(struct page *);
extern void __cleancache_invalidate_page(struct address_space *, struct page *);
@@ -78,10 +81,10 @@ static inline void cleancache_init_fs(struct super_block *sb)
__cleancache_init_fs(sb);
}
static inline void cleancache_init_shared_fs(char *uuid, struct super_block *sb)
static inline void cleancache_init_shared_fs(struct super_block *sb)
{
if (cleancache_enabled)
__cleancache_init_shared_fs(uuid, sb);
__cleancache_init_shared_fs(sb);
}
static inline int cleancache_get_page(struct page *page)


@@ -541,7 +541,7 @@ struct clk_gpio {
extern const struct clk_ops clk_gpio_gate_ops;
struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
const char *parent_name, struct gpio_desc *gpio,
const char *parent_name, unsigned gpio, bool active_low,
unsigned long flags);
void of_gpio_clk_gate_setup(struct device_node *node);


@@ -20,10 +20,10 @@
extern void __iomem *at91_pmc_base;
#define at91_pmc_read(field) \
__raw_readl(at91_pmc_base + field)
readl_relaxed(at91_pmc_base + field)
#define at91_pmc_write(field, value) \
__raw_writel(value, at91_pmc_base + field)
writel_relaxed(value, at91_pmc_base + field)
#else
.extern at91_pmc_base
#endif


@@ -16,6 +16,7 @@
#include <linux/types.h>
void r8a7778_clocks_init(u32 mode);
void r8a7779_clocks_init(u32 mode);
void rcar_gen2_clocks_init(u32 mode);


@@ -215,14 +215,14 @@ struct ti_dt_clk {
.node_name = name, \
}
/* Maximum number of clock memmaps */
#define CLK_MAX_MEMMAPS 4
/* Static memmap indices */
enum {
TI_CLKM_CM = 0,
TI_CLKM_CM2,
TI_CLKM_PRM,
TI_CLKM_SCRM,
TI_CLKM_CTRL,
CLK_MAX_MEMMAPS
};
typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);


@@ -253,4 +253,10 @@ extern void clocksource_of_init(void);
static inline void clocksource_of_init(void) {}
#endif
#ifdef CONFIG_ACPI
void acpi_generic_timer_init(void);
#else
static inline void acpi_generic_timer_init(void) { }
#endif
#endif /* _LINUX_CLOCKSOURCE_H */


@@ -16,16 +16,16 @@
struct cma;
extern unsigned long totalcma_pages;
extern phys_addr_t cma_get_base(struct cma *cma);
extern unsigned long cma_get_size(struct cma *cma);
extern phys_addr_t cma_get_base(const struct cma *cma);
extern unsigned long cma_get_size(const struct cma *cma);
extern int __init cma_declare_contiguous(phys_addr_t base,
phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
bool fixed, struct cma **res_cma);
extern int cma_init_reserved_mem(phys_addr_t base,
phys_addr_t size, int order_per_bit,
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
extern bool cma_release(struct cma *cma, struct page *pages, int count);
extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
#endif


@@ -34,6 +34,7 @@ extern int sysctl_compaction_handler(struct ctl_table *table, int write,
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_compact_unevictable_allowed;
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,


@@ -9,10 +9,24 @@
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
/*
* This version exists to prevent dead-store elimination on @ptr,
* where gcc and llvm may behave differently when otherwise using a
* normal barrier(): while gcc gets along with a normal barrier(),
* llvm needs an explicit input variable to be assumed clobbered.
* The issue is as follows: while the inline asm might access any
* memory it wants, the compiler could have fit all of @ptr into
* registers instead, and since @ptr never escaped from there, it
* proved that the inline asm wasn't touching any of it. This
* version works well with both compilers, i.e. we're telling
* the compiler that the inline asm absolutely may see the contents
* of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
*/
#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
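
The canonical user of barrier_data() is memzero_explicit() in lib/string.c; a sketch of the pattern (explicit_zero() is a hypothetical stand-in):

static inline void explicit_zero(void *s, size_t count)
{
	memset(s, 0, count);
	/*
	 * Without this, llvm may delete the memset of a dying stack
	 * buffer as a dead store; barrier_data() makes the asm an
	 * apparent reader of *s.
	 */
	barrier_data(s);
}
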
/*
* This macro obfuscates arithmetic on a variable address so that gcc


@@ -13,9 +13,12 @@
/* Intel ECC compiler doesn't support gcc specific asm stmts.
* It uses intrinsics to do the equivalent things.
*/
#undef barrier_data
#undef RELOC_HIDE
#undef OPTIMIZER_HIDE_VAR
#define barrier_data(ptr) barrier()
#define RELOC_HIDE(ptr, off) \
({ unsigned long __ptr; \
__ptr = (unsigned long) (ptr); \


@@ -169,6 +169,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define barrier() __memory_barrier()
#endif
#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)


@@ -123,7 +123,7 @@ struct console {
struct tty_driver *(*device)(struct console *, int *);
void (*unblank)(void);
int (*setup)(struct console *, char *);
int (*early_setup)(void);
int (*match)(struct console *, char *name, int idx, char *options);
short flags;
short index;
int cflag;
@@ -141,7 +141,6 @@ extern int console_set_on_cmdline;
extern struct console *early_console;
extern int add_preferred_console(char *name, int idx, char *options);
extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options);
extern void register_console(struct console *);
extern int unregister_console(struct console *);
extern struct console *console_drivers;


@@ -10,6 +10,8 @@
#ifdef CONFIG_CONTEXT_TRACKING
extern void context_tracking_cpu_set(int cpu);
extern void context_tracking_enter(enum ctx_state state);
extern void context_tracking_exit(enum ctx_state state);
extern void context_tracking_user_enter(void);
extern void context_tracking_user_exit(void);
extern void __context_tracking_task_switch(struct task_struct *prev,
@@ -35,7 +37,8 @@ static inline enum ctx_state exception_enter(void)
return 0;
prev_ctx = this_cpu_read(context_tracking.state);
context_tracking_user_exit();
if (prev_ctx != CONTEXT_KERNEL)
context_tracking_exit(prev_ctx);
return prev_ctx;
}
@@ -43,8 +46,8 @@ static inline enum ctx_state exception_enter(void)
static inline void exception_exit(enum ctx_state prev_ctx)
{
if (context_tracking_is_enabled()) {
if (prev_ctx == IN_USER)
context_tracking_user_enter();
if (prev_ctx != CONTEXT_KERNEL)
context_tracking_enter(prev_ctx);
}
}
@@ -78,10 +81,16 @@ static inline void guest_enter(void)
vtime_guest_enter(current);
else
current->flags |= PF_VCPU;
if (context_tracking_is_enabled())
context_tracking_enter(CONTEXT_GUEST);
}
static inline void guest_exit(void)
{
if (context_tracking_is_enabled())
context_tracking_exit(CONTEXT_GUEST);
if (vtime_accounting_enabled())
vtime_guest_exit(current);
else


@@ -13,8 +13,9 @@ struct context_tracking {
*/
bool active;
enum ctx_state {
IN_KERNEL = 0,
IN_USER,
CONTEXT_KERNEL = 0,
CONTEXT_USER,
CONTEXT_GUEST,
} state;
};
@@ -34,11 +35,13 @@ static inline bool context_tracking_cpu_is_enabled(void)
static inline bool context_tracking_in_user(void)
{
return __this_cpu_read(context_tracking.state) == IN_USER;
return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
}
#else
static inline bool context_tracking_in_user(void) { return false; }
static inline bool context_tracking_active(void) { return false; }
static inline bool context_tracking_is_enabled(void) { return false; }
static inline bool context_tracking_cpu_is_enabled(void) { return false; }
#endif /* CONFIG_CONTEXT_TRACKING */
#endif
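
A sketch of the call pattern the renamed states serve (example_handler() is hypothetical): an exception handler records the context it interrupted and restores exactly that state on exit, which is why the checks above compare against CONTEXT_KERNEL rather than testing for CONTEXT_USER alone.

void example_handler(void)
{
	enum ctx_state prev = exception_enter();	/* leave USER or GUEST */

	/* ... handle the exception in CONTEXT_KERNEL ... */

	exception_exit(prev);				/* re-enter saved state */
}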


@@ -73,6 +73,7 @@ enum {
/* migration should happen before other stuff but after perf */
CPU_PRI_PERF = 20,
CPU_PRI_MIGRATION = 10,
CPU_PRI_SMPBOOT = 9,
/* bring up workqueues before normal notifiers and down after */
CPU_PRI_WORKQUEUE_UP = 5,
CPU_PRI_WORKQUEUE_DOWN = -5,
@@ -95,6 +96,10 @@ enum {
* Called on the new cpu, just before
* enabling interrupts. Must not sleep,
* must not fail */
#define CPU_DYING_IDLE 0x000B /* CPU (unsigned)v dying, reached
* idle loop. */
#define CPU_BROKEN 0x000C /* CPU (unsigned)v did not die properly,
* perhaps due to preemption. */
/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
* operation in progress
@@ -161,6 +166,7 @@ static inline void __unregister_cpu_notifier(struct notifier_block *nb)
}
#endif
void smpboot_thread_init(void);
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
@@ -208,6 +214,10 @@ static inline void cpu_notifier_register_done(void)
{
}
static inline void smpboot_thread_init(void)
{
}
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
@@ -271,4 +281,14 @@ void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void);
DECLARE_PER_CPU(bool, cpu_dead_idle);
int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
bool cpu_wait_death(unsigned int cpu, int seconds);
bool cpu_report_death(void);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
#endif /* _LINUX_CPU_H_ */


@@ -11,6 +11,7 @@
#include <linux/bitmap.h>
#include <linux/bug.h>
/* Don't assign or return these: may not be this big! */
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
/**
@@ -289,11 +290,11 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
* @cpumask: the cpumask pointer
*
* Returns 1 if @cpu is set in @cpumask, else returns 0
*
* No static inline type checking - see Subtlety (1) above.
*/
#define cpumask_test_cpu(cpu, cpumask) \
test_bit(cpumask_check(cpu), cpumask_bits((cpumask)))
static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}
/**
* cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
@@ -609,9 +610,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
*/
static inline size_t cpumask_size(void)
{
/* FIXME: Once all cpumask assignments are eliminated, this
* can be nr_cpumask_bits */
return BITS_TO_LONGS(NR_CPUS) * sizeof(long);
return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
}
/*
@@ -768,7 +767,7 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
#if NR_CPUS <= BITS_PER_LONG
#define CPU_BITS_ALL \
{ \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
}
#else /* NR_CPUS > BITS_PER_LONG */
@@ -776,7 +775,7 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
#define CPU_BITS_ALL \
{ \
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
}
#endif /* NR_CPUS > BITS_PER_LONG */
@@ -797,32 +796,18 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
nr_cpu_ids);
}
/*
*
* From here down, all obsolete. Use cpumask_ variants!
*
*/
#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
#if NR_CPUS <= BITS_PER_LONG
#define CPU_MASK_ALL \
(cpumask_t) { { \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
} }
#else
#define CPU_MASK_ALL \
(cpumask_t) { { \
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
} }
#endif
#endif /* NR_CPUS > BITS_PER_LONG */
#define CPU_MASK_NONE \
(cpumask_t) { { \
@@ -834,143 +819,4 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
[0] = 1UL \
} }
#if NR_CPUS == 1
#define first_cpu(src) ({ (void)(src); 0; })
#define next_cpu(n, src) ({ (void)(src); 1; })
#define any_online_cpu(mask) 0
#define for_each_cpu_mask(cpu, mask) \
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#else /* NR_CPUS > 1 */
int __first_cpu(const cpumask_t *srcp);
int __next_cpu(int n, const cpumask_t *srcp);
#define first_cpu(src) __first_cpu(&(src))
#define next_cpu(n, src) __next_cpu((n), &(src))
#define any_online_cpu(mask) cpumask_any_and(&mask, cpu_online_mask)
#define for_each_cpu_mask(cpu, mask) \
for ((cpu) = -1; \
(cpu) = next_cpu((cpu), (mask)), \
(cpu) < NR_CPUS; )
#endif /* SMP */
#if NR_CPUS <= 64
#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)
#else /* NR_CPUS > 64 */
int __next_cpu_nr(int n, const cpumask_t *srcp);
#define for_each_cpu_mask_nr(cpu, mask) \
for ((cpu) = -1; \
(cpu) = __next_cpu_nr((cpu), &(mask)), \
(cpu) < nr_cpu_ids; )
#endif /* NR_CPUS > 64 */
#define cpus_addr(src) ((src).bits)
#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
{
set_bit(cpu, dstp->bits);
}
#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
{
clear_bit(cpu, dstp->bits);
}
#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
static inline void __cpus_setall(cpumask_t *dstp, unsigned int nbits)
{
bitmap_fill(dstp->bits, nbits);
}
#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
static inline void __cpus_clear(cpumask_t *dstp, unsigned int nbits)
{
bitmap_zero(dstp->bits, nbits);
}
/* No static inline type checking - see Subtlety (1) above. */
#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
{
return test_and_set_bit(cpu, addr->bits);
}
#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, unsigned int nbits)
{
return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}
#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, unsigned int nbits)
{
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}
#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, unsigned int nbits)
{
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}
#define cpus_andnot(dst, src1, src2) \
__cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, unsigned int nbits)
{
return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
static inline int __cpus_equal(const cpumask_t *src1p,
const cpumask_t *src2p, unsigned int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
}
#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
static inline int __cpus_intersects(const cpumask_t *src1p,
const cpumask_t *src2p, unsigned int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}
#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
static inline int __cpus_subset(const cpumask_t *src1p,
const cpumask_t *src2p, unsigned int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
static inline int __cpus_empty(const cpumask_t *srcp, unsigned int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
static inline int __cpus_weight(const cpumask_t *srcp, unsigned int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}
#define cpus_shift_left(dst, src, n) \
__cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_left(cpumask_t *dstp,
const cpumask_t *srcp, int n, int nbits)
{
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
#endif /* __LINUX_CPUMASK_H */
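
With the legacy helpers gone, callers move to the cpumask_* API; a hypothetical migration sketch:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/printk.h>

static int example(void)
{
	cpumask_var_t mask;
	int cpu;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(1, mask);		/* was: cpu_set(1, m) */
	if (cpumask_test_cpu(1, mask))		/* was: cpu_isset(1, m) */
		for_each_cpu(cpu, mask)		/* was: for_each_cpu_mask() */
			pr_info("cpu %d\n", cpu);

	free_cpumask_var(mask);
	return 0;
}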


@@ -62,9 +62,27 @@ do { \
groups_free(group_info); \
} while (0)
extern struct group_info *groups_alloc(int);
extern struct group_info init_groups;
#ifdef CONFIG_MULTIUSER
extern struct group_info *groups_alloc(int);
extern void groups_free(struct group_info *);
extern int in_group_p(kgid_t);
extern int in_egroup_p(kgid_t);
#else
static inline void groups_free(struct group_info *group_info)
{
}
static inline int in_group_p(kgid_t grp)
{
return 1;
}
static inline int in_egroup_p(kgid_t grp)
{
return 1;
}
#endif
extern int set_current_groups(struct group_info *);
extern void set_groups(struct cred *, struct group_info *);
extern int groups_search(const struct group_info *, kgid_t);
@@ -74,9 +92,6 @@ extern bool may_setgroups(void);
#define GROUP_AT(gi, i) \
((gi)->blocks[(i) / NGROUPS_PER_BLOCK][(i) % NGROUPS_PER_BLOCK])
extern int in_group_p(kgid_t);
extern int in_egroup_p(kgid_t);
/*
* The security context of a task
*


@@ -96,13 +96,15 @@ struct crush_rule {
* uniform O(1) poor poor
* list O(n) optimal poor
* tree O(log n) good good
* straw O(n) optimal optimal
* straw O(n) better better
* straw2 O(n) optimal optimal
*/
enum {
CRUSH_BUCKET_UNIFORM = 1,
CRUSH_BUCKET_LIST = 2,
CRUSH_BUCKET_TREE = 3,
CRUSH_BUCKET_STRAW = 4
CRUSH_BUCKET_STRAW = 4,
CRUSH_BUCKET_STRAW2 = 5,
};
extern const char *crush_bucket_alg_name(int alg);
@@ -149,6 +151,11 @@ struct crush_bucket_straw {
__u32 *straws; /* 16-bit fixed point */
};
struct crush_bucket_straw2 {
struct crush_bucket h;
__u32 *item_weights; /* 16-bit fixed point */
};
/*
@@ -189,6 +196,7 @@ extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
extern void crush_destroy_bucket_list(struct crush_bucket_list *b);
extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b);
extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b);
extern void crush_destroy_bucket_straw2(struct crush_bucket_straw2 *b);
extern void crush_destroy_bucket(struct crush_bucket *b);
extern void crush_destroy_rule(struct crush_rule *r);
extern void crush_destroy(struct crush_map *map);


@@ -94,6 +94,12 @@
*/
#define CRYPTO_ALG_KERN_DRIVER_ONLY 0x00001000
/*
* Mark a cipher as a service implementation only usable by another
* cipher and never by a normal user of the kernel crypto API
*/
#define CRYPTO_ALG_INTERNAL 0x00002000
/*
* Transform masks and values (for crt_flags).
*/


@@ -404,26 +404,11 @@ static inline bool d_mountpoint(const struct dentry *dentry)
/*
* Directory cache entry type accessor functions.
*/
static inline void __d_set_type(struct dentry *dentry, unsigned type)
{
dentry->d_flags = (dentry->d_flags & ~DCACHE_ENTRY_TYPE) | type;
}
static inline void __d_clear_type(struct dentry *dentry)
{
__d_set_type(dentry, DCACHE_MISS_TYPE);
}
static inline void d_set_type(struct dentry *dentry, unsigned type)
{
spin_lock(&dentry->d_lock);
__d_set_type(dentry, type);
spin_unlock(&dentry->d_lock);
}
static inline unsigned __d_entry_type(const struct dentry *dentry)
{
return dentry->d_flags & DCACHE_ENTRY_TYPE;
unsigned type = READ_ONCE(dentry->d_flags);
smp_rmb();
return type & DCACHE_ENTRY_TYPE;
}
static inline bool d_is_miss(const struct dentry *dentry)
@@ -482,6 +467,44 @@ static inline bool d_is_positive(const struct dentry *dentry)
return !d_is_negative(dentry);
}
/**
* d_really_is_negative - Determine if a dentry is really negative (ignoring fallthroughs)
* @dentry: The dentry in question
*
* Returns true if the dentry represents either an absent name or a name that
* doesn't map to an inode (ie. ->d_inode is NULL). The dentry could represent
* a true miss, a whiteout that isn't represented by a 0,0 chardev or a
* fallthrough marker in an opaque directory.
*
* Note! (1) This should be used *only* by a filesystem to examine its own
* dentries. It should not be used to look at some other filesystem's
* dentries. (2) It should also be used in combination with d_inode() to get
* the inode. (3) The dentry may have something attached to ->d_lower and the
* type field of the flags may be set to something other than miss or whiteout.
*/
static inline bool d_really_is_negative(const struct dentry *dentry)
{
return dentry->d_inode == NULL;
}
/**
* d_really_is_positive - Determine if a dentry is really positive (ignoring fallthroughs)
* @dentry: The dentry in question
*
* Returns true if the dentry represents a name that maps to an inode
* (ie. ->d_inode is not NULL). The dentry might still represent a whiteout if
* that is represented on medium as a 0,0 chardev.
*
* Note! (1) This should be used *only* by a filesystem to examine its own
* dentries. It should not be used to look at some other filesystem's
* dentries. (2) It should also be used in combination with d_inode() to get
* the inode.
*/
static inline bool d_really_is_positive(const struct dentry *dentry)
{
return dentry->d_inode != NULL;
}
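
A sketch of the intended call pattern inside a filesystem's own code (example_getattr() and example_stat() are hypothetical):

static int example_getattr(struct dentry *dentry)
{
	if (d_really_is_negative(dentry))	/* ->d_inode == NULL */
		return -ENOENT;

	/* per note (2) above, pair with d_inode() to fetch the inode */
	return example_stat(d_inode(dentry));
}
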
extern void d_set_fallthru(struct dentry *dentry);
static inline bool d_is_fallthru(const struct dentry *dentry)


@@ -43,6 +43,7 @@ enum dccp_state {
DCCP_CLOSING = TCP_CLOSING,
DCCP_TIME_WAIT = TCP_TIME_WAIT,
DCCP_CLOSED = TCP_CLOSE,
DCCP_NEW_SYN_RECV = TCP_NEW_SYN_RECV,
DCCP_PARTOPEN = TCP_MAX_STATES,
DCCP_PASSIVE_CLOSEREQ, /* clients receiving CloseReq */
DCCP_MAX_STATES
@@ -57,6 +58,7 @@ enum {
DCCPF_CLOSING = TCPF_CLOSING,
DCCPF_TIME_WAIT = TCPF_TIME_WAIT,
DCCPF_CLOSED = TCPF_CLOSE,
DCCPF_NEW_SYN_RECV = TCPF_NEW_SYN_RECV,
DCCPF_PARTOPEN = (1 << DCCP_PARTOPEN),
};
@@ -317,6 +319,6 @@ static inline const char *dccp_role(const struct sock *sk)
return NULL;
}
extern void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
extern void dccp_syn_ack_timeout(const struct request_sock *req);
#endif /* _LINUX_DCCP_H */


@@ -91,7 +91,7 @@ struct devfreq_event_desc {
const char *name;
void *driver_data;
struct devfreq_event_ops *ops;
const struct devfreq_event_ops *ops;
};
#if defined(CONFIG_PM_DEVFREQ_EVENT)


@@ -605,9 +605,4 @@ static inline unsigned long to_bytes(sector_t n)
return (n << SECTOR_SHIFT);
}
/*-----------------------------------------------------------------
* Helper for block layer and dm core operations
*---------------------------------------------------------------*/
int dm_underlying_device_busy(struct request_queue *q);
#endif /* _LINUX_DEVICE_MAPPER_H */


@@ -38,6 +38,7 @@ struct class;
struct subsys_private;
struct bus_type;
struct device_node;
struct fwnode_handle;
struct iommu_ops;
struct iommu_group;
@@ -650,14 +651,6 @@ struct device_dma_parameters {
unsigned long segment_boundary_mask;
};
struct acpi_device;
struct acpi_dev_node {
#ifdef CONFIG_ACPI
struct acpi_device *companion;
#endif
};
/**
* struct device - The basic device structure
* @parent: The device's "parent" device, the device to which it is attached.
@@ -703,7 +696,7 @@ struct acpi_dev_node {
* @cma_area: Contiguous memory area for dma allocations
* @archdata: For arch-specific additions.
* @of_node: Associated device tree node.
* @acpi_node: Associated ACPI device node.
* @fwnode: Associated device node supplied by platform firmware.
* @devt: For creating the sysfs "dev".
* @id: device instance
* @devres_lock: Spinlock to protect the resource of the device.
@@ -779,7 +772,7 @@ struct device {
struct dev_archdata archdata;
struct device_node *of_node; /* associated device tree node */
struct acpi_dev_node acpi_node; /* associated ACPI device node */
struct fwnode_handle *fwnode; /* firmware device node */
dev_t devt; /* dev_t, creates the sysfs "dev" */
u32 id; /* device instance */
@@ -954,6 +947,9 @@ extern void unlock_device_hotplug(void);
extern int lock_device_hotplug_sysfs(void);
extern int device_offline(struct device *dev);
extern int device_online(struct device *dev);
extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
/*
* Root device objects for grouping under /sys/devices
*/


@@ -162,6 +162,33 @@ struct dma_buf_attachment {
void *priv;
};
/**
* struct dma_buf_export_info - holds information needed to export a dma_buf
* @exp_name: name of the exporting module - useful for debugging.
* @ops: Attach allocator-defined dma buf ops to the new buffer
* @size: Size of the buffer
* @flags: mode flags for the file
* @resv: reservation-object, NULL to allocate default one
* @priv: Attach private data of allocator to this buffer
*
* This structure holds the information required to export the buffer. Used
* with dma_buf_export() only.
*/
struct dma_buf_export_info {
const char *exp_name;
const struct dma_buf_ops *ops;
size_t size;
int flags;
struct reservation_object *resv;
void *priv;
};
/**
* helper macro for exporters; zeros and fills in most common values
*/
#define DEFINE_DMA_BUF_EXPORT_INFO(a) \
struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME }
/**
* get_dma_buf - convenience wrapper for get_file.
* @dmabuf: [in] pointer to dma_buf
@@ -181,12 +208,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
void dma_buf_detach(struct dma_buf *dmabuf,
struct dma_buf_attachment *dmabuf_attach);
struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
size_t size, int flags, const char *,
struct reservation_object *);
#define dma_buf_export(priv, ops, size, flags, resv) \
dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME, resv)
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
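
A sketch of exporting through the new single-struct interface; my_dmabuf_ops and my_buffer are hypothetical exporter state, and DEFINE_DMA_BUF_EXPORT_INFO() pre-fills exp_name with KBUILD_MODNAME as shown above:

static struct dma_buf *example_export(size_t size, void *my_buffer)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops   = &my_dmabuf_ops;
	exp_info.size  = size;
	exp_info.flags = O_RDWR;
	exp_info.priv  = my_buffer;

	return dma_buf_export(&exp_info);	/* replaces dma_buf_export_named() */
}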


@@ -34,6 +34,10 @@ struct dma_map_ops {
void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs);
/*
* map_sg returns 0 on error and a value > 0 on success.
* It should never return a value < 0.
*/
int (*map_sg)(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs);

include/linux/dma/hsu.h (new file, +48)

@@ -0,0 +1,48 @@
/*
* Driver for the High Speed UART DMA
*
* Copyright (C) 2015 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _DMA_HSU_H
#define _DMA_HSU_H
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_data/dma-hsu.h>
struct hsu_dma;
/**
* struct hsu_dma_chip - representation of HSU DMA hardware
* @dev: struct device of the DMA controller
* @irq: irq line
* @regs: memory mapped I/O space
* @length: I/O space length
* @offset: offset of the I/O space where registers are located
* @hsu: struct hsu_dma that is filled in by ->probe()
* @pdata: platform data for the DMA controller if provided
*/
struct hsu_dma_chip {
struct device *dev;
int irq;
void __iomem *regs;
unsigned int length;
unsigned int offset;
struct hsu_dma *hsu;
struct hsu_dma_platform_data *pdata;
};
/* Export to the internal users */
irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr);
/* Export to the platform drivers */
int hsu_dma_probe(struct hsu_dma_chip *chip);
int hsu_dma_remove(struct hsu_dma_chip *chip);
#endif /* _DMA_HSU_H */
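
A hypothetical platform-driver fragment showing how the chip structure is filled in and handed to the core:

static int example_hsu_probe(struct platform_device *pdev)
{
	struct hsu_dma_chip *chip;
	struct resource *mem;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	chip->dev = &pdev->dev;
	chip->irq = platform_get_irq(pdev, 0);
	chip->length = resource_size(mem);
	chip->offset = 0;			/* registers start at the base */

	return hsu_dma_probe(chip);		/* the core fills chip->hsu */
}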


@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
@@ -574,7 +570,6 @@ struct dma_tx_state {
* @copy_align: alignment shift for memcpy operations
* @xor_align: alignment shift for xor operations
* @pq_align: alignment shift for pq operations
* @fill_align: alignment shift for memset operations
* @dev_id: unique device ID
* @dev: struct device reference for dma mapping api
* @src_addr_widths: bit mask of src addr widths the device supports
@@ -625,7 +620,6 @@ struct dma_device {
u8 copy_align;
u8 xor_align;
u8 pq_align;
u8 fill_align;
#define DMA_HAS_PQ_CONTINUE (1 << 15)
int dev_id;
@@ -826,12 +820,6 @@ static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
return dmaengine_check_align(dev->pq_align, off1, off2, len);
}
static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
size_t off2, size_t len)
{
return dmaengine_check_align(dev->fill_align, off1, off2, len);
}
static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
@@ -1098,7 +1086,6 @@ void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
struct dma_chan *net_dma_find_channel(void);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
__dma_request_slave_channel_compat(&(mask), x, y, dev, name)
@@ -1116,27 +1103,4 @@ static inline struct dma_chan
return __dma_request_channel(mask, fn, fn_param);
}
/* --- Helper iov-locking functions --- */
struct dma_page_list {
char __user *base_address;
int nr_pages;
struct page **pages;
};
struct dma_pinned_list {
int nr_iovecs;
struct dma_page_list page_list[0];
};
struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
struct dma_pinned_list *pinned_list, struct page *page,
unsigned int offset, size_t len);
#endif /* DMAENGINE_H */

View File

@@ -0,0 +1,22 @@
#ifndef _ELF_RANDOMIZE_H
#define _ELF_RANDOMIZE_H
struct mm_struct;
#ifndef CONFIG_ARCH_HAS_ELF_RANDOMIZE
static inline unsigned long arch_mmap_rnd(void) { return 0; }
# if defined(arch_randomize_brk) && defined(CONFIG_COMPAT_BRK)
# define compat_brk_randomized
# endif
# ifndef arch_randomize_brk
# define arch_randomize_brk(mm) (mm->brk)
# endif
#else
extern unsigned long arch_mmap_rnd(void);
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
# ifdef CONFIG_COMPAT_BRK
# define compat_brk_randomized
# endif
#endif
#endif

View File

@@ -35,7 +35,6 @@ extern const struct header_ops eth_header_ops;
int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
const void *daddr, const void *saddr, unsigned len);
int eth_rebuild_header(struct sk_buff *skb);
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
__be16 type);

View File

@@ -153,7 +153,7 @@ struct f2fs_orphan_block {
*/
struct f2fs_extent {
__le32 fofs; /* start file offset of the extent */
__le32 blk_addr; /* start block address of the extent */
__le32 blk; /* start block address of the extent */
__le32 len; /* length of the extent */
} __packed;
@@ -178,6 +178,7 @@ struct f2fs_extent {
#define F2FS_INLINE_DATA 0x02 /* file inline data flag */
#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */
#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \
F2FS_INLINE_XATTR_ADDRS - 1))

View File

@@ -21,4 +21,10 @@ struct space_resv {
#define FS_IOC_RESVSP _IOW('X', 40, struct space_resv)
#define FS_IOC_RESVSP64 _IOW('X', 42, struct space_resv)
#define FALLOC_FL_SUPPORTED_MASK (FALLOC_FL_KEEP_SIZE | \
FALLOC_FL_PUNCH_HOLE | \
FALLOC_FL_COLLAPSE_RANGE | \
FALLOC_FL_ZERO_RANGE | \
FALLOC_FL_INSERT_RANGE)
#endif /* _FALLOC_H_ */

View File

@@ -145,8 +145,6 @@ struct bpf_prog_aux;
.off = 0, \
.imm = ((__u64) (IMM)) >> 32 })
#define BPF_PSEUDO_MAP_FD 1
/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
@@ -310,9 +308,11 @@ struct bpf_binary_header {
struct bpf_prog {
u16 pages; /* Number of allocated pages */
bool jited; /* Is our filter JIT'ed? */
bool gpl_compatible; /* Is our filter GPL compatible? */
u32 len; /* Number of filter blocks */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
enum bpf_prog_type type; /* Type of BPF program */
struct bpf_prog_aux *aux; /* Auxiliary fields */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
unsigned int (*bpf_func)(const struct sk_buff *skb,
const struct bpf_insn *filter);
/* Instructions for interpreter */
@@ -454,6 +454,7 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
BPF_ANCILLARY(VLAN_TAG_PRESENT);
BPF_ANCILLARY(PAY_OFFSET);
BPF_ANCILLARY(RANDOM);
BPF_ANCILLARY(VLAN_TPID);
}
/* Fallthrough. */
default:

View File

@@ -1,6 +1,8 @@
#ifndef _FIXP_ARITH_H
#define _FIXP_ARITH_H
#include <linux/math64.h>
/*
* Simplistic fixed-point arithmetic.
* Hmm, I'm probably duplicating some code :(
@@ -29,59 +31,126 @@
#include <linux/types.h>
/* The type representing fixed-point values */
typedef s16 fixp_t;
#define FRAC_N 8
#define FRAC_MASK ((1<<FRAC_N)-1)
/* Not to be used directly. Use fixp_{cos,sin} */
static const fixp_t cos_table[46] = {
0x0100, 0x00FF, 0x00FF, 0x00FE, 0x00FD, 0x00FC, 0x00FA, 0x00F8,
0x00F6, 0x00F3, 0x00F0, 0x00ED, 0x00E9, 0x00E6, 0x00E2, 0x00DD,
0x00D9, 0x00D4, 0x00CF, 0x00C9, 0x00C4, 0x00BE, 0x00B8, 0x00B1,
0x00AB, 0x00A4, 0x009D, 0x0096, 0x008F, 0x0087, 0x0080, 0x0078,
0x0070, 0x0068, 0x005F, 0x0057, 0x004F, 0x0046, 0x003D, 0x0035,
0x002C, 0x0023, 0x001A, 0x0011, 0x0008, 0x0000
static const s32 sin_table[] = {
0x00000000, 0x023be165, 0x04779632, 0x06b2f1d2, 0x08edc7b6, 0x0b27eb5c,
0x0d61304d, 0x0f996a26, 0x11d06c96, 0x14060b67, 0x163a1a7d, 0x186c6ddd,
0x1a9cd9ac, 0x1ccb3236, 0x1ef74bf2, 0x2120fb82, 0x234815ba, 0x256c6f9e,
0x278dde6e, 0x29ac379f, 0x2bc750e8, 0x2ddf003f, 0x2ff31bdd, 0x32037a44,
0x340ff241, 0x36185aee, 0x381c8bb5, 0x3a1c5c56, 0x3c17a4e7, 0x3e0e3ddb,
0x3fffffff, 0x41ecc483, 0x43d464fa, 0x45b6bb5d, 0x4793a20f, 0x496af3e1,
0x4b3c8c11, 0x4d084650, 0x4ecdfec6, 0x508d9210, 0x5246dd48, 0x53f9be04,
0x55a6125a, 0x574bb8e5, 0x58ea90c2, 0x5a827999, 0x5c135399, 0x5d9cff82,
0x5f1f5ea0, 0x609a52d1, 0x620dbe8a, 0x637984d3, 0x64dd894f, 0x6639b039,
0x678dde6d, 0x68d9f963, 0x6a1de735, 0x6b598ea1, 0x6c8cd70a, 0x6db7a879,
0x6ed9eba0, 0x6ff389de, 0x71046d3c, 0x720c8074, 0x730baeec, 0x7401e4bf,
0x74ef0ebb, 0x75d31a5f, 0x76adf5e5, 0x777f903b, 0x7847d908, 0x7906c0af,
0x79bc384c, 0x7a6831b8, 0x7b0a9f8c, 0x7ba3751c, 0x7c32a67c, 0x7cb82884,
0x7d33f0c8, 0x7da5f5a3, 0x7e0e2e31, 0x7e6c924f, 0x7ec11aa3, 0x7f0bc095,
0x7f4c7e52, 0x7f834ecf, 0x7fb02dc4, 0x7fd317b3, 0x7fec09e1, 0x7ffb025e,
0x7fffffff
};
/* a: 123 -> 123.0 */
static inline fixp_t fixp_new(s16 a)
/**
* __fixp_sin32() returns the sine of an angle in degrees
*
* @degrees: angle, in degrees, from 0 to 360.
*
* The returned value ranges from -0x7fffffff to +0x7fffffff.
*/
static inline s32 __fixp_sin32(int degrees)
{
return a<<FRAC_N;
s32 ret;
bool negative = false;
if (degrees > 180) {
negative = true;
degrees -= 180;
}
if (degrees > 90)
degrees = 180 - degrees;
ret = sin_table[degrees];
return negative ? -ret : ret;
}
/* a: 0xFFFF -> -1.0
0x8000 -> 1.0
0x0000 -> 0.0
*/
static inline fixp_t fixp_new16(s16 a)
/**
* fixp_sin32() returns the sine of an angle in degrees
*
* @degrees: angle, in degrees. The angle can be positive or negative
*
* The returned value ranges from -0x7fffffff to +0x7fffffff.
*/
static inline s32 fixp_sin32(int degrees)
{
return ((s32)a)>>(16-FRAC_N);
degrees = (degrees % 360 + 360) % 360;
return __fixp_sin32(degrees);
}
static inline fixp_t fixp_cos(unsigned int degrees)
/* cos(x) = sin(x + 90 degrees) */
#define fixp_cos32(v) fixp_sin32((v) + 90)
/*
* 16 bits variants
*
* The returned value ranges from -0x7fff to 0x7fff
*/
#define fixp_sin16(v) (fixp_sin32(v) >> 16)
#define fixp_cos16(v) (fixp_cos32(v) >> 16)
/**
* fixp_sin32_rad() - calculates the sine of an angle in radians
*
* @radians: angle, in radians
* @twopi: value to be used for 2*pi
*
* Provides a variant for the cases where just 360
* values are not enough. This function uses linear
* interpolation over the wider range of values given
* by the twopi argument.
*
* Experimental tests gave a maximum difference of
* 0.000038 between the value calculated by sin() and
* the one produced by this function, when twopi is
* equal to 360000. That seems to be enough precision
* for practical purposes.
*
* Please note that overly large values of twopi could cause
* overflows, so the routine will not allow values of twopi
* bigger than 2^18.
*/
static inline s32 fixp_sin32_rad(u32 radians, u32 twopi)
{
int quadrant = (degrees / 90) & 3;
unsigned int i = degrees % 90;
int degrees;
s32 v1, v2, dx, dy;
s64 tmp;
if (quadrant == 1 || quadrant == 3)
i = 90 - i;
/*
* Avoid too large values for twopi, as we don't want overflows.
*/
BUG_ON(twopi > 1 << 18);
i >>= 1;
degrees = (radians * 360) / twopi;
tmp = radians - (degrees * twopi) / 360;
return (quadrant == 1 || quadrant == 2)? -cos_table[i] : cos_table[i];
degrees = (degrees % 360 + 360) % 360;
v1 = __fixp_sin32(degrees);
v2 = fixp_sin32(degrees + 1);
dx = twopi / 360;
dy = v2 - v1;
tmp *= dy;
return v1 + div_s64(tmp, dx);
}
static inline fixp_t fixp_sin(unsigned int degrees)
{
return -fixp_cos(degrees + 90);
}
/* cos(x) = sin(x + pi/2 radians) */
static inline fixp_t fixp_mult(fixp_t a, fixp_t b)
{
return ((s32)(a*b))>>FRAC_N;
}
#define fixp_cos32_rad(rad, twopi) \
fixp_sin32_rad(rad + twopi / 4, twopi)
#endif
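A quick usage sketch of the new interface (the step count is taken from the precision note above; everything else is illustrative):

u32 twopi = 360000;	/* one full turn split into 360000 steps */
s32 s = fixp_sin32_rad(twopi / 4, twopi);	/* sin(pi/2) ~= 0x7fffffff */
s32 c = fixp_cos32_rad(0, twopi);		/* cos(0)    ~= 0x7fffffff */
s16 s90 = fixp_sin16(90);			/* 16-bit variant, ~0x7fff */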

View File

@@ -314,6 +314,33 @@ struct page;
struct address_space;
struct writeback_control;
#define IOCB_EVENTFD (1 << 0)
#define IOCB_APPEND (1 << 1)
#define IOCB_DIRECT (1 << 2)
struct kiocb {
struct file *ki_filp;
loff_t ki_pos;
void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
void *private;
int ki_flags;
};
static inline bool is_sync_kiocb(struct kiocb *kiocb)
{
return kiocb->ki_complete == NULL;
}
static inline int iocb_flags(struct file *file);
static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
*kiocb = (struct kiocb) {
.ki_filp = filp,
.ki_flags = iocb_flags(filp),
};
}
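For context, a sketch modelled on the synchronous read path: a stack-allocated kiocb whose ->ki_complete stays NULL is exactly what makes is_sync_kiocb() return true.

static ssize_t example_sync_read(struct file *filp, struct iov_iter *iter,
				 loff_t *ppos)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);	/* ki_flags derived from filp */
	kiocb.ki_pos = *ppos;
	ret = filp->f_op->read_iter(&kiocb, iter);
	*ppos = kiocb.ki_pos;
	return ret;
}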
/*
* "descriptor" for what we're up to with a read.
* This allows us to use the same read code yet
@@ -361,7 +388,7 @@ struct address_space_operations {
void (*invalidatepage) (struct page *, unsigned int, unsigned int);
int (*releasepage) (struct page *, gfp_t);
void (*freepage)(struct page *);
ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
/*
* migrate the contents of a page to the specified target. If
* migrate_mode is MIGRATE_ASYNC, it must not block.
@@ -848,6 +875,7 @@ static inline struct file *get_file(struct file *f)
atomic_long_inc(&f->f_count);
return f;
}
#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count)
#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
#define file_count(x) atomic_long_read(&(x)->f_count)
@@ -893,8 +921,8 @@ struct file_lock_operations {
struct lock_manager_operations {
int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
unsigned long (*lm_owner_key)(struct file_lock *);
void (*lm_get_owner)(struct file_lock *, struct file_lock *);
void (*lm_put_owner)(struct file_lock *);
fl_owner_t (*lm_get_owner)(fl_owner_t);
void (*lm_put_owner)(fl_owner_t);
void (*lm_notify)(struct file_lock *); /* unblock callback */
int (*lm_grant)(struct file_lock *, int);
bool (*lm_break)(struct file_lock *);
@@ -1019,6 +1047,9 @@ extern void lease_get_mtime(struct inode *, struct timespec *time);
extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
extern int lease_modify(struct file_lock *, int, struct list_head *);
struct files_struct;
extern void show_fd_locks(struct seq_file *f,
struct file *filp, struct files_struct *files);
#else /* !CONFIG_FILE_LOCKING */
static inline int fcntl_getlk(struct file *file, unsigned int cmd,
struct flock __user *user)
@@ -1155,6 +1186,10 @@ static inline int lease_modify(struct file_lock *fl, int arg,
{
return -EINVAL;
}
struct files_struct;
static inline void show_fd_locks(struct seq_file *f,
struct file *filp, struct files_struct *files) {}
#endif /* !CONFIG_FILE_LOCKING */
@@ -1540,8 +1575,6 @@ struct file_operations {
loff_t (*llseek) (struct file *, loff_t, int);
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
int (*iterate) (struct file *, struct dir_context *);
@@ -1617,6 +1650,7 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
struct iovec **ret_pointer);
extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t __vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
@@ -1786,7 +1820,7 @@ struct super_operations {
#define I_SYNC (1 << __I_SYNC)
#define I_REFERENCED (1 << 8)
#define __I_DIO_WAKEUP 9
#define I_DIO_WAKEUP (1 << I_DIO_WAKEUP)
#define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP)
#define I_LINKABLE (1 << 10)
#define I_DIRTY_TIME (1 << 11)
#define __I_DIRTY_TIME_EXPIRED 12
@@ -2145,7 +2179,7 @@ struct filename {
const __user char *uptr; /* original userland pointer */
struct audit_names *aname;
int refcnt;
bool separate; /* should "name" be freed? */
const char iname[];
};
extern long vfs_truncate(struct path *, loff_t);
@@ -2545,16 +2579,12 @@ extern int sb_min_blocksize(struct super_block *, int);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *, loff_t);
extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
extern ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos);
ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos);
@@ -2592,12 +2622,13 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
ssize_t dax_do_io(int rw, struct kiocb *, struct inode *, struct iov_iter *,
loff_t, get_block_t, dio_iodone_t, int flags);
ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
get_block_t, dio_iodone_t, int flags);
int dax_clear_blocks(struct inode *, sector_t block, long size);
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
int dax_truncate_page(struct inode *, loff_t from, get_block_t);
int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb)
#ifdef CONFIG_BLOCK
@@ -2613,27 +2644,56 @@ enum {
/* filesystem can handle aio writes beyond i_size */
DIO_ASYNC_EXTEND = 0x04,
/* inode/fs/bdev does not need truncate protection */
DIO_SKIP_DIO_COUNT = 0x08,
};
void dio_end_io(struct bio *bio, int error);
ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter, loff_t offset,
get_block_t get_block, dio_iodone_t end_io,
dio_submit_t submit_io, int flags);
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
loff_t offset, get_block_t get_block,
dio_iodone_t end_io, dio_submit_t submit_io,
int flags);
static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
struct inode *inode, struct iov_iter *iter, loff_t offset,
get_block_t get_block)
static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
struct inode *inode,
struct iov_iter *iter, loff_t offset,
get_block_t get_block)
{
return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter,
return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
offset, get_block, NULL, NULL,
DIO_LOCKING | DIO_SKIP_HOLES);
}
#endif
void inode_dio_wait(struct inode *inode);
void inode_dio_done(struct inode *inode);
/*
* inode_dio_begin - signal start of a direct I/O request
* @inode: inode the direct I/O happens on
*
* This is called at the start of a direct I/O request and pairs with
* inode_dio_end(); while the count is elevated, callers of
* inode_dio_wait() are held off until it drops back to zero.
*/
static inline void inode_dio_begin(struct inode *inode)
{
atomic_inc(&inode->i_dio_count);
}
/*
* inode_dio_end - signal finish of a direct I/O request
* @inode: inode the direct I/O happens on
*
* This is called once we've finished processing a direct I/O request,
* and is used to wake up callers waiting for direct I/O to be quiesced.
*/
static inline void inode_dio_end(struct inode *inode)
{
if (atomic_dec_and_test(&inode->i_dio_count))
wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
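The intended pairing, sketched with the filesystem-specific submission elided:

inode_dio_begin(inode);	/* i_dio_count++ before issuing the request */
/* ... build and submit the direct I/O ... */
inode_dio_end(inode);	/* i_dio_count--; wakes inode_dio_wait() callers */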
extern void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask);
@@ -2662,7 +2722,6 @@ void inode_sub_bytes(struct inode *inode, loff_t bytes);
loff_t inode_get_bytes(struct inode *inode);
void inode_set_bytes(struct inode *inode, loff_t bytes);
extern int vfs_readdir(struct file *, filldir_t, void *);
extern int iterate_dir(struct file *, struct dir_context *);
extern int vfs_stat(const char __user *, struct kstat *);
@@ -2760,6 +2819,16 @@ static inline bool io_is_direct(struct file *filp)
return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp));
}
static inline int iocb_flags(struct file *file)
{
int res = 0;
if (file->f_flags & O_APPEND)
res |= IOCB_APPEND;
if (io_is_direct(file))
res |= IOCB_DIRECT;
return res;
}
static inline ino_t parent_ino(struct dentry *dentry)
{
ino_t res;

View File

@@ -13,6 +13,8 @@ struct vfsmount;
static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *))
{
init_waitqueue_head(&p->wait);
INIT_HLIST_NODE(&p->s_list);
INIT_HLIST_NODE(&p->m_list);
p->kill = kill;
}

View File

@@ -13,6 +13,7 @@ struct trace_array;
struct trace_buffer;
struct tracer;
struct dentry;
struct bpf_prog;
struct trace_print_flags {
unsigned long mask;
@@ -45,7 +46,7 @@ const char *ftrace_print_hex_seq(struct trace_seq *p,
const unsigned char *buf, int len);
const char *ftrace_print_array_seq(struct trace_seq *p,
const void *buf, int buf_len,
const void *buf, int count,
size_t el_size);
struct trace_iterator;
@@ -202,7 +203,7 @@ enum trace_reg {
struct ftrace_event_call;
struct ftrace_event_class {
char *system;
const char *system;
void *probe;
#ifdef CONFIG_PERF_EVENTS
void *perf_probe;
@@ -252,6 +253,7 @@ enum {
TRACE_EVENT_FL_WAS_ENABLED_BIT,
TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
TRACE_EVENT_FL_TRACEPOINT_BIT,
TRACE_EVENT_FL_KPROBE_BIT,
};
/*
@@ -265,6 +267,7 @@ enum {
* it is best to clear the buffers that used it).
* USE_CALL_FILTER - For ftrace internal events, don't use file filter
* TRACEPOINT - Event is a tracepoint
* KPROBE - Event is a kprobe
*/
enum {
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -274,6 +277,7 @@ enum {
TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
};
struct ftrace_event_call {
@@ -285,7 +289,7 @@ struct ftrace_event_call {
struct tracepoint *tp;
};
struct trace_event event;
const char *print_fmt;
char *print_fmt;
struct event_filter *filter;
void *mod;
void *data;
@@ -303,6 +307,7 @@ struct ftrace_event_call {
#ifdef CONFIG_PERF_EVENTS
int perf_refcount;
struct hlist_head __percpu *perf_events;
struct bpf_prog *prog;
int (*perf_perm)(struct ftrace_event_call *,
struct perf_event *);
@@ -548,6 +553,15 @@ event_trigger_unlock_commit_regs(struct ftrace_event_file *file,
event_triggers_post_call(file, tt);
}
#ifdef CONFIG_BPF_SYSCALL
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx);
#else
static inline unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
return 1;
}
#endif
enum {
FILTER_OTHER = 0,
FILTER_STATIC_STRING,

27
include/linux/fwnode.h Normal file
View File

@@ -0,0 +1,27 @@
/*
* fwnode.h - Firmware device node object handle type definition.
*
* Copyright (C) 2015, Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _LINUX_FWNODE_H_
#define _LINUX_FWNODE_H_
enum fwnode_type {
FWNODE_INVALID = 0,
FWNODE_OF,
FWNODE_ACPI,
FWNODE_PDATA,
};
struct fwnode_handle {
enum fwnode_type type;
struct fwnode_handle *secondary;
};
#endif

View File

@@ -30,6 +30,7 @@ struct vm_area_struct;
#define ___GFP_HARDWALL 0x20000u
#define ___GFP_THISNODE 0x40000u
#define ___GFP_RECLAIMABLE 0x80000u
#define ___GFP_NOACCOUNT 0x100000u
#define ___GFP_NOTRACK 0x200000u
#define ___GFP_NO_KSWAPD 0x400000u
#define ___GFP_OTHER_NODE 0x800000u
@@ -57,8 +58,10 @@ struct vm_area_struct;
* _might_ fail. This depends upon the particular VM implementation.
*
* __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
* cannot handle allocation failures. This modifier is deprecated and no new
* users should be added.
* cannot handle allocation failures. New users should be evaluated carefully
* (and the flag should be used only when there is no reasonable failure policy),
* but it is definitely preferable to use the flag rather than to open-code an
* endless loop around the allocator.
*
* __GFP_NORETRY: The VM implementation must not retry indefinitely.
*
@@ -85,6 +88,7 @@ struct vm_area_struct;
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
@@ -117,16 +121,6 @@ struct vm_area_struct;
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
__GFP_NO_KSWAPD)
/*
* GFP_THISNODE does not perform any reclaim, you most likely want to
* use __GFP_THISNODE to allocate from a given node without fallback!
*/
#ifdef CONFIG_NUMA
#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#else
#define GFP_THISNODE ((__force gfp_t)0)
#endif
/* This mask makes up all the page movable related flags */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)

View File

@@ -16,6 +16,15 @@ struct device;
*/
struct gpio_desc;
/**
* Struct containing an array of descriptors that can be obtained using
* gpiod_get_array().
*/
struct gpio_descs {
unsigned int ndescs;
struct gpio_desc *desc[];
};
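A hedged usage sketch; the "led" connection ID and the output direction are illustrative only:

struct gpio_descs *leds;
unsigned int i;

leds = gpiod_get_array(dev, "led", GPIOD_OUT_LOW);
if (IS_ERR(leds))
	return PTR_ERR(leds);

for (i = 0; i < leds->ndescs; i++)
	gpiod_set_value(leds->desc[i], 1);	/* drive every line high */

gpiod_put_array(leds);	/* or use devm_gpiod_get_array() and skip this */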
#define GPIOD_FLAGS_BIT_DIR_SET BIT(0)
#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
@@ -34,6 +43,9 @@ enum gpiod_flags {
#ifdef CONFIG_GPIOLIB
/* Return the number of GPIOs associated with a device / function */
int gpiod_count(struct device *dev, const char *con_id);
/* Acquire and dispose GPIOs */
struct gpio_desc *__must_check __gpiod_get(struct device *dev,
const char *con_id,
@@ -49,7 +61,14 @@ struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
const char *con_id,
unsigned int index,
enum gpiod_flags flags);
struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
void gpiod_put(struct gpio_desc *desc);
void gpiod_put_array(struct gpio_descs *descs);
struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
const char *con_id,
@@ -64,7 +83,14 @@ struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
struct gpio_desc *__must_check
__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags);
struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_descs *__must_check
devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags);
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs);
int gpiod_get_direction(struct gpio_desc *desc);
int gpiod_direction_input(struct gpio_desc *desc);
@@ -110,9 +136,15 @@ struct fwnode_handle;
struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
const char *propname);
struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
const char *con_id,
struct fwnode_handle *child);
#else /* CONFIG_GPIOLIB */
static inline int gpiod_count(struct device *dev, const char *con_id)
{
return 0;
}
static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
@@ -142,6 +174,20 @@ __gpiod_get_index_optional(struct device *dev, const char *con_id,
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_descs *__must_check
gpiod_get_array(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_descs *__must_check
gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline void gpiod_put(struct gpio_desc *desc)
{
might_sleep();
@@ -150,6 +196,14 @@ static inline void gpiod_put(struct gpio_desc *desc)
WARN_ON(1);
}
static inline void gpiod_put_array(struct gpio_descs *descs)
{
might_sleep();
/* GPIO can never have been requested */
WARN_ON(1);
}
static inline struct gpio_desc *__must_check
__devm_gpiod_get(struct device *dev,
const char *con_id,
@@ -181,6 +235,20 @@ __devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_descs *__must_check
devm_gpiod_get_array(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_descs *__must_check
devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
{
might_sleep();
@@ -189,6 +257,15 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
WARN_ON(1);
}
static inline void devm_gpiod_put_array(struct device *dev,
struct gpio_descs *descs)
{
might_sleep();
/* GPIO can never have been requested */
WARN_ON(1);
}
static inline int gpiod_get_direction(const struct gpio_desc *desc)
{

View File

@@ -6,6 +6,7 @@
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/pinctrl/pinctrl.h>
struct device;
struct gpio_desc;
@@ -173,6 +174,53 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
#endif /* CONFIG_GPIOLIB_IRQCHIP */
#ifdef CONFIG_PINCTRL
/**
* struct gpio_pin_range - pin range controlled by a gpio chip
* @head: list for maintaining set of pin ranges, used internally
* @pctldev: pinctrl device which handles corresponding pins
* @range: actual range of pins controlled by a gpio controller
*/
struct gpio_pin_range {
struct list_head node;
struct pinctrl_dev *pctldev;
struct pinctrl_gpio_range range;
};
int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,
unsigned int npins);
int gpiochip_add_pingroup_range(struct gpio_chip *chip,
struct pinctrl_dev *pctldev,
unsigned int gpio_offset, const char *pin_group);
void gpiochip_remove_pin_ranges(struct gpio_chip *chip);
#else
static inline int
gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,
unsigned int npins)
{
return 0;
}
static inline int
gpiochip_add_pingroup_range(struct gpio_chip *chip,
struct pinctrl_dev *pctldev,
unsigned int gpio_offset, const char *pin_group)
{
return 0;
}
static inline void
gpiochip_remove_pin_ranges(struct gpio_chip *chip)
{
}
#endif /* CONFIG_PINCTRL */
struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
const char *label);
void gpiochip_free_own_desc(struct gpio_desc *desc);
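For instance, a GPIO controller driver sitting next to a pin controller might register its range like this (the pinctrl name and all offsets are invented):

int ret;

/* Map this chip's GPIOs 0..15 onto pins 32..47 of "pinctrl-foo". */
ret = gpiochip_add_pin_range(chip, "pinctrl-foo",
			     0 /* gpio_offset */, 32 /* pin_offset */,
			     16 /* npins */);
if (ret)
	return ret;

/* ... and on teardown: */
gpiochip_remove_pin_ranges(chip);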

View File

@@ -135,6 +135,7 @@ struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
u32 host1x_syncpt_id(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
u32 host1x_syncpt_read(struct host1x_syncpt *sp);
int host1x_syncpt_incr(struct host1x_syncpt *sp);
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,

View File

@@ -135,9 +135,9 @@ static inline int hsi_register_board_info(struct hsi_board_info const *info,
* @device: Driver model representation of the device
* @tx_cfg: HSI TX configuration
* @rx_cfg: HSI RX configuration
* e_handler: Callback for handling port events (RX Wake High/Low)
* pclaimed: Keeps tracks if the clients claimed its associated HSI port
* nb: Notifier block for port events
* @e_handler: Callback for handling port events (RX Wake High/Low)
* @pclaimed: Keeps track of whether the client claimed its associated HSI port
* @nb: Notifier block for port events
*/
struct hsi_client {
struct device device;

View File

@@ -22,7 +22,13 @@ struct mmu_gather;
struct hugepage_subpool {
spinlock_t lock;
long count;
long max_hpages, used_hpages;
long max_hpages; /* Maximum huge pages or -1 if no maximum. */
long used_hpages; /* Used count against maximum, includes */
/* both allocated and reserved pages. */
struct hstate *hstate;
long min_hpages; /* Minimum huge pages or -1 if no minimum. */
long rsv_hpages; /* Pages reserved against global pool to */
/* satisfy minimum size. */
};
struct resv_map {
@@ -38,11 +44,10 @@ extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
int PageHuge(struct page *page);
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
@@ -79,7 +84,6 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
bool is_hugepage_active(struct page *page);
void free_huge_page(struct page *page);
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
@@ -109,11 +113,6 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
#else /* !CONFIG_HUGETLB_PAGE */
static inline int PageHuge(struct page *page)
{
return 0;
}
static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}
@@ -152,7 +151,6 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list)
return false;
}
#define putback_active_hugepage(p) do {} while (0)
#define is_hugepage_active(x) false
static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot)

View File

@@ -50,10 +50,14 @@ struct hwrng {
struct completion cleanup_done;
};
struct device;
/** Register a new Hardware Random Number Generator driver. */
extern int hwrng_register(struct hwrng *rng);
extern int devm_hwrng_register(struct device *dev, struct hwrng *rng);
/** Unregister a Hardware Random Number Generator driver. */
extern void hwrng_unregister(struct hwrng *rng);
extern void devm_hwrng_unregister(struct device *dev, struct hwrng *rng);
/** Feed random bits into the pool. */
extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy);
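A sketch of the managed variant in use; the driver name and read callback are hypothetical:

static int example_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	/* hypothetical hardware: no entropy available yet */
	return 0;
}

static struct hwrng example_rng = {
	.name = "example-rng",
	.read = example_rng_read,
};

static int example_rng_probe(struct platform_device *pdev)
{
	/* unregistered automatically when &pdev->dev goes away */
	return devm_hwrng_register(&pdev->dev, &example_rng);
}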

View File

@@ -646,12 +646,13 @@ struct hv_input_signal_event_buffer {
};
struct vmbus_channel {
/* Unique channel id */
int id;
struct list_head listentry;
struct hv_device *device_obj;
struct work_struct work;
enum vmbus_channel_state state;
struct vmbus_channel_offer_channel offermsg;
@@ -672,7 +673,6 @@ struct vmbus_channel {
struct hv_ring_buffer_info outbound; /* send to parent */
struct hv_ring_buffer_info inbound; /* receive from parent */
spinlock_t inbound_lock;
struct workqueue_struct *controlwq;
struct vmbus_close_msg close_msg;
@@ -758,6 +758,9 @@ struct vmbus_channel {
* link up channels based on their CPU affinity.
*/
struct list_head percpu_list;
int num_sc;
int next_oc;
};
static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
@@ -861,6 +864,14 @@ extern int vmbus_sendpacket(struct vmbus_channel *channel,
enum vmbus_packet_type type,
u32 flags);
extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
void *buffer,
u32 bufferLen,
u64 requestid,
enum vmbus_packet_type type,
u32 flags,
bool kick_q);
extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
struct hv_page_buffer pagebuffers[],
u32 pagecount,
@@ -868,6 +879,15 @@ extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
u32 bufferlen,
u64 requestid);
extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
struct hv_page_buffer pagebuffers[],
u32 pagecount,
void *buffer,
u32 bufferlen,
u64 requestid,
u32 flags,
bool kick_q);
extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
struct hv_multipage_buffer *mpb,
void *buffer,
@@ -1106,6 +1126,16 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver);
0x9A, 0xE7, 0x6B, 0x17, 0x49, 0x77, 0xC1, 0x92 \
}
/*
* NetworkDirect. This is the guest RDMA service.
* {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
*/
#define HV_ND_GUID \
.guid = { \
0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b, \
0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 \
}
/*
* Common header for Hyper-V ICs
*/
@@ -1213,6 +1243,7 @@ void hv_kvp_onchannelcallback(void *);
int hv_vss_init(struct hv_util_service *);
void hv_vss_deinit(void);
void hv_vss_onchannelcallback(void *);
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
extern struct resource hyperv_mmio;

View File

@@ -253,10 +253,10 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
#if IS_ENABLED(CONFIG_I2C_SLAVE)
enum i2c_slave_event {
I2C_SLAVE_REQ_READ_START,
I2C_SLAVE_REQ_READ_END,
I2C_SLAVE_REQ_WRITE_START,
I2C_SLAVE_REQ_WRITE_END,
I2C_SLAVE_READ_REQUESTED,
I2C_SLAVE_WRITE_REQUESTED,
I2C_SLAVE_READ_PROCESSED,
I2C_SLAVE_WRITE_RECEIVED,
I2C_SLAVE_STOP,
};
@@ -278,7 +278,7 @@ static inline int i2c_slave_event(struct i2c_client *client,
* @platform_data: stored in i2c_client.dev.platform_data
* @archdata: copied into i2c_client.dev.archdata
* @of_node: pointer to OpenFirmware device node
* @acpi_node: ACPI device node
* @fwnode: device node supplied by the platform firmware
* @irq: stored in i2c_client.irq
*
* I2C doesn't actually support hardware probing, although controllers and
@@ -299,7 +299,7 @@ struct i2c_board_info {
void *platform_data;
struct dev_archdata *archdata;
struct device_node *of_node;
struct acpi_dev_node acpi_node;
struct fwnode_handle *fwnode;
int irq;
};
@@ -435,8 +435,8 @@ struct i2c_bus_recovery_info {
void (*set_scl)(struct i2c_adapter *, int val);
int (*get_sda)(struct i2c_adapter *);
void (*prepare_recovery)(struct i2c_bus_recovery_info *bri);
void (*unprepare_recovery)(struct i2c_bus_recovery_info *bri);
void (*prepare_recovery)(struct i2c_adapter *);
void (*unprepare_recovery)(struct i2c_adapter *);
/* gpio recovery */
int scl_gpio;
@@ -449,6 +449,48 @@ int i2c_recover_bus(struct i2c_adapter *adap);
int i2c_generic_gpio_recovery(struct i2c_adapter *adap);
int i2c_generic_scl_recovery(struct i2c_adapter *adap);
/**
* struct i2c_adapter_quirks - describe flaws of an i2c adapter
* @flags: see I2C_AQ_* for possible flags and read below
* @max_num_msgs: maximum number of messages per transfer
* @max_write_len: maximum length of a write message
* @max_read_len: maximum length of a read message
* @max_comb_1st_msg_len: maximum length of the first msg in a combined message
* @max_comb_2nd_msg_len: maximum length of the second msg in a combined message
*
* Note about combined messages: Some I2C controllers can only send one message
* per transfer, plus something called combined message or write-then-read.
* This is (usually) a small write message followed by a read message and
* barely enough to access register-based devices like EEPROMs. There is a flag
* to support this mode. It implies max_num_msgs = 2 and does the length checks
* with max_comb_*_len because combined message mode usually has its own
* limitations. Because of HW implementations, some controllers can actually do
* write-then-anything or other variants. To support that, write-then-read has
* been broken out into smaller bits like write-first and read-second which can
* be combined as needed.
*/
struct i2c_adapter_quirks {
u64 flags;
int max_num_msgs;
u16 max_write_len;
u16 max_read_len;
u16 max_comb_1st_msg_len;
u16 max_comb_2nd_msg_len;
};
/* enforce max_num_msgs = 2 and use max_comb_*_len for length checks */
#define I2C_AQ_COMB BIT(0)
/* first combined message must be write */
#define I2C_AQ_COMB_WRITE_FIRST BIT(1)
/* second combined message must be read */
#define I2C_AQ_COMB_READ_SECOND BIT(2)
/* both combined messages must have the same target address */
#define I2C_AQ_COMB_SAME_ADDR BIT(3)
/* convenience macro for typical write-then read case */
#define I2C_AQ_COMB_WRITE_THEN_READ (I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | \
I2C_AQ_COMB_READ_SECOND | I2C_AQ_COMB_SAME_ADDR)
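For example, a controller limited to the classic write-then-read pattern could describe itself like this (the lengths are invented for the sketch):

static const struct i2c_adapter_quirks example_quirks = {
	.flags = I2C_AQ_COMB_WRITE_THEN_READ,
	.max_comb_1st_msg_len = 1,	/* one register-address byte */
	.max_comb_2nd_msg_len = 32,	/* up to 32 bytes read back */
};

/* in the bus driver's probe: adap->quirks = &example_quirks; */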
/*
* i2c_adapter is the structure used to identify a physical i2c bus along
* with the access algorithms necessary to access it.
@@ -474,6 +516,7 @@ struct i2c_adapter {
struct list_head userspace_clients;
struct i2c_bus_recovery_info *bus_recovery_info;
const struct i2c_adapter_quirks *quirks;
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)

View File

@@ -28,7 +28,9 @@
#include <asm/byteorder.h>
#define IEEE802154_MTU 127
#define IEEE802154_MIN_PSDU_LEN 5
#define IEEE802154_ACK_PSDU_LEN 5
#define IEEE802154_MIN_PSDU_LEN 9
#define IEEE802154_FCS_LEN 2
#define IEEE802154_PAN_ID_BROADCAST 0xffff
#define IEEE802154_ADDR_SHORT_BROADCAST 0xffff
@@ -38,6 +40,7 @@
#define IEEE802154_LIFS_PERIOD 40
#define IEEE802154_SIFS_PERIOD 12
#define IEEE802154_MAX_SIFS_FRAME_SIZE 18
#define IEEE802154_MAX_CHANNEL 26
#define IEEE802154_MAX_PAGE 31
@@ -204,11 +207,18 @@ enum {
/**
* ieee802154_is_valid_psdu_len - check if psdu len is valid
* available lengths:
* 0-4 Reserved
* 5 MPDU (Acknowledgment)
* 6-8 Reserved
* 9-127 MPDU
*
* @len: psdu len (MHR + payload + MFR)
*/
static inline bool ieee802154_is_valid_psdu_len(const u8 len)
{
return (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU);
return (len == IEEE802154_ACK_PSDU_LEN ||
(len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU));
}
/**

View File

@@ -44,6 +44,7 @@ struct br_ip_list {
#define BR_PROMISC BIT(7)
#define BR_PROXYARP BIT(8)
#define BR_LEARNING_SYNC BIT(9)
#define BR_PROXYARP_WIFI BIT(10)
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));

View File

@@ -14,5 +14,6 @@ struct ifla_vf_info {
__u32 linkstate;
__u32 min_tx_rate;
__u32 max_tx_rate;
__u32 rss_query_en;
};
#endif /* _LINUX_IF_LINK_H */

View File

@@ -19,6 +19,7 @@
#include <linux/netdevice.h>
#include <linux/ppp_channel.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_pppox.h>
static inline struct pppoe_hdr *pppoe_hdr(const struct sk_buff *skb)
@@ -32,6 +33,7 @@ struct pppoe_opt {
struct pppoe_addr pa; /* what this socket is bound to*/
struct sockaddr_pppox relay; /* what socket data will be
relayed to (PPPoE relaying) */
struct work_struct padt_work; /* Work item for handling PADT */
};
struct pptp_opt {

View File

@@ -561,4 +561,71 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
skb->protocol = htons(ETH_P_802_2);
}
/**
* skb_vlan_tagged - check if skb is vlan tagged.
* @skb: skbuff to query
*
* Returns true if the skb is tagged, regardless of whether it is hardware
* accelerated or not.
*/
static inline bool skb_vlan_tagged(const struct sk_buff *skb)
{
if (!skb_vlan_tag_present(skb) &&
likely(skb->protocol != htons(ETH_P_8021Q) &&
skb->protocol != htons(ETH_P_8021AD)))
return false;
return true;
}
/**
* skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
* @skb: skbuff to query
*
* Returns true if the skb is tagged with multiple vlan headers, regardless
* of whether it is hardware accelerated or not.
*/
static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
if (!skb_vlan_tag_present(skb)) {
struct vlan_ethhdr *veh;
if (likely(protocol != htons(ETH_P_8021Q) &&
protocol != htons(ETH_P_8021AD)))
return false;
veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
}
if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD))
return false;
return true;
}
/**
* vlan_features_check - drop unsafe features for skb with multiple tags.
* @skb: skbuff to query
* @features: features to be checked
*
* Returns features without unsafe ones if the skb has multiple tags.
*/
static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
netdev_features_t features)
{
if (skb_vlan_tagged_multi(skb))
features = netdev_intersect_features(features,
NETIF_F_SG |
NETIF_F_HIGHDMA |
NETIF_F_FRAGLIST |
NETIF_F_GEN_CSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
return features;
}
#endif /* !(_LINUX_IF_VLAN_H_) */
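A driver would typically apply this from its ndo_features_check hook; a minimal sketch:

static netdev_features_t example_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	/* drop offloads the hardware cannot do on multi-tagged frames */
	return vlan_features_check(skb, features);
}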

View File

@@ -11,33 +11,34 @@ struct sk_buff;
struct netlink_callback;
struct inet_diag_handler {
void (*dump)(struct sk_buff *skb,
struct netlink_callback *cb,
struct inet_diag_req_v2 *r,
struct nlattr *bc);
void (*dump)(struct sk_buff *skb,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *r,
struct nlattr *bc);
int (*dump_one)(struct sk_buff *in_skb,
const struct nlmsghdr *nlh,
struct inet_diag_req_v2 *req);
int (*dump_one)(struct sk_buff *in_skb,
const struct nlmsghdr *nlh,
const struct inet_diag_req_v2 *req);
void (*idiag_get_info)(struct sock *sk,
struct inet_diag_msg *r,
void *info);
__u16 idiag_type;
void (*idiag_get_info)(struct sock *sk,
struct inet_diag_msg *r,
void *info);
__u16 idiag_type;
};
struct inet_connection_sock;
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
struct sk_buff *skb, struct inet_diag_req_v2 *req,
struct user_namespace *user_ns,
u32 pid, u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh);
struct sk_buff *skb, const struct inet_diag_req_v2 *req,
struct user_namespace *user_ns,
u32 pid, u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh);
void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
struct netlink_callback *cb, struct inet_diag_req_v2 *r,
struct nlattr *bc);
struct netlink_callback *cb,
const struct inet_diag_req_v2 *r,
struct nlattr *bc);
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
struct sk_buff *in_skb, const struct nlmsghdr *nlh,
struct inet_diag_req_v2 *req);
struct sk_buff *in_skb, const struct nlmsghdr *nlh,
const struct inet_diag_req_v2 *req);
int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);

View File

@@ -115,10 +115,19 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
* Extended Capability Register
*/
#define ecap_niotlb_iunits(e) ((((e) >> 24) & 0xff) + 1)
#define ecap_pss(e) ((e >> 35) & 0x1f)
#define ecap_eafs(e) ((e >> 34) & 0x1)
#define ecap_nwfs(e) ((e >> 33) & 0x1)
#define ecap_srs(e) ((e >> 31) & 0x1)
#define ecap_ers(e) ((e >> 30) & 0x1)
#define ecap_prs(e) ((e >> 29) & 0x1)
#define ecap_pasid(e) ((e >> 28) & 0x1)
#define ecap_dis(e) ((e >> 27) & 0x1)
#define ecap_nest(e) ((e >> 26) & 0x1)
#define ecap_mts(e) ((e >> 25) & 0x1)
#define ecap_ecs(e) ((e >> 24) & 0x1)
#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) \
(ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
#define ecap_coherent(e) ((e) & 0x1)
#define ecap_qis(e) ((e) & 0x2)
#define ecap_pass_through(e) ((e >> 6) & 0x1)
@@ -180,6 +189,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
#define DMA_GSTS_IRES (((u32)1) << 25)
#define DMA_GSTS_CFIS (((u32)1) << 23)
/* DMA_RTADDR_REG */
#define DMA_RTADDR_RTT (((u64)1) << 11)
/* CCMD_REG */
#define DMA_CCMD_ICC (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)

View File

@@ -38,6 +38,14 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end,
}
#endif
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
void __init ioremap_huge_init(void);
int arch_ioremap_pud_supported(void);
int arch_ioremap_pmd_supported(void);
#else
static inline void ioremap_huge_init(void) { }
#endif
/*
* Managed iomap interface
*/
@@ -64,6 +72,8 @@ void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
resource_size_t size);
void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
resource_size_t size);
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
resource_size_t size);
void devm_iounmap(struct device *dev, void __iomem *addr);
int check_signature(const volatile void __iomem *io_addr,
const unsigned char *signature, int length);

View File

@@ -0,0 +1,51 @@
#ifndef _LINUX_IOMMU_COMMON_H
#define _LINUX_IOMMU_COMMON_H
#include <linux/spinlock_types.h>
#include <linux/device.h>
#include <asm/page.h>
#define IOMMU_POOL_HASHBITS 4
#define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS)
struct iommu_pool {
unsigned long start;
unsigned long end;
unsigned long hint;
spinlock_t lock;
};
struct iommu_map_table {
unsigned long table_map_base;
unsigned long table_shift;
unsigned long nr_pools;
void (*lazy_flush)(struct iommu_map_table *);
unsigned long poolsize;
struct iommu_pool pools[IOMMU_NR_POOLS];
u32 flags;
#define IOMMU_HAS_LARGE_POOL 0x00000001
#define IOMMU_NO_SPAN_BOUND 0x00000002
#define IOMMU_NEED_FLUSH 0x00000004
struct iommu_pool large_pool;
unsigned long *map;
};
extern void iommu_tbl_pool_init(struct iommu_map_table *iommu,
unsigned long num_entries,
u32 table_shift,
void (*lazy_flush)(struct iommu_map_table *),
bool large_pool, u32 npools,
bool skip_span_boundary_check);
extern unsigned long iommu_tbl_range_alloc(struct device *dev,
struct iommu_map_table *iommu,
unsigned long npages,
unsigned long *handle,
unsigned long mask,
unsigned int align_order);
extern void iommu_tbl_range_free(struct iommu_map_table *iommu,
u64 dma_addr, unsigned long npages,
unsigned long entry);
#endif
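A rough sketch of the intended call sequence, patterned on the sparc users of this API; the table size, page shift, and mask are illustrative, and the caller is assumed to provide the bitmap and DVMA base:

struct iommu_map_table tbl;
unsigned long entry;

tbl.table_map_base = base_dvma;	/* caller-provided base (assumed) */
tbl.map = kzalloc(BITS_TO_LONGS(1 << 16) * sizeof(long), GFP_KERNEL);
iommu_tbl_pool_init(&tbl, 1 << 16, 13 /* 8K page shift */,
		    NULL /* no lazy_flush */, false /* no large pool */,
		    1 /* npools */, false /* keep span checks */);

entry = iommu_tbl_range_alloc(dev, &tbl, npages, NULL,
			      ~0UL /* no mask restriction */,
			      0 /* align_order */);
/* ... program the hardware for npages pages at entry ... */
iommu_tbl_range_free(&tbl, dma_addr, npages, entry);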

View File

@@ -51,9 +51,33 @@ struct iommu_domain_geometry {
bool force_aperture; /* DMA only allowed in mappable range? */
};
/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
implementation */
#define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */
/*
* These are the possible domain types
*
* IOMMU_DOMAIN_BLOCKED - All DMA is blocked, can be used to isolate
* devices
* IOMMU_DOMAIN_IDENTITY - DMA addresses are system physical addresses
* IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used
* for VMs
* IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations.
* This flag allows IOMMU drivers to implement
* certain optimizations for these domains
*/
#define IOMMU_DOMAIN_BLOCKED (0U)
#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
__IOMMU_DOMAIN_DMA_API)
struct iommu_domain {
unsigned type;
const struct iommu_ops *ops;
void *priv;
iommu_fault_handler_t handler;
void *handler_token;
struct iommu_domain_geometry geometry;
@@ -113,8 +137,11 @@ enum iommu_attr {
*/
struct iommu_ops {
bool (*capable)(enum iommu_cap);
int (*domain_init)(struct iommu_domain *domain);
void (*domain_destroy)(struct iommu_domain *domain);
/* Domain allocation and freeing by the iommu driver */
struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
void (*domain_free)(struct iommu_domain *);
int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
int (*map)(struct iommu_domain *domain, unsigned long iova,

View File

@@ -196,10 +196,8 @@ extern struct resource * __request_region(struct resource *,
/* Compatibility cruft */
#define release_region(start,n) __release_region(&ioport_resource, (start), (n))
#define check_mem_region(start,n) __check_region(&iomem_resource, (start), (n))
#define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n))
extern int __check_region(struct resource *, resource_size_t, resource_size_t);
extern void __release_region(struct resource *, resource_size_t,
resource_size_t);
#ifdef CONFIG_MEMORY_HOTREMOVE
@@ -207,12 +205,6 @@ extern int release_mem_region_adjustable(struct resource *, resource_size_t,
resource_size_t);
#endif
static inline int __deprecated check_region(resource_size_t s,
resource_size_t n)
{
return __check_region(&ioport_resource, s, n);
}
/* Wrappers for managed devices */
struct device;

View File

@@ -53,6 +53,10 @@ struct ipv6_devconf {
__s32 ndisc_notify;
__s32 suppress_frag_ndisc;
__s32 accept_ra_mtu;
struct ipv6_stable_secret {
bool initialized;
struct in6_addr secret;
} stable_secret;
void *sysctl;
};

View File

@@ -0,0 +1,31 @@
/*
* Copyright (C) 2014, Linaro Ltd.
* Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ARM_GIC_ACPI_H_
#define ARM_GIC_ACPI_H_
#ifdef CONFIG_ACPI
/*
* Hard-coded here: unlike the FDT, the MADT provides no memory-region sizes.
* That is not a problem in practice, because these sizes can be inferred
* from the GIC specification.
*/
#define ACPI_GICV2_DIST_MEM_SIZE (SZ_4K)
#define ACPI_GIC_CPU_IF_MEM_SIZE (SZ_8K)
struct acpi_table_header;
int gic_v2_acpi_init(struct acpi_table_header *table);
void acpi_gic_init(void);
#else
static inline void acpi_gic_init(void) { }
#endif
#endif /* ARM_GIC_ACPI_H_ */

View File

@@ -95,8 +95,6 @@
struct device_node;
extern struct irq_chip gic_arch_extn;
void gic_set_irqchip_flags(unsigned long flags);
void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
u32 offset, struct device_node *);

View File

@@ -165,6 +165,8 @@
#define GIC_VPE_PEND_SWINT0_MSK (MSK(1) << GIC_VPE_PEND_SWINT0_SHF)
#define GIC_VPE_PEND_SWINT1_SHF 5
#define GIC_VPE_PEND_SWINT1_MSK (MSK(1) << GIC_VPE_PEND_SWINT1_SHF)
#define GIC_VPE_PEND_FDC_SHF 6
#define GIC_VPE_PEND_FDC_MSK (MSK(1) << GIC_VPE_PEND_FDC_SHF)
/* GIC_VPE_RMASK Masks */
#define GIC_VPE_RMASK_WD_SHF 0
@@ -179,6 +181,8 @@
#define GIC_VPE_RMASK_SWINT0_MSK (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF)
#define GIC_VPE_RMASK_SWINT1_SHF 5
#define GIC_VPE_RMASK_SWINT1_MSK (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF)
#define GIC_VPE_RMASK_FDC_SHF 6
#define GIC_VPE_RMASK_FDC_MSK (MSK(1) << GIC_VPE_RMASK_FDC_SHF)
/* GIC_VPE_SMASK Masks */
#define GIC_VPE_SMASK_WD_SHF 0
@@ -193,6 +197,8 @@
#define GIC_VPE_SMASK_SWINT0_MSK (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF)
#define GIC_VPE_SMASK_SWINT1_SHF 5
#define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF)
#define GIC_VPE_SMASK_FDC_SHF 6
#define GIC_VPE_SMASK_FDC_MSK (MSK(1) << GIC_VPE_SMASK_FDC_SHF)
/* GIC nomenclature for Core Interrupt Pins. */
#define GIC_CPU_INT0 0 /* Core Interrupt 2 */
@@ -247,4 +253,5 @@ extern unsigned int plat_ipi_call_int_xlate(unsigned int);
extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
extern int gic_get_c0_compare_int(void);
extern int gic_get_c0_perfcount_int(void);
extern int gic_get_c0_fdc_int(void);
#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */

View File

@@ -145,11 +145,11 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
}
/* jhash_3words - hash exactly 3, 2 or 1 word(s) */
static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
/* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */
static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
a += JHASH_INITVAL;
b += JHASH_INITVAL;
a += initval;
b += initval;
c += initval;
__jhash_final(a, b, c);
@@ -157,14 +157,19 @@ static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
return c;
}
static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
{
return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2));
}
static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
{
return jhash_3words(a, b, 0, initval);
return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
}
static inline u32 jhash_1word(u32 a, u32 initval)
{
return jhash_3words(a, 0, 0, initval);
return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2));
}
#endif /* _LINUX_JHASH_H */
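Callers keep the same API but get different hash values; for example:

u32 seed = 0x12345678;	/* real users pass a per-boot random seed */
u32 h2 = jhash_2words(saddr, daddr, seed);
u32 h3 = jhash_3words(saddr, daddr, dport, seed);
/* Note: h2 != jhash_3words(saddr, daddr, 0, seed) now that each
 * width folds its own length into the initval. */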

View File

@@ -0,0 +1,43 @@
/*
* JZ4780 NAND/external memory controller (NEMC)
*
* Copyright (c) 2015 Imagination Technologies
* Author: Alex Smith <alex@alex-smith.me.uk>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef __LINUX_JZ4780_NEMC_H__
#define __LINUX_JZ4780_NEMC_H__
#include <linux/types.h>
struct device;
/*
* Number of NEMC banks. Note that there are actually 6, but they are numbered
* from 1.
*/
#define JZ4780_NEMC_NUM_BANKS 7
/**
* enum jz4780_nemc_bank_type - device types which can be connected to a bank
* @JZ4780_NEMC_BANK_SRAM: SRAM
* @JZ4780_NEMC_BANK_NAND: NAND
*/
enum jz4780_nemc_bank_type {
JZ4780_NEMC_BANK_SRAM,
JZ4780_NEMC_BANK_NAND,
};
extern unsigned int jz4780_nemc_num_banks(struct device *dev);
extern void jz4780_nemc_set_type(struct device *dev, unsigned int bank,
enum jz4780_nemc_bank_type type);
extern void jz4780_nemc_assert(struct device *dev, unsigned int bank,
bool assert);
#endif /* __LINUX_JZ4780_NEMC_H__ */
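A child NAND driver would use these roughly as follows (the bank number is invented; banks are numbered from 1, as noted above):

if (jz4780_nemc_num_banks(nemc) < 1)
	return -ENODEV;

jz4780_nemc_set_type(nemc, 1, JZ4780_NEMC_BANK_NAND);
jz4780_nemc_assert(nemc, 1, true);	/* assert bank 1's dedicated pin (sketch) */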

View File

@@ -44,6 +44,7 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object);
void kasan_kmalloc_large(const void *ptr, size_t size);
void kasan_kfree_large(const void *ptr);
void kasan_kfree(void *ptr);
void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
void kasan_krealloc(const void *object, size_t new_size);
@@ -71,6 +72,7 @@ static inline void kasan_poison_object_data(struct kmem_cache *cache,
static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
static inline void kasan_kfree_large(const void *ptr) {}
static inline void kasan_kfree(void *ptr) {}
static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
size_t size) {}
static inline void kasan_krealloc(const void *object, size_t new_size) {}

View File

@@ -22,14 +22,6 @@
#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
#define ___config_enabled(__ignored, val, ...) val
/*
* IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
* 0 otherwise.
*
*/
#define IS_ENABLED(option) \
(config_enabled(option) || config_enabled(option##_MODULE))
/*
* IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0
* otherwise. For boolean options, this is equivalent to
@@ -43,4 +35,20 @@
*/
#define IS_MODULE(option) config_enabled(option##_MODULE)
/*
* IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
* code can call a function defined in code compiled based on CONFIG_FOO.
* This is similar to IS_ENABLED(), but returns false when invoked from
* built-in code when CONFIG_FOO is set to 'm'.
*/
#define IS_REACHABLE(option) (config_enabled(option) || \
(config_enabled(option##_MODULE) && config_enabled(MODULE)))
/*
* IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
* 0 otherwise.
*/
#define IS_ENABLED(option) \
(IS_BUILTIN(option) || IS_MODULE(option))
#endif /* __LINUX_KCONFIG_H */
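Concretely (CONFIG_FOO and foo_notify() are hypothetical):

#if IS_REACHABLE(CONFIG_FOO)
	foo_notify();	/* FOO is y, or both FOO and this caller are m */
#elif IS_ENABLED(CONFIG_FOO)
	/* FOO=m but this code is built-in: the symbol may not be
	 * linkable from here, so avoid the direct call. */
#endif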

View File

@@ -103,6 +103,18 @@
(((__x) - ((__d) / 2)) / (__d)); \
} \
)
/*
* Same as above but for u64 dividends. divisor must be a 32-bit
* number.
*/
#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
{ \
typeof(divisor) __d = divisor; \
unsigned long long _tmp = (x) + (__d) / 2; \
do_div(_tmp, __d); \
_tmp; \
} \
)
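For example:

u64 ns = 1000000007ULL;
u32 hz = 1000000;
u64 q = DIV_ROUND_CLOSEST_ULL(ns, hz);	/* 1000: the .000007 rounds down */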
/*
* Multiplies an integer by a fraction, while avoiding unnecessary

View File

@@ -40,6 +40,10 @@
#error KEXEC_CONTROL_MEMORY_LIMIT not defined
#endif
#ifndef KEXEC_CONTROL_MEMORY_GFP
#define KEXEC_CONTROL_MEMORY_GFP GFP_KERNEL
#endif
#ifndef KEXEC_CONTROL_PAGE_SIZE
#error KEXEC_CONTROL_PAGE_SIZE not defined
#endif

View File

@@ -35,18 +35,6 @@ static inline void ksm_exit(struct mm_struct *mm)
__ksm_exit(mm);
}
/*
* A KSM page is one of those write-protected "shared pages" or "merged pages"
* which KSM maps into multiple mms, wherever identical anonymous page content
* is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
* anon_vma, but to that page's node of the stable tree.
*/
static inline int PageKsm(struct page *page)
{
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}
static inline struct stable_node *page_stable_node(struct page *page)
{
return PageKsm(page) ? page_rmapping(page) : NULL;
@@ -87,11 +75,6 @@ static inline void ksm_exit(struct mm_struct *mm)
{
}
static inline int PageKsm(struct page *page)
{
return 0;
}
#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags)

View File

@@ -779,7 +779,8 @@ static inline void kvm_guest_enter(void)
* one time slice). Let's treat guest mode as a quiescent state, just like
* we do with user-mode execution.
*/
rcu_virt_note_context_switch(smp_processor_id());
if (!context_tracking_cpu_is_enabled())
rcu_virt_note_context_switch(smp_processor_id());
}
static inline void kvm_guest_exit(void)

View File

@@ -13,7 +13,6 @@
#define __LINUX_FLASH_LEDS_H_INCLUDED
#include <linux/leds.h>
#include <uapi/linux/v4l2-controls.h>
struct device_node;
struct led_classdev_flash;
@@ -33,7 +32,7 @@ struct led_classdev_flash;
#define LED_FAULT_LED_OVER_TEMPERATURE (1 << 8)
#define LED_NUM_FLASH_FAULTS 9
#define LED_FLASH_MAX_SYSFS_GROUPS 7
#define LED_FLASH_SYSFS_GROUPS_SIZE 5
struct led_flash_ops {
/* set flash brightness */
@@ -81,21 +80,7 @@ struct led_classdev_flash {
struct led_flash_setting timeout;
/* LED Flash class sysfs groups */
const struct attribute_group *sysfs_groups[LED_FLASH_MAX_SYSFS_GROUPS];
/* LEDs available for flash strobe synchronization */
struct led_classdev_flash **sync_leds;
/* Number of LEDs available for flash strobe synchronization */
int num_sync_leds;
/*
* The identifier of the sub-led to synchronize the flash strobe with.
* Identifiers start from 1, which reflects the first element from the
* sync_leds array. 0 means that the flash strobe should not be
* synchronized.
*/
u32 sync_led_id;
const struct attribute_group *sysfs_groups[LED_FLASH_SYSFS_GROUPS_SIZE];
};
static inline struct led_classdev_flash *lcdev_to_flcdev(

Some files were not shown because too many files have changed in this diff Show More