Merge branch 'for-4.12/asus' into for-linus
@@ -287,18 +287,15 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
}

/* Validate the processor object's proc_id */
bool acpi_processor_validate_proc_id(int proc_id);
bool acpi_duplicate_processor_id(int proc_id);

#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
		 int *pcpu);
int acpi_unmap_cpu(int cpu);
int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */

void acpi_set_processor_mapping(void);

#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
#endif
@@ -14,9 +14,6 @@
|
||||
#ifndef __LINUX_ARM_SMCCC_H
|
||||
#define __LINUX_ARM_SMCCC_H
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
/*
|
||||
* This file provides common defines for ARM SMC Calling Convention as
|
||||
* specified in
|
||||
@@ -60,6 +57,13 @@
|
||||
#define ARM_SMCCC_OWNER_TRUSTED_OS 50
|
||||
#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63
|
||||
|
||||
#define ARM_SMCCC_QUIRK_NONE 0
|
||||
#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/types.h>
|
||||
/**
|
||||
* struct arm_smccc_res - Result from SMC/HVC call
|
||||
* @a0-a3 result values from registers 0 to 3
|
||||
@@ -72,33 +76,59 @@ struct arm_smccc_res {
|
||||
};
|
||||
|
||||
/**
|
||||
* arm_smccc_smc() - make SMC calls
|
||||
* struct arm_smccc_quirk - Contains quirk information
|
||||
* @id: quirk identification
|
||||
* @state: quirk specific information
|
||||
* @a6: Qualcomm quirk entry for returning post-smc call contents of a6
|
||||
*/
|
||||
struct arm_smccc_quirk {
|
||||
int id;
|
||||
union {
|
||||
unsigned long a6;
|
||||
} state;
|
||||
};
|
||||
|
||||
/**
|
||||
* __arm_smccc_smc() - make SMC calls
|
||||
* @a0-a7: arguments passed in registers 0 to 7
|
||||
* @res: result values from registers 0 to 3
|
||||
* @quirk: points to an arm_smccc_quirk, or NULL when no quirks are required.
|
||||
*
|
||||
* This function is used to make SMC calls following SMC Calling Convention.
|
||||
* The content of the supplied param are copied to registers 0 to 7 prior
|
||||
* to the SMC instruction. The return values are updated with the content
|
||||
* from register 0 to 3 on return from the SMC instruction.
|
||||
* from register 0 to 3 on return from the SMC instruction. An optional
|
||||
* quirk structure provides vendor specific behavior.
|
||||
*/
|
||||
asmlinkage void arm_smccc_smc(unsigned long a0, unsigned long a1,
|
||||
asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1,
|
||||
unsigned long a2, unsigned long a3, unsigned long a4,
|
||||
unsigned long a5, unsigned long a6, unsigned long a7,
|
||||
struct arm_smccc_res *res);
|
||||
struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
|
||||
|
||||
/**
|
||||
* arm_smccc_hvc() - make HVC calls
|
||||
* __arm_smccc_hvc() - make HVC calls
|
||||
* @a0-a7: arguments passed in registers 0 to 7
|
||||
* @res: result values from registers 0 to 3
|
||||
* @quirk: points to an arm_smccc_quirk, or NULL when no quirks are required.
|
||||
*
|
||||
* This function is used to make HVC calls following SMC Calling
|
||||
* Convention. The content of the supplied param are copied to registers 0
|
||||
* to 7 prior to the HVC instruction. The return values are updated with
|
||||
* the content from register 0 to 3 on return from the HVC instruction.
|
||||
* the content from register 0 to 3 on return from the HVC instruction. An
|
||||
* optional quirk structure provides vendor specific behavior.
|
||||
*/
|
||||
asmlinkage void arm_smccc_hvc(unsigned long a0, unsigned long a1,
|
||||
asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
|
||||
unsigned long a2, unsigned long a3, unsigned long a4,
|
||||
unsigned long a5, unsigned long a6, unsigned long a7,
|
||||
struct arm_smccc_res *res);
|
||||
struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
|
||||
|
||||
#define arm_smccc_smc(...) __arm_smccc_smc(__VA_ARGS__, NULL)
|
||||
|
||||
#define arm_smccc_smc_quirk(...) __arm_smccc_smc(__VA_ARGS__)
|
||||
|
||||
#define arm_smccc_hvc(...) __arm_smccc_hvc(__VA_ARGS__, NULL)
|
||||
|
||||
#define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__)
|
||||
|
||||
#endif /*__ASSEMBLY__*/
|
||||
#endif /*__LINUX_ARM_SMCCC_H*/
|
||||
|
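As a usage sketch (not taken from this patch): a caller that needs no quirk handling keeps using arm_smccc_smc(), which now expands to __arm_smccc_smc() with a NULL quirk pointer. The SiP function ID and helper name below are hypothetical.

#include <linux/arm-smccc.h>
#include <linux/errno.h>

#define MY_SIP_GET_VERSION	0x82000001	/* hypothetical SiP function ID */

static int my_sip_get_version(unsigned long *major, unsigned long *minor)
{
	struct arm_smccc_res res;

	/* Expands to __arm_smccc_smc(..., &res, NULL): no quirk handling. */
	arm_smccc_smc(MY_SIP_GET_VERSION, 0, 0, 0, 0, 0, 0, 0, &res);
	if ((long)res.a0 < 0)
		return -EINVAL;

	*major = res.a1;
	*minor = res.a2;
	return 0;
}

/*
 * A caller that needs the Qualcomm a6 save/restore behaviour would instead
 * pass a quirk descriptor via arm_smccc_smc_quirk():
 *
 *	struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };
 *	arm_smccc_smc_quirk(fn, 0, 0, 0, 0, 0, 0, 0, &res, &quirk);
 */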
@@ -20,6 +20,7 @@ struct ssc_device {
	int		user;
	int		irq;
	bool		clk_from_rk_pin;
	bool		sound_dai;
};

struct ssc_device * __must_check ssc_request(unsigned int ssc_num);
@@ -1,45 +1,66 @@
|
||||
#ifndef _LINUX_AVERAGE_H
|
||||
#define _LINUX_AVERAGE_H
|
||||
|
||||
/* Exponentially weighted moving average (EWMA) */
|
||||
/*
|
||||
* Exponentially weighted moving average (EWMA)
|
||||
*
|
||||
* This implements a fixed-precision EWMA algorithm, with both the
|
||||
* precision and fall-off coefficient determined at compile-time
|
||||
* and built into the generated helper funtions.
|
||||
*
|
||||
* The first argument to the macro is the name that will be used
|
||||
* for the struct and helper functions.
|
||||
*
|
||||
* The second argument, the precision, expresses how many bits are
|
||||
* used for the fractional part of the fixed-precision values.
|
||||
*
|
||||
* The third argument, the weight reciprocal, determines how the
|
||||
* new values will be weighed vs. the old state, new values will
|
||||
* get weight 1/weight_rcp and old values 1-1/weight_rcp. Note
|
||||
* that this parameter must be a power of two for efficiency.
|
||||
*/
|
||||
|
||||
#define DECLARE_EWMA(name, _factor, _weight) \
|
||||
#define DECLARE_EWMA(name, _precision, _weight_rcp) \
|
||||
struct ewma_##name { \
|
||||
unsigned long internal; \
|
||||
}; \
|
||||
static inline void ewma_##name##_init(struct ewma_##name *e) \
|
||||
{ \
|
||||
BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
|
||||
BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
|
||||
BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
|
||||
BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
|
||||
BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
|
||||
BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
|
||||
/* \
|
||||
* Even if you want to feed it just 0/1 you should have \
|
||||
* some bits for the non-fractional part... \
|
||||
*/ \
|
||||
BUILD_BUG_ON((_precision) > 30); \
|
||||
BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
|
||||
e->internal = 0; \
|
||||
} \
|
||||
static inline unsigned long \
|
||||
ewma_##name##_read(struct ewma_##name *e) \
|
||||
{ \
|
||||
BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
|
||||
BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
|
||||
BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
|
||||
BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
|
||||
return e->internal >> ilog2(_factor); \
|
||||
BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
|
||||
BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
|
||||
BUILD_BUG_ON((_precision) > 30); \
|
||||
BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
|
||||
return e->internal >> (_precision); \
|
||||
} \
|
||||
static inline void ewma_##name##_add(struct ewma_##name *e, \
|
||||
unsigned long val) \
|
||||
{ \
|
||||
unsigned long internal = ACCESS_ONCE(e->internal); \
|
||||
unsigned long weight = ilog2(_weight); \
|
||||
unsigned long factor = ilog2(_factor); \
|
||||
unsigned long weight_rcp = ilog2(_weight_rcp); \
|
||||
unsigned long precision = _precision; \
|
||||
\
|
||||
BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
|
||||
BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
|
||||
BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
|
||||
BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
|
||||
BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
|
||||
BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
|
||||
BUILD_BUG_ON((_precision) > 30); \
|
||||
BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
|
||||
\
|
||||
ACCESS_ONCE(e->internal) = internal ? \
|
||||
(((internal << weight) - internal) + \
|
||||
(val << factor)) >> weight : \
|
||||
(val << factor); \
|
||||
(((internal << weight_rcp) - internal) + \
|
||||
(val << precision)) >> weight_rcp : \
|
||||
(val << precision); \
|
||||
}
|
||||
|
||||
#endif /* _LINUX_AVERAGE_H */
|
||||
|
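A minimal usage sketch of the renamed macro parameters (not part of the patch); the counter name and numbers are illustrative: 4 bits of fractional precision, new samples weighted 1/8 against the old state.

#include <linux/average.h>

/* Generates struct ewma_rssi plus ewma_rssi_init/_add/_read helpers. */
DECLARE_EWMA(rssi, 4, 8)

struct my_sta {
	struct ewma_rssi avg_rssi;
};

static void my_sta_init(struct my_sta *sta)
{
	ewma_rssi_init(&sta->avg_rssi);
}

static void my_sta_rx(struct my_sta *sta, unsigned long rssi_sample)
{
	ewma_rssi_add(&sta->avg_rssi, rssi_sample);
}

static unsigned long my_sta_avg_rssi(struct my_sta *sta)
{
	/* Returns the average with the 4 fractional bits shifted away. */
	return ewma_rssi_read(&sta->avg_rssi);
}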
@@ -6,6 +6,8 @@
|
||||
#include <asm/exec.h>
|
||||
#include <uapi/linux/binfmts.h>
|
||||
|
||||
struct filename;
|
||||
|
||||
#define CORENAME_MAX_SIZE 128
|
||||
|
||||
/*
|
||||
@@ -123,4 +125,12 @@ extern void install_exec_creds(struct linux_binprm *bprm);
|
||||
extern void set_binfmt(struct linux_binfmt *new);
|
||||
extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t);
|
||||
|
||||
extern int do_execve(struct filename *,
|
||||
const char __user * const __user *,
|
||||
const char __user * const __user *);
|
||||
extern int do_execveat(int, struct filename *,
|
||||
const char __user * const __user *,
|
||||
const char __user * const __user *,
|
||||
int);
|
||||
|
||||
#endif /* _LINUX_BINFMTS_H */
|
||||
|
@@ -183,7 +183,7 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
|
||||
|
||||
#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
|
||||
|
||||
static inline unsigned bio_segments(struct bio *bio)
|
||||
static inline unsigned __bio_segments(struct bio *bio, struct bvec_iter *bvec)
|
||||
{
|
||||
unsigned segs = 0;
|
||||
struct bio_vec bv;
|
||||
@@ -205,12 +205,17 @@ static inline unsigned bio_segments(struct bio *bio)
|
||||
break;
|
||||
}
|
||||
|
||||
bio_for_each_segment(bv, bio, iter)
|
||||
__bio_for_each_segment(bv, bio, iter, *bvec)
|
||||
segs++;
|
||||
|
||||
return segs;
|
||||
}
|
||||
|
||||
static inline unsigned bio_segments(struct bio *bio)
|
||||
{
|
||||
return __bio_segments(bio, &bio->bi_iter);
|
||||
}
|
||||
|
||||
/*
|
||||
* get a reference to a bio, so it won't disappear. the intended use is
|
||||
* something like:
|
||||
@@ -384,6 +389,8 @@ extern void bio_put(struct bio *);
|
||||
extern void __bio_clone_fast(struct bio *, struct bio *);
|
||||
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
|
||||
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
|
||||
extern struct bio *bio_clone_bioset_partial(struct bio *, gfp_t,
|
||||
struct bio_set *, int, int);
|
||||
|
||||
extern struct bio_set *fs_bio_set;
|
||||
|
||||
|
@@ -62,6 +62,19 @@
		 (1ULL << __bf_shf(_mask))); \
	})

/**
 * FIELD_FIT() - check if value fits in the field
 * @_mask: shifted mask defining the field's length and position
 * @_val:  value to test against the field
 *
 * Return: true if @_val can fit inside @_mask, false if @_val is too big.
 */
#define FIELD_FIT(_mask, _val) \
	({ \
		__BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \
		!((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
	})
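A small usage sketch (not part of the patch) showing how FIELD_FIT() pairs with the existing FIELD_PREP() helper; the register layout and function name are made up for illustration.

#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <linux/errno.h>

/* Hypothetical 4-bit MODE field in a 32-bit register. */
#define MY_REG_MODE	GENMASK(7, 4)

static int my_set_mode(u32 *reg, u32 mode)
{
	/* Reject values that cannot be represented in the MODE field. */
	if (!FIELD_FIT(MY_REG_MODE, mode))
		return -EINVAL;

	*reg &= ~MY_REG_MODE;
	*reg |= FIELD_PREP(MY_REG_MODE, mode);
	return 0;
}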
/**
 * FIELD_PREP() - prepare a bitfield element
 * @_mask: shifted mask defining the field's length and position
include/linux/blk-mq-virtio.h (new file, 10 lines)
@@ -0,0 +1,10 @@
#ifndef _LINUX_BLK_MQ_VIRTIO_H
#define _LINUX_BLK_MQ_VIRTIO_H

struct blk_mq_tag_set;
struct virtio_device;

int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
		struct virtio_device *vdev, int first_vec);

#endif /* _LINUX_BLK_MQ_VIRTIO_H */
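A hedged sketch of how a virtio block driver might wire the new helper into its queue-mapping callback; the driver structure and callback wiring are assumptions, only blk_mq_virtio_map_queues() comes from this file.

#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/virtio.h>

struct my_virtio_blk {			/* hypothetical driver state */
	struct virtio_device *vdev;
};

static int my_virtblk_map_queues(struct blk_mq_tag_set *set)
{
	struct my_virtio_blk *vblk = set->driver_data;

	/* Spread the hardware queues over the device's interrupt vectors;
	 * first_vec = 0 means no vectors are skipped before the virtqueues. */
	return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
}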
@@ -33,6 +33,7 @@ struct blk_mq_hw_ctx {
|
||||
struct blk_mq_ctx **ctxs;
|
||||
unsigned int nr_ctx;
|
||||
|
||||
wait_queue_t dispatch_wait;
|
||||
atomic_t wait_index;
|
||||
|
||||
struct blk_mq_tags *tags;
|
||||
@@ -160,6 +161,7 @@ enum {
|
||||
BLK_MQ_S_STOPPED = 0,
|
||||
BLK_MQ_S_TAG_ACTIVE = 1,
|
||||
BLK_MQ_S_SCHED_RESTART = 2,
|
||||
BLK_MQ_S_TAG_WAITING = 3,
|
||||
|
||||
BLK_MQ_MAX_DEPTH = 10240,
|
||||
|
||||
@@ -243,6 +245,9 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
|
||||
void blk_mq_freeze_queue(struct request_queue *q);
|
||||
void blk_mq_unfreeze_queue(struct request_queue *q);
|
||||
void blk_mq_freeze_queue_start(struct request_queue *q);
|
||||
void blk_mq_freeze_queue_wait(struct request_queue *q);
|
||||
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
|
||||
unsigned long timeout);
|
||||
int blk_mq_reinit_tagset(struct blk_mq_tag_set *set);
|
||||
|
||||
int blk_mq_map_queues(struct blk_mq_tag_set *set);
|
||||
|
@@ -2,6 +2,7 @@
|
||||
#define _LINUX_BLKDEV_H
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/sched/clock.h>
|
||||
|
||||
#ifdef CONFIG_BLOCK
|
||||
|
||||
@@ -434,7 +435,6 @@ struct request_queue {
|
||||
struct delayed_work delay_work;
|
||||
|
||||
struct backing_dev_info *backing_dev_info;
|
||||
struct disk_devt *disk_devt;
|
||||
|
||||
/*
|
||||
* The queue owner gets to use this for whatever they like.
|
||||
|
@@ -8,10 +8,12 @@
|
||||
#define _LINUX_BPF_H 1
|
||||
|
||||
#include <uapi/linux/bpf.h>
|
||||
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/file.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/rbtree_latch.h>
|
||||
|
||||
struct perf_event;
|
||||
struct bpf_map;
|
||||
@@ -69,14 +71,14 @@ enum bpf_arg_type {
|
||||
/* the following constraints used to prototype bpf_memcmp() and other
|
||||
* functions that access data on eBPF program stack
|
||||
*/
|
||||
ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
|
||||
ARG_PTR_TO_RAW_STACK, /* any pointer to eBPF program stack, area does not
|
||||
* need to be initialized, helper function must fill
|
||||
* all bytes or clear them in error case.
|
||||
ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
|
||||
ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized,
|
||||
* helper function must fill all bytes or clear
|
||||
* them in error case.
|
||||
*/
|
||||
|
||||
ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
|
||||
ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */
|
||||
ARG_CONST_SIZE, /* number of bytes accessed from memory */
|
||||
ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */
|
||||
|
||||
ARG_PTR_TO_CTX, /* pointer to context */
|
||||
ARG_ANYTHING, /* any (initialized) argument is ok */
|
||||
@@ -161,9 +163,10 @@ struct bpf_verifier_ops {
|
||||
enum bpf_reg_type *reg_type);
|
||||
int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
|
||||
const struct bpf_prog *prog);
|
||||
u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
|
||||
int src_reg, int ctx_off,
|
||||
struct bpf_insn *insn, struct bpf_prog *prog);
|
||||
u32 (*convert_ctx_access)(enum bpf_access_type type,
|
||||
const struct bpf_insn *src,
|
||||
struct bpf_insn *dst,
|
||||
struct bpf_prog *prog);
|
||||
};
|
||||
|
||||
struct bpf_prog_type_list {
|
||||
@@ -176,6 +179,8 @@ struct bpf_prog_aux {
|
||||
atomic_t refcnt;
|
||||
u32 used_map_cnt;
|
||||
u32 max_ctx_offset;
|
||||
struct latch_tree_node ksym_tnode;
|
||||
struct list_head ksym_lnode;
|
||||
const struct bpf_verifier_ops *ops;
|
||||
struct bpf_map **used_maps;
|
||||
struct bpf_prog *prog;
|
||||
|
include/linux/bpf_trace.h (new file, 7 lines)
@@ -0,0 +1,7 @@
#ifndef __LINUX_BPF_TRACE_H__
#define __LINUX_BPF_TRACE_H__

#include <trace/events/bpf.h>
#include <trace/events/xdp.h>

#endif /* __LINUX_BPF_TRACE_H__ */
@@ -17,6 +17,7 @@
|
||||
#define PHY_ID_BCM5482 0x0143bcb0
|
||||
#define PHY_ID_BCM5411 0x00206070
|
||||
#define PHY_ID_BCM5421 0x002060e0
|
||||
#define PHY_ID_BCM54210E 0x600d84a0
|
||||
#define PHY_ID_BCM5464 0x002060b0
|
||||
#define PHY_ID_BCM5461 0x002060c0
|
||||
#define PHY_ID_BCM54612E 0x03625e60
|
||||
@@ -24,6 +25,7 @@
|
||||
#define PHY_ID_BCM57780 0x03625d90
|
||||
|
||||
#define PHY_ID_BCM7250 0xae025280
|
||||
#define PHY_ID_BCM7278 0xae0251a0
|
||||
#define PHY_ID_BCM7364 0xae025260
|
||||
#define PHY_ID_BCM7366 0x600d8490
|
||||
#define PHY_ID_BCM7346 0x600d8650
|
||||
@@ -31,6 +33,7 @@
|
||||
#define PHY_ID_BCM7425 0x600d86b0
|
||||
#define PHY_ID_BCM7429 0x600d8730
|
||||
#define PHY_ID_BCM7435 0x600d8750
|
||||
#define PHY_ID_BCM74371 0xae0252e0
|
||||
#define PHY_ID_BCM7439 0x600d8480
|
||||
#define PHY_ID_BCM7439_2 0xae025080
|
||||
#define PHY_ID_BCM7445 0x600d8510
|
||||
@@ -103,19 +106,17 @@
|
||||
/*
|
||||
* AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18)
|
||||
*/
|
||||
#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
|
||||
#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x00
|
||||
#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400
|
||||
#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800
|
||||
|
||||
#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
|
||||
#define MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW 0x0100
|
||||
#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
|
||||
#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000
|
||||
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007
|
||||
#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12
|
||||
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN (1 << 8)
|
||||
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN (1 << 4)
|
||||
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07
|
||||
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010
|
||||
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100
|
||||
#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
|
||||
#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
|
||||
|
||||
#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12
|
||||
#define MII_BCM54XX_AUXCTL_SHDWSEL_MASK 0x0007
|
||||
|
||||
/*
|
||||
|
@@ -124,18 +124,20 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr,

/*
 * Since detected data corruption should stop operation on the affected
 * structures, this returns false if the corruption condition is found.
 * structures. Return value must be checked and sanely acted on by caller.
 */
static inline __must_check bool check_data_corruption(bool v) { return v; }
#define CHECK_DATA_CORRUPTION(condition, fmt, ...)			 \
	do {								 \
		if (unlikely(condition)) {				 \
	check_data_corruption(({					 \
		bool corruption = unlikely(condition);			 \
		if (corruption) {					 \
			if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \
				pr_err(fmt, ##__VA_ARGS__);		 \
				BUG();					 \
			} else						 \
				WARN(1, fmt, ##__VA_ARGS__);		 \
			return false;					 \
		}							 \
	} while (0)
		corruption;						 \
	}))

#endif /* _LINUX_BUG_H */
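With the rework the macro yields a boolean the caller must act on. Below is a sketch modelled loosely on the list-debugging checks; the function name and messages are illustrative, not from this patch.

#include <linux/bug.h>
#include <linux/list.h>

static bool my_list_add_valid(struct list_head *new, struct list_head *prev,
			      struct list_head *next)
{
	/* Each check prints/WARNs/BUGs as configured and evaluates to true
	 * when corruption was found, so the caller decides how to bail out. */
	if (CHECK_DATA_CORRUPTION(next->prev != prev,
				  "list_add corruption: next->prev should be prev (%p)\n",
				  prev) ||
	    CHECK_DATA_CORRUPTION(prev->next != next,
				  "list_add corruption: prev->next should be next (%p)\n",
				  next))
		return false;

	return true;
}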
@@ -38,6 +38,13 @@ struct can_priv {
|
||||
struct can_bittiming bittiming, data_bittiming;
|
||||
const struct can_bittiming_const *bittiming_const,
|
||||
*data_bittiming_const;
|
||||
const u16 *termination_const;
|
||||
unsigned int termination_const_cnt;
|
||||
u16 termination;
|
||||
const u32 *bitrate_const;
|
||||
unsigned int bitrate_const_cnt;
|
||||
const u32 *data_bitrate_const;
|
||||
unsigned int data_bitrate_const_cnt;
|
||||
struct can_clock clock;
|
||||
|
||||
enum can_state state;
|
||||
@@ -53,6 +60,7 @@ struct can_priv {
|
||||
int (*do_set_bittiming)(struct net_device *dev);
|
||||
int (*do_set_data_bittiming)(struct net_device *dev);
|
||||
int (*do_set_mode)(struct net_device *dev, enum can_mode mode);
|
||||
int (*do_set_termination)(struct net_device *dev, u16 term);
|
||||
int (*do_get_state)(const struct net_device *dev,
|
||||
enum can_state *state);
|
||||
int (*do_get_berr_counter)(const struct net_device *dev,
|
||||
|
include/linux/can/rx-offload.h (new file, 59 lines)
@@ -0,0 +1,59 @@
|
||||
/*
|
||||
* linux/can/rx-offload.h
|
||||
*
|
||||
* Copyright (c) 2014 David Jander, Protonic Holland
|
||||
* Copyright (c) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the version 2 of the GNU General Public License
|
||||
* as published by the Free Software Foundation
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#ifndef _CAN_RX_OFFLOAD_H
|
||||
#define _CAN_RX_OFFLOAD_H
|
||||
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/can.h>
|
||||
|
||||
struct can_rx_offload {
|
||||
struct net_device *dev;
|
||||
|
||||
unsigned int (*mailbox_read)(struct can_rx_offload *offload, struct can_frame *cf,
|
||||
u32 *timestamp, unsigned int mb);
|
||||
|
||||
struct sk_buff_head skb_queue;
|
||||
u32 skb_queue_len_max;
|
||||
|
||||
unsigned int mb_first;
|
||||
unsigned int mb_last;
|
||||
|
||||
struct napi_struct napi;
|
||||
|
||||
bool inc;
|
||||
};
|
||||
|
||||
int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload);
|
||||
int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight);
|
||||
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg);
|
||||
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
|
||||
int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb);
|
||||
void can_rx_offload_reset(struct can_rx_offload *offload);
|
||||
void can_rx_offload_del(struct can_rx_offload *offload);
|
||||
void can_rx_offload_enable(struct can_rx_offload *offload);
|
||||
|
||||
static inline void can_rx_offload_schedule(struct can_rx_offload *offload)
|
||||
{
|
||||
napi_schedule(&offload->napi);
|
||||
}
|
||||
|
||||
static inline void can_rx_offload_disable(struct can_rx_offload *offload)
|
||||
{
|
||||
napi_disable(&offload->napi);
|
||||
}
|
||||
|
||||
#endif /* !_CAN_RX_OFFLOAD_H */
|
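A condensed sketch of how a CAN controller driver might use the new rx-offload API in FIFO mode; everything prefixed my_ is hypothetical, and only the offload calls follow the declarations above.

#include <linux/interrupt.h>
#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

struct my_can_priv {
	struct can_priv can;			/* must stay first */
	struct can_rx_offload offload;
};

static unsigned int my_mailbox_read(struct can_rx_offload *offload,
				    struct can_frame *cf, u32 *timestamp,
				    unsigned int mb)
{
	/* Fill @cf from mailbox @mb here; this stub reports nothing pending
	 * (the exact return convention is left to the driver/core). */
	return 0;
}

static int my_can_setup_offload(struct net_device *dev)
{
	struct my_can_priv *priv = netdev_priv(dev);

	priv->offload.mailbox_read = my_mailbox_read;
	return can_rx_offload_add_fifo(dev, &priv->offload, 64);
}

static irqreturn_t my_can_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct my_can_priv *priv = netdev_priv(dev);

	/* Drain received frames into the offload queue from hard-IRQ context;
	 * NAPI then delivers them to the stack. */
	can_rx_offload_irq_offload_fifo(&priv->offload);
	return IRQ_HANDLED;
}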
@@ -48,6 +48,7 @@ struct ceph_options {
|
||||
unsigned long mount_timeout; /* jiffies */
|
||||
unsigned long osd_idle_ttl; /* jiffies */
|
||||
unsigned long osd_keepalive_timeout; /* jiffies */
|
||||
unsigned long osd_request_timeout; /* jiffies */
|
||||
|
||||
/*
|
||||
* any type that can't be simply compared or doesn't need need
|
||||
@@ -68,6 +69,7 @@ struct ceph_options {
|
||||
#define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000)
|
||||
#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000)
|
||||
#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000)
|
||||
#define CEPH_OSD_REQUEST_TIMEOUT_DEFAULT 0 /* no timeout */
|
||||
|
||||
#define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000)
|
||||
#define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000)
|
||||
|
@@ -22,7 +22,6 @@ struct ceph_osd_client;
|
||||
* completion callback for async writepages
|
||||
*/
|
||||
typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *);
|
||||
typedef void (*ceph_osdc_unsafe_callback_t)(struct ceph_osd_request *, bool);
|
||||
|
||||
#define CEPH_HOMELESS_OSD -1
|
||||
|
||||
@@ -170,15 +169,12 @@ struct ceph_osd_request {
|
||||
unsigned int r_num_ops;
|
||||
|
||||
int r_result;
|
||||
bool r_got_reply;
|
||||
|
||||
struct ceph_osd_client *r_osdc;
|
||||
struct kref r_kref;
|
||||
bool r_mempool;
|
||||
struct completion r_completion;
|
||||
struct completion r_done_completion; /* fsync waiter */
|
||||
struct completion r_completion; /* private to osd_client.c */
|
||||
ceph_osdc_callback_t r_callback;
|
||||
ceph_osdc_unsafe_callback_t r_unsafe_callback;
|
||||
struct list_head r_unsafe_item;
|
||||
|
||||
struct inode *r_inode; /* for use by callbacks */
|
||||
@@ -193,6 +189,7 @@ struct ceph_osd_request {
|
||||
|
||||
/* internal */
|
||||
unsigned long r_stamp; /* jiffies, send or check time */
|
||||
unsigned long r_start_stamp; /* jiffies */
|
||||
int r_attempts;
|
||||
struct ceph_eversion r_replay_version; /* aka reassert_version */
|
||||
u32 r_last_force_resend;
|
||||
|
@@ -57,7 +57,7 @@ static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool)
|
||||
case CEPH_POOL_TYPE_EC:
|
||||
return false;
|
||||
default:
|
||||
BUG_ON(1);
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -81,13 +81,6 @@ void ceph_oloc_copy(struct ceph_object_locator *dest,
|
||||
const struct ceph_object_locator *src);
|
||||
void ceph_oloc_destroy(struct ceph_object_locator *oloc);
|
||||
|
||||
/*
|
||||
* Maximum supported by kernel client object name length
|
||||
*
|
||||
* (probably outdated: must be >= RBD_MAX_MD_NAME_LEN -- currently 100)
|
||||
*/
|
||||
#define CEPH_MAX_OID_NAME_LEN 100
|
||||
|
||||
/*
|
||||
* 51-char inline_name is long enough for all cephfs and all but one
|
||||
* rbd requests: <imgname> in "<imgname>.rbd"/"rbd_id.<imgname>" can be
|
||||
@@ -173,8 +166,8 @@ struct ceph_osdmap {
|
||||
* the list of osds that store+replicate them. */
|
||||
struct crush_map *crush;
|
||||
|
||||
struct mutex crush_scratch_mutex;
|
||||
int crush_scratch_ary[CEPH_PG_MAX_SIZE * 3];
|
||||
struct mutex crush_workspace_mutex;
|
||||
void *crush_workspace;
|
||||
};
|
||||
|
||||
static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd)
|
||||
|
@@ -50,7 +50,7 @@ struct ceph_timespec {
|
||||
#define CEPH_PG_LAYOUT_LINEAR 2
|
||||
#define CEPH_PG_LAYOUT_HYBRID 3
|
||||
|
||||
#define CEPH_PG_MAX_SIZE 16 /* max # osds in a single pg */
|
||||
#define CEPH_PG_MAX_SIZE 32 /* max # osds in a single pg */
|
||||
|
||||
/*
|
||||
* placement group.
|
||||
|
@@ -148,14 +148,18 @@ struct cgroup_subsys_state {
|
||||
* set for a task.
|
||||
*/
|
||||
struct css_set {
|
||||
/* Reference count */
|
||||
/*
|
||||
* Set of subsystem states, one for each subsystem. This array is
|
||||
* immutable after creation apart from the init_css_set during
|
||||
* subsystem registration (at boot time).
|
||||
*/
|
||||
struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
|
||||
|
||||
/* reference count */
|
||||
atomic_t refcount;
|
||||
|
||||
/*
|
||||
* List running through all cgroup groups in the same hash
|
||||
* slot. Protected by css_set_lock
|
||||
*/
|
||||
struct hlist_node hlist;
|
||||
/* the default cgroup associated with this css_set */
|
||||
struct cgroup *dfl_cgrp;
|
||||
|
||||
/*
|
||||
* Lists running through all tasks using this cgroup group.
|
||||
@@ -167,22 +171,30 @@ struct css_set {
|
||||
struct list_head tasks;
|
||||
struct list_head mg_tasks;
|
||||
|
||||
/* all css_task_iters currently walking this cset */
|
||||
struct list_head task_iters;
|
||||
|
||||
/*
|
||||
* On the default hierarhcy, ->subsys[ssid] may point to a css
|
||||
* attached to an ancestor instead of the cgroup this css_set is
|
||||
* associated with. The following node is anchored at
|
||||
* ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
|
||||
* iterate through all css's attached to a given cgroup.
|
||||
*/
|
||||
struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
|
||||
|
||||
/*
|
||||
* List running through all cgroup groups in the same hash
|
||||
* slot. Protected by css_set_lock
|
||||
*/
|
||||
struct hlist_node hlist;
|
||||
|
||||
/*
|
||||
* List of cgrp_cset_links pointing at cgroups referenced from this
|
||||
* css_set. Protected by css_set_lock.
|
||||
*/
|
||||
struct list_head cgrp_links;
|
||||
|
||||
/* the default cgroup associated with this css_set */
|
||||
struct cgroup *dfl_cgrp;
|
||||
|
||||
/*
|
||||
* Set of subsystem states, one for each subsystem. This array is
|
||||
* immutable after creation apart from the init_css_set during
|
||||
* subsystem registration (at boot time).
|
||||
*/
|
||||
struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
|
||||
|
||||
/*
|
||||
* List of csets participating in the on-going migration either as
|
||||
* source or destination. Protected by cgroup_mutex.
|
||||
@@ -201,18 +213,6 @@ struct css_set {
|
||||
struct cgroup *mg_dst_cgrp;
|
||||
struct css_set *mg_dst_cset;
|
||||
|
||||
/*
|
||||
* On the default hierarhcy, ->subsys[ssid] may point to a css
|
||||
* attached to an ancestor instead of the cgroup this css_set is
|
||||
* associated with. The following node is anchored at
|
||||
* ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
|
||||
* iterate through all css's attached to a given cgroup.
|
||||
*/
|
||||
struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
|
||||
|
||||
/* all css_task_iters currently walking this cset */
|
||||
struct list_head task_iters;
|
||||
|
||||
/* dead and being drained, ignore for migration */
|
||||
bool dead;
|
||||
|
||||
@@ -388,6 +388,9 @@ struct cftype {
|
||||
struct list_head node; /* anchored at ss->cfts */
|
||||
struct kernfs_ops *kf_ops;
|
||||
|
||||
int (*open)(struct kernfs_open_file *of);
|
||||
void (*release)(struct kernfs_open_file *of);
|
||||
|
||||
/*
|
||||
* read_u64() is a shortcut for the common case of returning a
|
||||
* single integer. Use it in place of read()
|
||||
@@ -528,8 +531,8 @@ extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
|
||||
* cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
|
||||
* @tsk: target task
|
||||
*
|
||||
* Called from threadgroup_change_begin() and allows cgroup operations to
|
||||
* synchronize against threadgroup changes using a percpu_rw_semaphore.
|
||||
* Allows cgroup operations to synchronize against threadgroup changes
|
||||
* using a percpu_rw_semaphore.
|
||||
*/
|
||||
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
|
||||
{
|
||||
@@ -540,8 +543,7 @@ static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
|
||||
* cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
|
||||
* @tsk: target task
|
||||
*
|
||||
* Called from threadgroup_change_end(). Counterpart of
|
||||
* cgroup_threadcgroup_change_begin().
|
||||
* Counterpart of cgroup_threadcgroup_change_begin().
|
||||
*/
|
||||
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
|
||||
{
|
||||
@@ -552,7 +554,11 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
|
||||
|
||||
#define CGROUP_SUBSYS_COUNT 0
|
||||
|
||||
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {}
|
||||
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
|
||||
{
|
||||
might_sleep();
|
||||
}
|
||||
|
||||
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
|
||||
|
||||
#endif /* CONFIG_CGROUPS */
|
||||
|
@@ -266,7 +266,7 @@ void css_task_iter_end(struct css_task_iter *it);
|
||||
* cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
|
||||
* @leader: the loop cursor
|
||||
* @dst_css: the destination css
|
||||
* @tset: takset to iterate
|
||||
* @tset: taskset to iterate
|
||||
*
|
||||
* Iterate threadgroup leaders of @tset. For single-task migrations, @tset
|
||||
* may not contain any.
|
||||
|
include/linux/cgroup_rdma.h (new file, 53 lines)
@@ -0,0 +1,53 @@
|
||||
/*
|
||||
* Copyright (C) 2016 Parav Pandit <pandit.parav@gmail.com>
|
||||
*
|
||||
* This file is subject to the terms and conditions of version 2 of the GNU
|
||||
* General Public License. See the file COPYING in the main directory of the
|
||||
* Linux distribution for more details.
|
||||
*/
|
||||
|
||||
#ifndef _CGROUP_RDMA_H
|
||||
#define _CGROUP_RDMA_H
|
||||
|
||||
#include <linux/cgroup.h>
|
||||
|
||||
enum rdmacg_resource_type {
|
||||
RDMACG_RESOURCE_HCA_HANDLE,
|
||||
RDMACG_RESOURCE_HCA_OBJECT,
|
||||
RDMACG_RESOURCE_MAX,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_CGROUP_RDMA
|
||||
|
||||
struct rdma_cgroup {
|
||||
struct cgroup_subsys_state css;
|
||||
|
||||
/*
|
||||
* head to keep track of all resource pools
|
||||
* that belongs to this cgroup.
|
||||
*/
|
||||
struct list_head rpools;
|
||||
};
|
||||
|
||||
struct rdmacg_device {
|
||||
struct list_head dev_node;
|
||||
struct list_head rpools;
|
||||
char *name;
|
||||
};
|
||||
|
||||
/*
|
||||
* APIs for RDMA/IB stack to publish when a device wants to
|
||||
* participate in resource accounting
|
||||
*/
|
||||
int rdmacg_register_device(struct rdmacg_device *device);
|
||||
void rdmacg_unregister_device(struct rdmacg_device *device);
|
||||
|
||||
/* APIs for RDMA/IB stack to charge/uncharge pool specific resources */
|
||||
int rdmacg_try_charge(struct rdma_cgroup **rdmacg,
|
||||
struct rdmacg_device *device,
|
||||
enum rdmacg_resource_type index);
|
||||
void rdmacg_uncharge(struct rdma_cgroup *cg,
|
||||
struct rdmacg_device *device,
|
||||
enum rdmacg_resource_type index);
|
||||
#endif /* CONFIG_CGROUP_RDMA */
|
||||
#endif /* _CGROUP_RDMA_H */
|
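A brief sketch (assuming CONFIG_CGROUP_RDMA and hypothetical device/function names) of how an IB driver might register with the new rdma cgroup controller and charge one resource.

#include <linux/cgroup_rdma.h>

static char my_hca_name[] = "my_hca0";		/* hypothetical device name */
static struct rdmacg_device my_cg_device;

static int my_hca_register(void)
{
	my_cg_device.name = my_hca_name;
	return rdmacg_register_device(&my_cg_device);
}

static int my_hca_alloc_object(void)
{
	struct rdma_cgroup *cg;
	int ret;

	ret = rdmacg_try_charge(&cg, &my_cg_device,
				RDMACG_RESOURCE_HCA_OBJECT);
	if (ret)
		return ret;

	/* ... create the HCA object, keeping @cg so a failure path can call:
	 * rdmacg_uncharge(cg, &my_cg_device, RDMACG_RESOURCE_HCA_OBJECT); */
	return 0;
}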
@@ -56,6 +56,10 @@ SUBSYS(hugetlb)
|
||||
SUBSYS(pids)
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_CGROUP_RDMA)
|
||||
SUBSYS(rdma)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The following subsystems are not supported on the default hierarchy.
|
||||
*/
|
||||
|
@@ -29,6 +29,7 @@ extern int __init cma_declare_contiguous(phys_addr_t base,
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
					unsigned int order_per_bit,
					struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align);
extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
			      gfp_t gfp_mask);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
#endif
@@ -711,8 +711,10 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long);
|
||||
compat_stack_t __user *__uss = uss; \
|
||||
struct task_struct *t = current; \
|
||||
put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \
|
||||
put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
|
||||
put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \
|
||||
put_user_ex(t->sas_ss_size, &__uss->ss_size); \
|
||||
if (t->sas_ss_flags & SS_AUTODISARM) \
|
||||
sas_ss_reset(t); \
|
||||
} while (0);
|
||||
|
||||
asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
|
||||
|
@@ -116,11 +116,13 @@
|
||||
*/
|
||||
#define __pure __attribute__((pure))
|
||||
#define __aligned(x) __attribute__((aligned(x)))
|
||||
#define __aligned_largest __attribute__((aligned))
|
||||
#define __printf(a, b) __attribute__((format(printf, a, b)))
|
||||
#define __scanf(a, b) __attribute__((format(scanf, a, b)))
|
||||
#define __attribute_const__ __attribute__((__const__))
|
||||
#define __maybe_unused __attribute__((unused))
|
||||
#define __always_unused __attribute__((unused))
|
||||
#define __mode(x) __attribute__((mode(x)))
|
||||
|
||||
/* gcc version specific checks */
|
||||
|
||||
@@ -195,6 +197,17 @@
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_STACK_VALIDATION
|
||||
#define annotate_unreachable() ({ \
|
||||
asm("%c0:\t\n" \
|
||||
".pushsection .discard.unreachable\t\n" \
|
||||
".long %c0b - .\t\n" \
|
||||
".popsection\t\n" : : "i" (__LINE__)); \
|
||||
})
|
||||
#else
|
||||
#define annotate_unreachable()
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Mark a position in code as unreachable. This can be used to
|
||||
* suppress control flow warnings after asm blocks that transfer
|
||||
@@ -204,7 +217,8 @@
|
||||
* this in the preprocessor, but we can live with this because they're
|
||||
* unreleased. Really, we need to have autoconf for the kernel.
|
||||
*/
|
||||
#define unreachable() __builtin_unreachable()
|
||||
#define unreachable() \
|
||||
do { annotate_unreachable(); __builtin_unreachable(); } while (0)
|
||||
|
||||
/* Mark a function definition as prohibited from being cloned. */
|
||||
#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
|
||||
|
@@ -27,7 +27,11 @@ extern void __chk_user_ptr(const volatile void __user *);
|
||||
extern void __chk_io_ptr(const volatile void __iomem *);
|
||||
# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
|
||||
#else /* __CHECKER__ */
|
||||
# define __user
|
||||
# ifdef STRUCTLEAK_PLUGIN
|
||||
# define __user __attribute__((user))
|
||||
# else
|
||||
# define __user
|
||||
# endif
|
||||
# define __kernel
|
||||
# define __safe
|
||||
# define __force
|
||||
@@ -101,29 +105,36 @@ struct ftrace_branch_data {
|
||||
};
|
||||
};
|
||||
|
||||
struct ftrace_likely_data {
|
||||
struct ftrace_branch_data data;
|
||||
unsigned long constant;
|
||||
};
|
||||
|
||||
/*
|
||||
* Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
|
||||
* to disable branch tracing on a per file basis.
|
||||
*/
|
||||
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
|
||||
&& !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
|
||||
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
|
||||
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
|
||||
int expect, int is_constant);
|
||||
|
||||
#define likely_notrace(x) __builtin_expect(!!(x), 1)
|
||||
#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
|
||||
|
||||
#define __branch_check__(x, expect) ({ \
|
||||
#define __branch_check__(x, expect, is_constant) ({ \
|
||||
int ______r; \
|
||||
static struct ftrace_branch_data \
|
||||
static struct ftrace_likely_data \
|
||||
__attribute__((__aligned__(4))) \
|
||||
__attribute__((section("_ftrace_annotated_branch"))) \
|
||||
______f = { \
|
||||
.func = __func__, \
|
||||
.file = __FILE__, \
|
||||
.line = __LINE__, \
|
||||
.data.func = __func__, \
|
||||
.data.file = __FILE__, \
|
||||
.data.line = __LINE__, \
|
||||
}; \
|
||||
______r = likely_notrace(x); \
|
||||
ftrace_likely_update(&______f, ______r, expect); \
|
||||
______r = __builtin_expect(!!(x), expect); \
|
||||
ftrace_likely_update(&______f, ______r, \
|
||||
expect, is_constant); \
|
||||
______r; \
|
||||
})
|
||||
|
||||
@@ -133,10 +144,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
|
||||
* written by Daniel Walker.
|
||||
*/
|
||||
# ifndef likely
|
||||
# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
|
||||
# define likely(x) (__branch_check__(x, 1, __builtin_constant_p(x)))
|
||||
# endif
|
||||
# ifndef unlikely
|
||||
# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
|
||||
# define unlikely(x) (__branch_check__(x, 0, __builtin_constant_p(x)))
|
||||
# endif
|
||||
|
||||
#ifdef CONFIG_PROFILE_ALL_BRANCHES
|
||||
@@ -566,12 +577,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
|
||||
(_________p1); \
|
||||
})
|
||||
|
||||
/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
|
||||
#ifdef CONFIG_KPROBES
|
||||
# define __kprobes __attribute__((__section__(".kprobes.text")))
|
||||
# define nokprobe_inline __always_inline
|
||||
#else
|
||||
# define __kprobes
|
||||
# define nokprobe_inline inline
|
||||
#endif
|
||||
#endif /* __LINUX_COMPILER_H */
|
||||
|
@@ -72,6 +72,10 @@ struct consw {
|
||||
void (*con_invert_region)(struct vc_data *, u16 *, int);
|
||||
u16 *(*con_screen_pos)(struct vc_data *, int);
|
||||
unsigned long (*con_getxy)(struct vc_data *, unsigned long, int *, int *);
|
||||
/*
|
||||
* Flush the video console driver's scrollback buffer
|
||||
*/
|
||||
void (*con_flush_scrollback)(struct vc_data *);
|
||||
/*
|
||||
* Prepare the console for the debugger. This includes, but is not
|
||||
* limited to, unblanking the console, loading an appropriate
|
||||
|
@@ -30,6 +30,8 @@ struct cpu {
|
||||
|
||||
extern void boot_cpu_init(void);
|
||||
extern void boot_cpu_state_init(void);
|
||||
extern void cpu_init(void);
|
||||
extern void trap_init(void);
|
||||
|
||||
extern int register_cpu(struct cpu *cpu, int num);
|
||||
extern struct device *get_cpu_device(unsigned cpu);
|
||||
|
@@ -26,7 +26,6 @@ enum cpuhp_state {
|
||||
CPUHP_ARM_OMAP_WAKE_DEAD,
|
||||
CPUHP_IRQ_POLL_DEAD,
|
||||
CPUHP_BLOCK_SOFTIRQ_DEAD,
|
||||
CPUHP_VIRT_SCSI_DEAD,
|
||||
CPUHP_ACPI_CPUDRV_DEAD,
|
||||
CPUHP_S390_PFAULT_DEAD,
|
||||
CPUHP_BLK_MQ_DEAD,
|
||||
@@ -137,6 +136,7 @@ enum cpuhp_state {
|
||||
CPUHP_AP_PERF_ARM_CCI_ONLINE,
|
||||
CPUHP_AP_PERF_ARM_CCN_ONLINE,
|
||||
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
|
||||
CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
|
||||
CPUHP_AP_WORKQUEUE_ONLINE,
|
||||
CPUHP_AP_RCUTREE_ONLINE,
|
||||
CPUHP_AP_ONLINE_DYN,
|
||||
|
@@ -62,6 +62,7 @@ struct cpuidle_state {
|
||||
};
|
||||
|
||||
/* Idle State Flags */
|
||||
#define CPUIDLE_FLAG_NONE (0x00)
|
||||
#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
|
||||
#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */
|
||||
|
||||
|
@@ -9,6 +9,8 @@
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/sched/topology.h>
|
||||
#include <linux/sched/task.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/nodemask.h>
|
||||
#include <linux/mm.h>
|
||||
|
@@ -1,13 +0,0 @@
|
||||
#ifndef __LINUX_CPUTIME_H
|
||||
#define __LINUX_CPUTIME_H
|
||||
|
||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
|
||||
#include <asm/cputime.h>
|
||||
|
||||
#ifndef cputime_to_nsecs
|
||||
# define cputime_to_nsecs(__ct) \
|
||||
(cputime_to_usecs(__ct) * NSEC_PER_USEC)
|
||||
#endif
|
||||
|
||||
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
|
||||
#endif /* __LINUX_CPUTIME_H */
|
@@ -18,8 +18,9 @@
|
||||
#include <linux/selinux.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/uidgid.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/sched/user.h>
|
||||
|
||||
struct user_struct;
|
||||
struct cred;
|
||||
struct inode;
|
||||
|
||||
|
@@ -135,13 +135,6 @@ struct crush_bucket {
|
||||
__u32 size; /* num items */
|
||||
__s32 *items;
|
||||
|
||||
/*
|
||||
* cached random permutation: used for uniform bucket and for
|
||||
* the linear search fallback for the other bucket types.
|
||||
*/
|
||||
__u32 perm_x; /* @x for which *perm is defined */
|
||||
__u32 perm_n; /* num elements of *perm that are permuted/defined */
|
||||
__u32 *perm;
|
||||
};
|
||||
|
||||
struct crush_bucket_uniform {
|
||||
@@ -211,6 +204,21 @@ struct crush_map {
|
||||
* device fails. */
|
||||
__u8 chooseleaf_stable;
|
||||
|
||||
/*
|
||||
* This value is calculated after decode or construction by
|
||||
* the builder. It is exposed here (rather than having a
|
||||
* 'build CRUSH working space' function) so that callers can
|
||||
* reserve a static buffer, allocate space on the stack, or
|
||||
* otherwise avoid calling into the heap allocator if they
|
||||
* want to. The size of the working space depends on the map,
|
||||
* while the size of the scratch vector passed to the mapper
|
||||
* depends on the size of the desired result set.
|
||||
*
|
||||
* Nothing stops the caller from allocating both in one swell
|
||||
* foop and passing in two points, though.
|
||||
*/
|
||||
size_t working_size;
|
||||
|
||||
#ifndef __KERNEL__
|
||||
/*
|
||||
* version 0 (original) of straw_calc has various flaws. version 1
|
||||
@@ -248,4 +256,23 @@ static inline int crush_calc_tree_node(int i)
|
||||
return ((i+1) << 1)-1;
|
||||
}
|
||||
|
||||
/*
|
||||
* These data structures are private to the CRUSH implementation. They
|
||||
* are exposed in this header file because builder needs their
|
||||
* definitions to calculate the total working size.
|
||||
*
|
||||
* Moving this out of the crush map allow us to treat the CRUSH map as
|
||||
* immutable within the mapper and removes the requirement for a CRUSH
|
||||
* map lock.
|
||||
*/
|
||||
struct crush_work_bucket {
|
||||
__u32 perm_x; /* @x for which *perm is defined */
|
||||
__u32 perm_n; /* num elements of *perm that are permuted/defined */
|
||||
__u32 *perm; /* Permutation of the bucket's items */
|
||||
};
|
||||
|
||||
struct crush_work {
|
||||
struct crush_work_bucket **work; /* Per-bucket working store */
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@@ -15,6 +15,20 @@ extern int crush_do_rule(const struct crush_map *map,
			 int ruleno,
			 int x, int *result, int result_max,
			 const __u32 *weights, int weight_max,
			 int *scratch);
			 void *cwin);

/*
 * Returns the exact amount of workspace that will need to be used
 * for a given combination of crush_map and result_max. The caller can
 * then allocate this much on its own, either on the stack, in a
 * per-thread long-lived buffer, or however it likes.
 */
static inline size_t crush_work_size(const struct crush_map *map,
				     int result_max)
{
	return map->working_size + result_max * 3 * sizeof(__u32);
}

void crush_init_workspace(const struct crush_map *map, void *v);

#endif
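A sketch of the new calling convention (allocation strategy and function name are illustrative): size the workspace with crush_work_size(), initialize it, then hand it to crush_do_rule() in place of the old scratch array.

#include <linux/slab.h>
#include <linux/crush/crush.h>
#include <linux/crush/mapper.h>

static int my_do_rule(const struct crush_map *map, int ruleno, int x,
		      int *result, int result_max,
		      const __u32 *weights, int weight_max)
{
	void *work;
	int len;

	work = kmalloc(crush_work_size(map, result_max), GFP_NOIO);
	if (!work)
		return -ENOMEM;

	/* Set up the per-bucket crush_work_bucket pointers inside @work. */
	crush_init_workspace(map, work);
	len = crush_do_rule(map, ruleno, x, result, result_max,
			    weights, weight_max, work);
	kfree(work);

	return len;
}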
@@ -37,9 +37,9 @@ static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
|
||||
}
|
||||
|
||||
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
|
||||
struct iomap_ops *ops);
|
||||
int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
|
||||
struct iomap_ops *ops);
|
||||
const struct iomap_ops *ops);
|
||||
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
|
||||
const struct iomap_ops *ops);
|
||||
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
|
||||
int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index);
|
||||
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
|
||||
@@ -71,21 +71,13 @@ static inline unsigned int dax_radix_order(void *entry)
|
||||
return PMD_SHIFT - PAGE_SHIFT;
|
||||
return 0;
|
||||
}
|
||||
int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
|
||||
pmd_t *pmd, unsigned int flags, struct iomap_ops *ops);
|
||||
#else
|
||||
static inline unsigned int dax_radix_order(void *entry)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline int dax_iomap_pmd_fault(struct vm_area_struct *vma,
|
||||
unsigned long address, pmd_t *pmd, unsigned int flags,
|
||||
struct iomap_ops *ops)
|
||||
{
|
||||
return VM_FAULT_FALLBACK;
|
||||
}
|
||||
#endif
|
||||
int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
|
||||
int dax_pfn_mkwrite(struct vm_fault *vmf);
|
||||
|
||||
static inline bool vma_is_dax(struct vm_area_struct *vma)
|
||||
{
|
||||
|
@@ -11,6 +11,7 @@
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/lockref.h>
|
||||
#include <linux/stringhash.h>
|
||||
#include <linux/wait.h>
|
||||
|
||||
struct path;
|
||||
struct vfsmount;
|
||||
@@ -562,7 +563,7 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
|
||||
* @inode: inode to select the dentry from multiple layers (can be NULL)
|
||||
* @flags: open flags to control copy-up behavior
|
||||
*
|
||||
* If dentry is on an union/overlay, then return the underlying, real dentry.
|
||||
* If dentry is on a union/overlay, then return the underlying, real dentry.
|
||||
* Otherwise return the dentry itself.
|
||||
*
|
||||
* See also: Documentation/filesystems/vfs.txt
|
||||
@@ -581,7 +582,7 @@ static inline struct dentry *d_real(struct dentry *dentry,
|
||||
* d_real_inode - Return the real inode
|
||||
* @dentry: The dentry to query
|
||||
*
|
||||
* If dentry is on an union/overlay, then return the underlying, real inode.
|
||||
* If dentry is on a union/overlay, then return the underlying, real inode.
|
||||
* Otherwise return d_inode().
|
||||
*/
|
||||
static inline struct inode *d_real_inode(const struct dentry *dentry)
|
||||
|
@@ -163,6 +163,7 @@ struct dccp_request_sock {
|
||||
__u64 dreq_isr;
|
||||
__u64 dreq_gsr;
|
||||
__be32 dreq_service;
|
||||
spinlock_t dreq_lock;
|
||||
struct list_head dreq_featneg;
|
||||
__u32 dreq_timestamp_echo;
|
||||
__u32 dreq_timestamp_time;
|
||||
|
@@ -52,8 +52,7 @@ extern struct srcu_struct debugfs_srcu;
|
||||
* Must only be called under the protection established by
|
||||
* debugfs_use_file_start().
|
||||
*/
|
||||
static inline const struct file_operations *
|
||||
debugfs_real_fops(const struct file *filp)
|
||||
static inline const struct file_operations *debugfs_real_fops(const struct file *filp)
|
||||
__must_hold(&debugfs_srcu)
|
||||
{
|
||||
/*
|
||||
@@ -99,9 +98,10 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
|
||||
struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
|
||||
const char *dest);
|
||||
|
||||
typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
|
||||
struct dentry *debugfs_create_automount(const char *name,
|
||||
struct dentry *parent,
|
||||
struct vfsmount *(*f)(void *),
|
||||
debugfs_automount_t f,
|
||||
void *data);
|
||||
|
||||
void debugfs_remove(struct dentry *dentry);
|
||||
|
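A sketch of the new typedef-based signature for debugfs_create_automount(); the callback body and names are placeholders, not from this patch.

#include <linux/debugfs.h>

static struct vfsmount *my_automount(struct dentry *dentry, void *data)
{
	/*
	 * Mount and return the filesystem instance that should appear under
	 * this dentry; @data is whatever was passed below. Stubbed out here.
	 */
	return NULL;
}

static struct dentry *my_add_automount(struct dentry *parent, void *data)
{
	/* The callback argument now has the explicit debugfs_automount_t type. */
	return debugfs_create_automount("mydir", parent, my_automount, data);
}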
@@ -18,8 +18,6 @@
|
||||
#define _LINUX_DELAYACCT_H
|
||||
|
||||
#include <uapi/linux/taskstats.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
/*
|
||||
* Per-task flags relevant to delay accounting
|
||||
@@ -30,7 +28,43 @@
|
||||
#define DELAYACCT_PF_BLKIO 0x00000002 /* I am waiting on IO */
|
||||
|
||||
#ifdef CONFIG_TASK_DELAY_ACCT
|
||||
struct task_delay_info {
|
||||
spinlock_t lock;
|
||||
unsigned int flags; /* Private per-task flags */
|
||||
|
||||
/* For each stat XXX, add following, aligned appropriately
|
||||
*
|
||||
* struct timespec XXX_start, XXX_end;
|
||||
* u64 XXX_delay;
|
||||
* u32 XXX_count;
|
||||
*
|
||||
* Atomicity of updates to XXX_delay, XXX_count protected by
|
||||
* single lock above (split into XXX_lock if contention is an issue).
|
||||
*/
|
||||
|
||||
/*
|
||||
* XXX_count is incremented on every XXX operation, the delay
|
||||
* associated with the operation is added to XXX_delay.
|
||||
* XXX_delay contains the accumulated delay time in nanoseconds.
|
||||
*/
|
||||
u64 blkio_start; /* Shared by blkio, swapin */
|
||||
u64 blkio_delay; /* wait for sync block io completion */
|
||||
u64 swapin_delay; /* wait for swapin block io completion */
|
||||
u32 blkio_count; /* total count of the number of sync block */
|
||||
/* io operations performed */
|
||||
u32 swapin_count; /* total count of the number of swapin block */
|
||||
/* io operations performed */
|
||||
|
||||
u64 freepages_start;
|
||||
u64 freepages_delay; /* wait for memory reclaim */
|
||||
u32 freepages_count; /* total count of memory reclaim */
|
||||
};
|
||||
#endif
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#ifdef CONFIG_TASK_DELAY_ACCT
|
||||
extern int delayacct_on; /* Delay accounting turned on/off */
|
||||
extern struct kmem_cache *delayacct_cache;
|
||||
extern void delayacct_init(void);
|
||||
|
@@ -88,6 +88,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
|
||||
*
|
||||
* @suspend: Called when a device on this bus wants to go to sleep mode.
|
||||
* @resume: Called to bring a device on this bus out of sleep mode.
|
||||
* @num_vf: Called to find out how many virtual functions a device on this
|
||||
* bus supports.
|
||||
* @pm: Power management operations of this bus, callback the specific
|
||||
* device driver's pm-ops.
|
||||
* @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
|
||||
@@ -127,6 +129,8 @@ struct bus_type {
|
||||
int (*suspend)(struct device *dev, pm_message_t state);
|
||||
int (*resume)(struct device *dev);
|
||||
|
||||
int (*num_vf)(struct device *dev);
|
||||
|
||||
const struct dev_pm_ops *pm;
|
||||
|
||||
const struct iommu_ops *iommu_ops;
|
||||
@@ -921,6 +925,7 @@ struct device {
|
||||
#ifdef CONFIG_NUMA
|
||||
int numa_node; /* NUMA node this device is close to */
|
||||
#endif
|
||||
const struct dma_map_ops *dma_ops;
|
||||
u64 *dma_mask; /* dma mask (if dma'able device) */
|
||||
u64 coherent_dma_mask;/* Like dma_mask, but for
|
||||
alloc_coherent mappings as
|
||||
@@ -1140,6 +1145,13 @@ extern int device_online(struct device *dev);
|
||||
extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
|
||||
extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
|
||||
|
||||
static inline int dev_num_vf(struct device *dev)
|
||||
{
|
||||
if (dev->bus && dev->bus->num_vf)
|
||||
return dev->bus->num_vf(dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Root device objects for grouping under /sys/devices
|
||||
*/
|
||||
|
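A trivial usage sketch for the new helper (device and message are illustrative): dev_num_vf() falls back to 0 when the bus provides no ->num_vf callback.

#include <linux/device.h>

static int my_report_vfs(struct device *dev)
{
	int num_vf = dev_num_vf(dev);

	dev_info(dev, "bus reports %d virtual functions\n", num_vf);
	return num_vf;
}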
@@ -39,23 +39,6 @@ struct dma_buf_attachment;
|
||||
|
||||
/**
|
||||
* struct dma_buf_ops - operations possible on struct dma_buf
|
||||
* @attach: [optional] allows different devices to 'attach' themselves to the
|
||||
* given buffer. It might return -EBUSY to signal that backing storage
|
||||
* is already allocated and incompatible with the requirements
|
||||
* of requesting device.
|
||||
* @detach: [optional] detach a given device from this buffer.
|
||||
* @map_dma_buf: returns list of scatter pages allocated, increases usecount
|
||||
* of the buffer. Requires atleast one attach to be called
|
||||
* before. Returned sg list should already be mapped into
|
||||
* _device_ address space. This call may sleep. May also return
|
||||
* -EINTR. Should return -EINVAL if attach hasn't been called yet.
|
||||
* @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
|
||||
* pages.
|
||||
* @release: release this buffer; to be called after the last dma_buf_put.
|
||||
* @begin_cpu_access: [optional] called before cpu access to invalidate cpu
|
||||
* caches and allocate backing storage (if not yet done)
|
||||
* respectively pin the object into memory.
|
||||
* @end_cpu_access: [optional] called after cpu access to flush caches.
|
||||
* @kmap_atomic: maps a page from the buffer into kernel address
|
||||
* space, users may not block until the subsequent unmap call.
|
||||
* This callback must not sleep.
|
||||
@@ -63,43 +46,206 @@ struct dma_buf_attachment;
|
||||
* This Callback must not sleep.
|
||||
* @kmap: maps a page from the buffer into kernel address space.
|
||||
* @kunmap: [optional] unmaps a page from the buffer.
|
||||
* @mmap: used to expose the backing storage to userspace. Note that the
|
||||
* mapping needs to be coherent - if the exporter doesn't directly
|
||||
* support this, it needs to fake coherency by shooting down any ptes
|
||||
* when transitioning away from the cpu domain.
|
||||
* @vmap: [optional] creates a virtual mapping for the buffer into kernel
|
||||
* address space. Same restrictions as for vmap and friends apply.
|
||||
* @vunmap: [optional] unmaps a vmap from the buffer
|
||||
*/
|
||||
struct dma_buf_ops {
|
||||
/**
|
||||
* @attach:
|
||||
*
|
||||
* This is called from dma_buf_attach() to make sure that a given
|
||||
* &device can access the provided &dma_buf. Exporters which support
|
||||
* buffer objects in special locations like VRAM or device-specific
|
||||
* carveout areas should check whether the buffer could be move to
|
||||
* system memory (or directly accessed by the provided device), and
|
||||
* otherwise need to fail the attach operation.
|
||||
*
|
||||
* The exporter should also in general check whether the current
|
||||
* allocation fullfills the DMA constraints of the new device. If this
|
||||
* is not the case, and the allocation cannot be moved, it should also
|
||||
* fail the attach operation.
|
||||
*
|
||||
* Any exporter-private housekeeping data can be stored in the
|
||||
* &dma_buf_attachment.priv pointer.
|
||||
*
|
||||
* This callback is optional.
|
||||
*
|
||||
* Returns:
|
||||
*
|
||||
* 0 on success, negative error code on failure. It might return -EBUSY
|
||||
* to signal that backing storage is already allocated and incompatible
|
||||
* with the requirements of requesting device.
|
||||
*/
|
||||
int (*attach)(struct dma_buf *, struct device *,
|
||||
struct dma_buf_attachment *);
|
||||
struct dma_buf_attachment *);
|
||||
|
||||
/**
* @detach:
*
* This is called by dma_buf_detach() to release a &dma_buf_attachment.
* Provided so that exporters can clean up any housekeeping for an
* &dma_buf_attachment.
*
* This callback is optional.
*/
void (*detach)(struct dma_buf *, struct dma_buf_attachment *);

/* For {map,unmap}_dma_buf below, any specific buffer attributes
* required should get added to device_dma_parameters accessible
* via dev->dma_params.
/**
* @map_dma_buf:
*
* This is called by dma_buf_map_attachment() and is used to map a
* shared &dma_buf into device address space, and it is mandatory. It
* can only be called if @attach has been called successfully. This
* essentially pins the DMA buffer into place, and it cannot be moved
* any more.
*
* This call may sleep, e.g. when the backing storage first needs to be
* allocated, or moved to a location suitable for all currently attached
* devices.
*
* Note that any specific buffer attributes required for this function
* should get added to device_dma_parameters accessible via
* &device.dma_params from the &dma_buf_attachment. The @attach callback
* should also check these constraints.
*
* If this is being called for the first time, the exporter can now
* choose to scan through the list of attachments for this buffer,
* collate the requirements of the attached devices, and choose an
* appropriate backing storage for the buffer.
*
* Based on enum dma_data_direction, it might be possible to have
* multiple users accessing at the same time (for reading, maybe), or
* any other kind of sharing that the exporter might wish to make
* available to buffer-users.
*
* Returns:
*
* A &sg_table scatter list of the backing storage of the DMA buffer,
* already mapped into the device address space of the &device attached
* with the provided &dma_buf_attachment.
*
* On failure, returns a negative error value wrapped into a pointer.
* May also return -EINTR when a signal was received while being
* blocked.
*/
struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
enum dma_data_direction);
/**
* @unmap_dma_buf:
*
* This is called by dma_buf_unmap_attachment() and should unmap and
* release the &sg_table allocated in @map_dma_buf, and it is mandatory.
* It should also unpin the backing storage if this is the last mapping
* of the DMA buffer, if the exporter supports backing storage
* migration.
*/
void (*unmap_dma_buf)(struct dma_buf_attachment *,
struct sg_table *,
enum dma_data_direction);

/* TODO: Add try_map_dma_buf version, to return immediately with -EBUSY
* if the call would block.
*/

/* after final dma_buf_put() */
/**
* @release:
*
* Called after the last dma_buf_put to release the &dma_buf, and
* mandatory.
*/
void (*release)(struct dma_buf *);

/**
* @begin_cpu_access:
*
* This is called from dma_buf_begin_cpu_access() and allows the
* exporter to ensure that the memory is actually available for cpu
* access - the exporter might need to allocate or swap-in and pin the
* backing storage. The exporter also needs to ensure that cpu access is
* coherent for the access direction. The direction can be used by the
* exporter to optimize the cache flushing, i.e. access with a different
* direction (read instead of write) might return stale or even bogus
* data (e.g. when the exporter needs to copy the data to temporary
* storage).
*
* This callback is optional.
*
* FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
* from userspace (where storage shouldn't be pinned to avoid handing
* de-facto mlock rights to userspace) and for the kernel-internal
* users of the various kmap interfaces, where the backing storage must
* be pinned to guarantee that the atomic kmap calls can succeed. Since
* there are no in-kernel users of the kmap interfaces yet this isn't a
* real problem.
*
* Returns:
*
* 0 on success or a negative error code on failure. This can for
* example fail when the backing storage can't be allocated. Can also
* return -ERESTARTSYS or -EINTR when the call has been interrupted and
* needs to be restarted.
*/
int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);

/**
* @end_cpu_access:
*
* This is called from dma_buf_end_cpu_access() when the importer is
* done accessing the buffer with the CPU. The exporter can use this to
* flush caches and unpin any resources pinned in @begin_cpu_access.
* The result of any dma_buf kmap calls after end_cpu_access is
* undefined.
*
* This callback is optional.
*
* Returns:
*
* 0 on success or a negative error code on failure. Can return
* -ERESTARTSYS or -EINTR when the call has been interrupted and needs
* to be restarted.
*/
int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
void *(*kmap_atomic)(struct dma_buf *, unsigned long);
void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
void *(*kmap)(struct dma_buf *, unsigned long);
void (*kunmap)(struct dma_buf *, unsigned long, void *);

/**
* @mmap:
*
* This callback is used by the dma_buf_mmap() function.
*
* Note that the mapping does not need to be coherent; userspace is
* expected to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
*
* Because dma-buf buffers have invariant size over their lifetime, the
* dma-buf core checks whether a vma is too large and rejects such
* mappings. The exporter hence does not need to duplicate this check.
*
* If an exporter needs to manually flush caches and hence needs to fake
* coherency for mmap support, it needs to be able to zap all the ptes
* pointing at the backing storage. Now linux mm needs a struct
* address_space associated with the struct file stored in vma->vm_file
* to do that with the function unmap_mapping_range. But the dma_buf
* framework only backs every dma_buf fd with the anon_file struct file,
* i.e. all dma_bufs share the same file.
*
* Hence exporters need to set up their own file (and address_space)
* association by setting vma->vm_file and adjusting vma->vm_pgoff in
* the dma_buf mmap callback. In the specific case of a gem driver the
* exporter could use the shmem file already provided by gem (and set
* vm_pgoff = 0). Exporters can then zap ptes by unmapping the
* corresponding range of the struct address_space associated with their
* own file.
*
* This callback is optional.
*
* Returns:
*
* 0 on success or a negative error code on failure.
*/
int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);

void *(*vmap)(struct dma_buf *);
@@ -124,6 +270,15 @@ struct dma_buf_ops {
* @poll: for userspace poll support
* @cb_excl: for userspace poll support
* @cb_shared: for userspace poll support
*
* This represents a shared buffer, created by calling dma_buf_export(). The
* userspace representation is a normal file descriptor, which can be created by
* calling dma_buf_fd().
*
* Shared dma buffers are reference counted using dma_buf_put() and
* get_dma_buf().
*
* Device DMA access is handled by the separate &struct dma_buf_attachment.
*/
struct dma_buf {
size_t size;
@@ -160,6 +315,11 @@ struct dma_buf {
* This structure holds the attachment information between the dma_buf buffer
* and its user device(s). The list contains one attachment struct per device
* attached to the buffer.
*
* An attachment is created by calling dma_buf_attach(), and released again by
* calling dma_buf_detach(). The DMA mapping itself needed to initiate a
* transfer is created by dma_buf_map_attachment() and freed again by calling
* dma_buf_unmap_attachment().
*/
struct dma_buf_attachment {
struct dma_buf *dmabuf;
@@ -192,9 +352,11 @@ struct dma_buf_export_info {
};

/**
* helper macro for exporters; zeros and fills in most common values
*
* DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
* @name: export-info name
*
* DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info,
* zeroes it out and pre-populates exp_name in it.
*/
#define DEFINE_DMA_BUF_EXPORT_INFO(name) \
struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
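
A minimal exporter-side sketch of how this macro is typically combined with dma_buf_export(); the buffer type, the ops table and the priv pointer are placeholders, not part of this header:

static struct dma_buf *example_export(struct my_buffer *buf)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);	/* zeroed, exp_name = KBUILD_MODNAME */

	exp_info.ops = &my_dma_buf_ops;		/* assumed exporter-provided dma_buf_ops */
	exp_info.size = buf->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buf;

	return dma_buf_export(&exp_info);
}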
@@ -112,7 +112,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
|
||||
}
|
||||
|
||||
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
|
||||
unsigned int order);
|
||||
unsigned int order, gfp_t gfp_mask);
|
||||
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
|
||||
int count);
|
||||
|
||||
@@ -145,7 +145,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size,
|
||||
|
||||
static inline
|
||||
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
|
||||
unsigned int order)
|
||||
unsigned int order, gfp_t gfp_mask)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
@@ -47,7 +47,7 @@ struct dma_fence_cb;
|
||||
* can be compared to decide which fence would be signaled later.
|
||||
* @flags: A mask of DMA_FENCE_FLAG_* defined below
|
||||
* @timestamp: Timestamp when the fence was signaled.
|
||||
* @status: Optional, only valid if < 0, must be set before calling
|
||||
* @error: Optional, only valid if < 0, must be set before calling
|
||||
* dma_fence_signal, indicates that the fence has completed with an error.
|
||||
*
|
||||
* the flags member must be manipulated and read using the appropriate
|
||||
@@ -79,7 +79,7 @@ struct dma_fence {
|
||||
unsigned seqno;
|
||||
unsigned long flags;
|
||||
ktime_t timestamp;
|
||||
int status;
|
||||
int error;
|
||||
};
|
||||
|
||||
enum dma_fence_flag_bits {
|
||||
@@ -133,7 +133,7 @@ struct dma_fence_cb {
|
||||
* or some failure occurred that made it impossible to enable
|
||||
* signaling. True indicates successful enabling.
|
||||
*
|
||||
* fence->status may be set in enable_signaling, but only when false is
|
||||
* fence->error may be set in enable_signaling, but only when false is
|
||||
* returned.
|
||||
*
|
||||
* Calling dma_fence_signal before enable_signaling is called allows
|
||||
@@ -145,7 +145,7 @@ struct dma_fence_cb {
|
||||
* the second time will be a noop since it was already signaled.
|
||||
*
|
||||
* Notes on signaled:
|
||||
* May set fence->status if returning true.
|
||||
* May set fence->error if returning true.
|
||||
*
|
||||
* Notes on wait:
|
||||
* Must not be NULL, set to dma_fence_default_wait for default implementation.
|
||||
@@ -378,6 +378,50 @@ static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,
|
||||
return dma_fence_is_signaled(f2) ? NULL : f2;
|
||||
}
|
||||
|
||||
/**
|
||||
* dma_fence_get_status_locked - returns the status upon completion
|
||||
* @fence: [in] the dma_fence to query
|
||||
*
|
||||
* Drivers can supply an optional error status condition before they signal
|
||||
* the fence (to indicate whether the fence was completed due to an error
|
||||
* rather than success). The value of the status condition is only valid
* if the fence has been signaled, so dma_fence_get_status_locked() first
* checks the signal state before reporting the error status.
*
* Returns 0 if the fence has not yet been signaled, 1 if the fence has
* been signaled without an error condition, or a negative error code
* if the fence has been completed in error.
|
||||
*/
|
||||
static inline int dma_fence_get_status_locked(struct dma_fence *fence)
|
||||
{
|
||||
if (dma_fence_is_signaled_locked(fence))
|
||||
return fence->error ?: 1;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
int dma_fence_get_status(struct dma_fence *fence);
|
||||
|
||||
/**
|
||||
* dma_fence_set_error - flag an error condition on the fence
|
||||
* @fence: [in] the dma_fence
|
||||
* @error: [in] the error to store
|
||||
*
|
||||
* Drivers can supply an optional error status condition before they signal
|
||||
* the fence, to indicate that the fence was completed due to an error
|
||||
* rather than success. This must be set before signaling (so that the value
|
||||
* is visible before any waiters on the signal callback are woken). This
|
||||
* helper exists to help catch erroneous setting of #dma_fence.error.
|
||||
*/
|
||||
static inline void dma_fence_set_error(struct dma_fence *fence,
|
||||
int error)
|
||||
{
|
||||
BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
|
||||
BUG_ON(error >= 0 || error < -MAX_ERRNO);
|
||||
|
||||
fence->error = error;
|
||||
}
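
A minimal sketch of the intended call order (the driver function name is made up): set the error first, then signal, so waiters observe a negative status:

static void example_complete_job(struct dma_fence *fence, int job_status)
{
	if (job_status < 0)
		dma_fence_set_error(fence, job_status);	/* must happen before signaling */

	dma_fence_signal(fence);	/* waiters then see dma_fence_get_status() < 0 */
}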
|
||||
|
||||
signed long dma_fence_wait_timeout(struct dma_fence *,
|
||||
bool intr, signed long timeout);
|
||||
signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
|
||||
|
@@ -134,7 +134,8 @@ struct dma_map_ops {
|
||||
int is_phys;
|
||||
};
|
||||
|
||||
extern struct dma_map_ops dma_noop_ops;
|
||||
extern const struct dma_map_ops dma_noop_ops;
|
||||
extern const struct dma_map_ops dma_virt_ops;
|
||||
|
||||
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
|
||||
|
||||
@@ -171,14 +172,26 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
|
||||
|
||||
#ifdef CONFIG_HAS_DMA
|
||||
#include <asm/dma-mapping.h>
|
||||
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
|
||||
{
|
||||
if (dev && dev->dma_ops)
|
||||
return dev->dma_ops;
|
||||
return get_arch_dma_ops(dev ? dev->bus : NULL);
|
||||
}
|
||||
|
||||
static inline void set_dma_ops(struct device *dev,
|
||||
const struct dma_map_ops *dma_ops)
|
||||
{
|
||||
dev->dma_ops = dma_ops;
|
||||
}
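
Illustrative only: with the ops tables now const, a bus or platform layer could install its table roughly like this (the my_bus_* names are assumptions):

static const struct dma_map_ops my_bus_dma_ops = {
	/* .map_page, .unmap_page, ... would be filled in by a real implementation */
};

static void my_bus_setup_device(struct device *dev)
{
	set_dma_ops(dev, &my_bus_dma_ops);	/* dev->dma_ops now points at const ops */
}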
|
||||
#else
|
||||
/*
|
||||
* Define the dma api to allow compilation but not linking of
|
||||
* dma dependent code. Code that depends on the dma-mapping
|
||||
* API needs to set 'depends on HAS_DMA' in its Kconfig
|
||||
*/
|
||||
extern struct dma_map_ops bad_dma_ops;
|
||||
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
|
||||
extern const struct dma_map_ops bad_dma_ops;
|
||||
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
|
||||
{
|
||||
return &bad_dma_ops;
|
||||
}
|
||||
@@ -189,7 +202,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
|
||||
enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
dma_addr_t addr;
|
||||
|
||||
kmemcheck_mark_initialized(ptr, size);
|
||||
@@ -208,7 +221,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
|
||||
enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
if (ops->unmap_page)
|
||||
@@ -224,7 +237,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
|
||||
int nents, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
int i, ents;
|
||||
struct scatterlist *s;
|
||||
|
||||
@@ -242,7 +255,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
|
||||
int nents, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
debug_dma_unmap_sg(dev, sg, nents, dir);
|
||||
@@ -256,7 +269,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
|
||||
enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
dma_addr_t addr;
|
||||
|
||||
kmemcheck_mark_initialized(page_address(page) + offset, size);
|
||||
@@ -272,7 +285,7 @@ static inline void dma_unmap_page_attrs(struct device *dev,
|
||||
enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
if (ops->unmap_page)
|
||||
@@ -286,7 +299,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev,
|
||||
enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
dma_addr_t addr;
|
||||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
@@ -307,7 +320,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
|
||||
size_t size, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
if (ops->unmap_resource)
|
||||
@@ -319,7 +332,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
|
||||
size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
if (ops->sync_single_for_cpu)
|
||||
@@ -331,7 +344,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
|
||||
dma_addr_t addr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
if (ops->sync_single_for_device)
|
||||
@@ -371,7 +384,7 @@ static inline void
|
||||
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
|
||||
int nelems, enum dma_data_direction dir)
|
||||
{
|
||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
if (ops->sync_sg_for_cpu)
|
||||
@@ -383,7 +396,7 @@ static inline void
|
||||
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
|
||||
int nelems, enum dma_data_direction dir)
|
||||
{
|
||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
if (ops->sync_sg_for_device)
|
||||
@@ -428,7 +441,7 @@ static inline int
|
||||
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
|
||||
dma_addr_t dma_addr, size_t size, unsigned long attrs)
|
||||
{
|
||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
BUG_ON(!ops);
|
||||
if (ops->mmap)
|
||||
return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
|
||||
@@ -446,7 +459,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
|
||||
dma_addr_t dma_addr, size_t size,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
BUG_ON(!ops);
|
||||
if (ops->get_sgtable)
|
||||
return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
|
||||
@@ -464,7 +477,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
|
||||
dma_addr_t *dma_handle, gfp_t flag,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
void *cpu_addr;
|
||||
|
||||
BUG_ON(!ops);
|
||||
@@ -486,7 +499,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
|
||||
void *cpu_addr, dma_addr_t dma_handle,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
|
||||
BUG_ON(!ops);
|
||||
WARN_ON(irqs_disabled());
|
||||
@@ -544,7 +557,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
#ifndef HAVE_ARCH_DMA_SUPPORTED
|
||||
static inline int dma_supported(struct device *dev, u64 mask)
|
||||
{
|
||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
|
||||
if (!ops)
|
||||
return 0;
|
||||
@@ -557,7 +570,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
|
||||
#ifndef HAVE_ARCH_DMA_SET_MASK
|
||||
static inline int dma_set_mask(struct device *dev, u64 mask)
|
||||
{
|
||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
|
||||
if (ops->set_dma_mask)
|
||||
return ops->set_dma_mask(dev, mask);
|
||||
|
@@ -26,7 +26,7 @@
|
||||
#include <linux/msi.h>
|
||||
#include <linux/irqreturn.h>
|
||||
#include <linux/rwsem.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/rculist.h>
|
||||
|
||||
struct acpi_dmar_header;
|
||||
|
||||
|
@@ -3,6 +3,8 @@
|
||||
|
||||
#include <linux/user.h>
|
||||
#include <linux/bug.h>
|
||||
#include <linux/sched/task_stack.h>
|
||||
|
||||
#include <asm/elf.h>
|
||||
#include <uapi/linux/elfcore.h>
|
||||
|
||||
|
@@ -54,6 +54,11 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
|
||||
#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
|
||||
#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
|
||||
|
||||
struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
|
||||
unsigned int txqs,
|
||||
unsigned int rxqs);
|
||||
#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)
|
||||
|
||||
struct sk_buff **eth_gro_receive(struct sk_buff **head,
|
||||
struct sk_buff *skb);
|
||||
int eth_gro_complete(struct sk_buff *skb, int nhoff);
|
||||
@@ -396,6 +401,66 @@ static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* ether_addr_to_u64 - Convert an Ethernet address into a u64 value.
|
||||
* @addr: Pointer to a six-byte array containing the Ethernet address
|
||||
*
|
||||
* Return a u64 value of the address
|
||||
*/
|
||||
static inline u64 ether_addr_to_u64(const u8 *addr)
|
||||
{
|
||||
u64 u = 0;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ETH_ALEN; i++)
|
||||
u = u << 8 | addr[i];
|
||||
|
||||
return u;
|
||||
}
|
||||
|
||||
/**
|
||||
* u64_to_ether_addr - Convert a u64 to an Ethernet address.
|
||||
* @u: u64 to convert to an Ethernet MAC address
|
||||
* @addr: Pointer to a six-byte array to contain the Ethernet address
|
||||
*/
|
||||
static inline void u64_to_ether_addr(u64 u, u8 *addr)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = ETH_ALEN - 1; i >= 0; i--) {
|
||||
addr[i] = u & 0xff;
|
||||
u = u >> 8;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* eth_addr_dec - Decrement the given MAC address
|
||||
*
|
||||
* @addr: Pointer to a six-byte array containing Ethernet address to decrement
|
||||
*/
|
||||
static inline void eth_addr_dec(u8 *addr)
|
||||
{
|
||||
u64 u = ether_addr_to_u64(addr);
|
||||
|
||||
u--;
|
||||
u64_to_ether_addr(u, addr);
|
||||
}
|
||||
|
||||
/**
|
||||
* ether_addr_greater - Compare two Ethernet addresses
|
||||
* @addr1: Pointer to a six-byte array containing the Ethernet address
|
||||
* @addr2: Pointer to another six-byte array containing the Ethernet address
*
* Compare two Ethernet addresses, returns true if addr1 is greater than addr2
|
||||
*/
|
||||
static inline bool ether_addr_greater(const u8 *addr1, const u8 *addr2)
|
||||
{
|
||||
u64 u1 = ether_addr_to_u64(addr1);
|
||||
u64 u2 = ether_addr_to_u64(addr2);
|
||||
|
||||
return u1 > u2;
|
||||
}
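
A small usage sketch of the new helpers (the address value is arbitrary):

static void example_mac_arithmetic(void)
{
	u8 addr[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	u64 as_u64 = ether_addr_to_u64(addr);	/* 0x020000000001 */

	u64_to_ether_addr(as_u64 + 1, addr);	/* 02:00:00:00:00:02 */
	eth_addr_dec(addr);			/* back to 02:00:00:00:00:01 */
}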
|
||||
|
||||
/**
|
||||
* is_etherdev_addr - Tell if given Ethernet address belongs to the device.
|
||||
* @dev: Pointer to a device structure
|
||||
|
@@ -46,7 +46,18 @@
|
||||
#define EXTCON_USB 1
|
||||
#define EXTCON_USB_HOST 2
|
||||
|
||||
/* Charging external connector */
|
||||
/*
|
||||
* Charging external connector
|
||||
*
|
||||
* When an SDP charger connector is reported, we should also report
* the USB connector, which means EXTCON_CHG_USB_SDP should always
* appear together with EXTCON_USB. The same applies to the ACA charger
* connector: EXTCON_CHG_USB_ACA would normally appear with EXTCON_USB_HOST.
|
||||
*
|
||||
* The EXTCON_CHG_USB_SLOW connector can provide at least 500mA of
|
||||
* current at 5V. The EXTCON_CHG_USB_FAST connector can provide at
|
||||
* least 1A of current at 5V.
|
||||
*/
|
||||
#define EXTCON_CHG_USB_SDP 5 /* Standard Downstream Port */
|
||||
#define EXTCON_CHG_USB_DCP 6 /* Dedicated Charging Port */
|
||||
#define EXTCON_CHG_USB_CDP 7 /* Charging Downstream Port */
|
||||
@@ -54,6 +65,7 @@
|
||||
#define EXTCON_CHG_USB_FAST 9
|
||||
#define EXTCON_CHG_USB_SLOW 10
|
||||
#define EXTCON_CHG_WPT 11 /* Wireless Power Transfer */
|
||||
#define EXTCON_CHG_USB_PD 12 /* USB Power Delivery */
|
||||
|
||||
/* Jack external connector */
|
||||
#define EXTCON_JACK_MICROPHONE 20
|
||||
@@ -160,62 +172,7 @@ union extcon_property_value {
|
||||
};
|
||||
|
||||
struct extcon_cable;
|
||||
|
||||
/**
|
||||
* struct extcon_dev - An extcon device represents one external connector.
|
||||
* @name: The name of this extcon device. Parent device name is
|
||||
* used if NULL.
|
||||
* @supported_cable: Array of supported cable names ending with EXTCON_NONE.
|
||||
* If supported_cable is NULL, cable name related APIs
|
||||
* are disabled.
|
||||
* @mutually_exclusive: Array of mutually exclusive set of cables that cannot
|
||||
* be attached simultaneously. The array should be
|
||||
* ending with NULL or be NULL (no mutually exclusive
|
||||
* cables). For example, if it is { 0x7, 0x30, 0}, then,
|
||||
* {0, 1}, {0, 1, 2}, {0, 2}, {1, 2}, or {4, 5} cannot
|
||||
* be attached simultaneously. {0x7, 0} is equivalent to
|
||||
* {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
|
||||
* can be no simultaneous connections.
|
||||
* @dev: Device of this extcon.
|
||||
* @state: Attach/detach state of this extcon. Do not provide at
|
||||
* register-time.
|
||||
* @nh: Notifier for the state change events from this extcon
|
||||
* @entry: To support list of extcon devices so that users can
|
||||
* search for extcon devices based on the extcon name.
|
||||
* @lock:
|
||||
* @max_supported: Internal value to store the number of cables.
|
||||
* @extcon_dev_type: Device_type struct to provide attribute_groups
|
||||
* customized for each extcon device.
|
||||
* @cables: Sysfs subdirectories. Each represents one cable.
|
||||
*
|
||||
* In most cases, users only need to provide "User initializing data" of
|
||||
* this struct when registering an extcon. In some exceptional cases,
|
||||
* optional callbacks may be needed. However, the values in "internal data"
|
||||
* are overwritten by register function.
|
||||
*/
|
||||
struct extcon_dev {
|
||||
/* Optional user initializing data */
|
||||
const char *name;
|
||||
const unsigned int *supported_cable;
|
||||
const u32 *mutually_exclusive;
|
||||
|
||||
/* Internal data. Please do not set. */
|
||||
struct device dev;
|
||||
struct raw_notifier_head *nh;
|
||||
struct list_head entry;
|
||||
int max_supported;
|
||||
spinlock_t lock; /* could be called by irq handler */
|
||||
u32 state;
|
||||
|
||||
/* /sys/class/extcon/.../cable.n/... */
|
||||
struct device_type extcon_dev_type;
|
||||
struct extcon_cable *cables;
|
||||
|
||||
/* /sys/class/extcon/.../mutually_exclusive/... */
|
||||
struct attribute_group attr_g_muex;
|
||||
struct attribute **attrs_muex;
|
||||
struct device_attribute *d_attrs_muex;
|
||||
};
|
||||
struct extcon_dev;
|
||||
|
||||
#if IS_ENABLED(CONFIG_EXTCON)
|
||||
|
||||
|
@@ -59,7 +59,7 @@ struct adc_jack_pdata {
|
||||
const char *name;
|
||||
const char *consumer_channel;
|
||||
|
||||
const enum extcon *cable_names;
|
||||
const unsigned int *cable_names;
|
||||
|
||||
/* The last entry's state should be 0 */
|
||||
struct adc_jack_cond *adc_conditions;
|
||||
|
@@ -36,6 +36,12 @@
|
||||
#define F2FS_NODE_INO(sbi) (sbi->node_ino_num)
|
||||
#define F2FS_META_INO(sbi) (sbi->meta_ino_num)
|
||||
|
||||
#define F2FS_IO_SIZE(sbi) (1 << (sbi)->write_io_size_bits) /* Blocks */
|
||||
#define F2FS_IO_SIZE_KB(sbi) (1 << ((sbi)->write_io_size_bits + 2)) /* KB */
|
||||
#define F2FS_IO_SIZE_BYTES(sbi) (1 << ((sbi)->write_io_size_bits + 12)) /* B */
|
||||
#define F2FS_IO_SIZE_BITS(sbi) ((sbi)->write_io_size_bits) /* power of 2 */
|
||||
#define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1)
|
||||
|
||||
/* This flag is used by node and meta inodes, and by recovery */
|
||||
#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO)
|
||||
#define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM)
|
||||
@@ -108,6 +114,7 @@ struct f2fs_super_block {
|
||||
/*
|
||||
* For checkpoint
|
||||
*/
|
||||
#define CP_NAT_BITS_FLAG 0x00000080
|
||||
#define CP_CRC_RECOVERY_FLAG 0x00000040
|
||||
#define CP_FASTBOOT_FLAG 0x00000020
|
||||
#define CP_FSCK_FLAG 0x00000010
|
||||
@@ -272,6 +279,7 @@ struct f2fs_node {
|
||||
* For NAT entries
|
||||
*/
|
||||
#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
|
||||
#define NAT_ENTRY_BITMAP_SIZE ((NAT_ENTRY_PER_BLOCK + 7) / 8)
|
||||
|
||||
struct f2fs_nat_entry {
|
||||
__u8 version; /* latest version of cached nat entry */
|
||||
|
@@ -61,6 +61,8 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name,
|
||||
|
||||
#endif /* CONFIG_FAULT_INJECTION */
|
||||
|
||||
struct kmem_cache;
|
||||
|
||||
#ifdef CONFIG_FAILSLAB
|
||||
extern bool should_failslab(struct kmem_cache *s, gfp_t gfpflags);
|
||||
#else
|
||||
|
@@ -54,6 +54,12 @@ struct bpf_prog_aux;
|
||||
#define BPF_REG_AX MAX_BPF_REG
|
||||
#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
|
||||
|
||||
/* As per nm, we expose JITed images as text (code) section for
|
||||
* kallsyms. That way, tools like perf can find it to match
|
||||
* addresses.
|
||||
*/
|
||||
#define BPF_SYM_ELF_TYPE 't'
|
||||
|
||||
/* BPF program can access up to 512 bytes of stack space. */
|
||||
#define MAX_BPF_STACK 512
|
||||
|
||||
@@ -403,6 +409,7 @@ struct bpf_prog {
|
||||
u16 pages; /* Number of allocated pages */
|
||||
kmemcheck_bitfield_begin(meta);
|
||||
u16 jited:1, /* Is our filter JIT'ed? */
|
||||
locked:1, /* Program image locked? */
|
||||
gpl_compatible:1, /* Is filter GPL compatible? */
|
||||
cb_access:1, /* Is control block accessed? */
|
||||
dst_needed:1, /* Do we need dst entry? */
|
||||
@@ -545,15 +552,32 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
|
||||
|
||||
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
|
||||
|
||||
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
|
||||
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
|
||||
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
|
||||
{
|
||||
set_memory_ro((unsigned long)fp, fp->pages);
|
||||
fp->locked = 1;
|
||||
WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
|
||||
}
|
||||
|
||||
static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
|
||||
{
|
||||
set_memory_rw((unsigned long)fp, fp->pages);
|
||||
if (fp->locked) {
|
||||
WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
|
||||
/* In case set_memory_rw() fails, we want to be the first
|
||||
* to crash here instead of some random place later on.
|
||||
*/
|
||||
fp->locked = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
|
||||
{
|
||||
WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
|
||||
}
|
||||
|
||||
static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
|
||||
{
|
||||
WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
|
||||
}
|
||||
#else
|
||||
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
|
||||
@@ -563,7 +587,24 @@ static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
|
||||
static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_DEBUG_SET_MODULE_RONX */
|
||||
|
||||
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
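
Illustrative only (example_* is a made-up helper): a JIT that has finished writing its image would seal it; any set_memory failure now triggers a one-time warning instead of being ignored:

static void example_finish_jit(struct bpf_prog *fp, struct bpf_binary_header *hdr)
{
	bpf_jit_binary_lock_ro(hdr);	/* WARN_ON_ONCE() if set_memory_ro() fails */
	fp->jited = 1;
}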
|
||||
|
||||
static inline struct bpf_binary_header *
|
||||
bpf_jit_binary_hdr(const struct bpf_prog *fp)
|
||||
{
|
||||
unsigned long real_start = (unsigned long)fp->bpf_func;
|
||||
unsigned long addr = real_start & PAGE_MASK;
|
||||
|
||||
return (void *)addr;
|
||||
}
|
||||
|
||||
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
|
||||
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
|
||||
@@ -607,6 +648,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
|
||||
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
|
||||
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
|
||||
void bpf_jit_compile(struct bpf_prog *prog);
|
||||
bool bpf_helper_changes_pkt_data(void *func);
|
||||
|
||||
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
|
||||
@@ -616,6 +658,7 @@ void bpf_warn_invalid_xdp_action(u32 act);
|
||||
#ifdef CONFIG_BPF_JIT
|
||||
extern int bpf_jit_enable;
|
||||
extern int bpf_jit_harden;
|
||||
extern int bpf_jit_kallsyms;
|
||||
|
||||
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
|
||||
|
||||
@@ -625,7 +668,6 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
|
||||
bpf_jit_fill_hole_t bpf_fill_ill_insns);
|
||||
void bpf_jit_binary_free(struct bpf_binary_header *hdr);
|
||||
|
||||
void bpf_jit_compile(struct bpf_prog *fp);
|
||||
void bpf_jit_free(struct bpf_prog *fp);
|
||||
|
||||
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
|
||||
@@ -651,6 +693,11 @@ static inline bool bpf_jit_is_ebpf(void)
|
||||
# endif
|
||||
}
|
||||
|
||||
static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
|
||||
{
|
||||
return fp->jited && bpf_jit_is_ebpf();
|
||||
}
|
||||
|
||||
static inline bool bpf_jit_blinding_enabled(void)
|
||||
{
|
||||
/* These are the prerequisites, should someone ever have the
|
||||
@@ -668,15 +715,91 @@ static inline bool bpf_jit_blinding_enabled(void)
|
||||
|
||||
return true;
|
||||
}
|
||||
#else
|
||||
static inline void bpf_jit_compile(struct bpf_prog *fp)
|
||||
|
||||
static inline bool bpf_jit_kallsyms_enabled(void)
|
||||
{
|
||||
/* There are a couple of corner cases where kallsyms should
|
||||
* not be enabled f.e. on hardening.
|
||||
*/
|
||||
if (bpf_jit_harden)
|
||||
return false;
|
||||
if (!bpf_jit_kallsyms)
|
||||
return false;
|
||||
if (bpf_jit_kallsyms == 1)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
|
||||
unsigned long *off, char *sym);
|
||||
bool is_bpf_text_address(unsigned long addr);
|
||||
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
|
||||
char *sym);
|
||||
|
||||
static inline const char *
|
||||
bpf_address_lookup(unsigned long addr, unsigned long *size,
|
||||
unsigned long *off, char **modname, char *sym)
|
||||
{
|
||||
const char *ret = __bpf_address_lookup(addr, size, off, sym);
|
||||
|
||||
if (ret && modname)
|
||||
*modname = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
void bpf_prog_kallsyms_add(struct bpf_prog *fp);
|
||||
void bpf_prog_kallsyms_del(struct bpf_prog *fp);
|
||||
|
||||
#else /* CONFIG_BPF_JIT */
|
||||
|
||||
static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void bpf_jit_free(struct bpf_prog *fp)
|
||||
{
|
||||
bpf_prog_unlock_free(fp);
|
||||
}
|
||||
|
||||
static inline bool bpf_jit_kallsyms_enabled(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline const char *
|
||||
__bpf_address_lookup(unsigned long addr, unsigned long *size,
|
||||
unsigned long *off, char *sym)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline bool is_bpf_text_address(unsigned long addr)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
|
||||
char *type, char *sym)
|
||||
{
|
||||
return -ERANGE;
|
||||
}
|
||||
|
||||
static inline const char *
|
||||
bpf_address_lookup(unsigned long addr, unsigned long *size,
|
||||
unsigned long *off, char **modname, char *sym)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_BPF_JIT */
|
||||
|
||||
#define BPF_ANC BIT(15)
|
||||
|
@@ -22,6 +22,7 @@
|
||||
#define _LINUX_FPGA_MGR_H
|
||||
|
||||
struct fpga_manager;
|
||||
struct sg_table;
|
||||
|
||||
/**
|
||||
* enum fpga_mgr_states - fpga framework states
|
||||
@@ -88,6 +89,7 @@ struct fpga_image_info {
|
||||
* @state: returns an enum value of the FPGA's state
|
||||
* @write_init: prepare the FPGA to receive configuration data
|
||||
* @write: write count bytes of configuration data to the FPGA
|
||||
* @write_sg: write the scatter list of configuration data to the FPGA
|
||||
* @write_complete: set FPGA to operating state after writing is done
|
||||
* @fpga_remove: optional: Set FPGA into a specific state during driver remove
|
||||
*
|
||||
@@ -102,6 +104,7 @@ struct fpga_manager_ops {
|
||||
struct fpga_image_info *info,
|
||||
const char *buf, size_t count);
|
||||
int (*write)(struct fpga_manager *mgr, const char *buf, size_t count);
|
||||
int (*write_sg)(struct fpga_manager *mgr, struct sg_table *sgt);
|
||||
int (*write_complete)(struct fpga_manager *mgr,
|
||||
struct fpga_image_info *info);
|
||||
void (*fpga_remove)(struct fpga_manager *mgr);
|
||||
@@ -129,6 +132,8 @@ struct fpga_manager {
|
||||
|
||||
int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
|
||||
const char *buf, size_t count);
|
||||
int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info,
|
||||
struct sg_table *sgt);
|
||||
|
||||
int fpga_mgr_firmware_load(struct fpga_manager *mgr,
|
||||
struct fpga_image_info *info,
|
||||
|
@@ -11,7 +11,7 @@
|
||||
* For more information, see tools/objtool/Documentation/stack-validation.txt.
|
||||
*/
|
||||
#define STACK_FRAME_NON_STANDARD(func) \
|
||||
static void __used __section(__func_stack_frame_non_standard) \
|
||||
static void __used __section(.discard.func_stack_frame_non_standard) \
|
||||
*__func_stack_frame_non_standard_##func = func
|
||||
|
||||
#else /* !CONFIG_STACK_VALIDATION */
|
||||
|
@@ -655,6 +655,11 @@ struct inode {
|
||||
void *i_private; /* fs or device private pointer */
|
||||
};
|
||||
|
||||
static inline unsigned int i_blocksize(const struct inode *node)
|
||||
{
|
||||
return (1 << node->i_blkbits);
|
||||
}
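
For example, a filesystem could use it to round a byte count up to whole blocks (hypothetical helper, not part of this header):

static inline u64 example_bytes_to_blocks(const struct inode *inode, u64 bytes)
{
	return DIV_ROUND_UP(bytes, i_blocksize(inode));
}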
|
||||
|
||||
static inline int inode_unhashed(struct inode *inode)
|
||||
{
|
||||
return hlist_unhashed(&inode->i_hash);
|
||||
@@ -1562,6 +1567,9 @@ extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
|
||||
extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int);
|
||||
extern int vfs_whiteout(struct inode *, struct dentry *);
|
||||
|
||||
extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode,
|
||||
int open_flag);
|
||||
|
||||
/*
|
||||
* VFS file helper functions.
|
||||
*/
|
||||
@@ -1701,7 +1709,7 @@ struct inode_operations {
|
||||
int (*rename) (struct inode *, struct dentry *,
|
||||
struct inode *, struct dentry *, unsigned int);
|
||||
int (*setattr) (struct dentry *, struct iattr *);
|
||||
int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
|
||||
int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
|
||||
ssize_t (*listxattr) (struct dentry *, char *, size_t);
|
||||
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
|
||||
u64 len);
|
||||
@@ -1713,6 +1721,29 @@ struct inode_operations {
|
||||
int (*set_acl)(struct inode *, struct posix_acl *, int);
|
||||
} ____cacheline_aligned;
|
||||
|
||||
static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio,
|
||||
struct iov_iter *iter)
|
||||
{
|
||||
return file->f_op->read_iter(kio, iter);
|
||||
}
|
||||
|
||||
static inline ssize_t call_write_iter(struct file *file, struct kiocb *kio,
|
||||
struct iov_iter *iter)
|
||||
{
|
||||
return file->f_op->write_iter(kio, iter);
|
||||
}
|
||||
|
||||
static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
{
|
||||
return file->f_op->mmap(file, vma);
|
||||
}
|
||||
|
||||
static inline int call_fsync(struct file *file, loff_t start, loff_t end,
|
||||
int datasync)
|
||||
{
|
||||
return file->f_op->fsync(file, start, end, datasync);
|
||||
}
|
||||
|
||||
ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
|
||||
unsigned long nr_segs, unsigned long fast_segs,
|
||||
struct iovec *fast_pointer,
|
||||
@@ -1739,19 +1770,6 @@ extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
|
||||
extern int vfs_dedupe_file_range(struct file *file,
|
||||
struct file_dedupe_range *same);
|
||||
|
||||
static inline int do_clone_file_range(struct file *file_in, loff_t pos_in,
|
||||
struct file *file_out, loff_t pos_out,
|
||||
u64 len)
|
||||
{
|
||||
int ret;
|
||||
|
||||
sb_start_write(file_inode(file_out)->i_sb);
|
||||
ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len);
|
||||
sb_end_write(file_inode(file_out)->i_sb);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct super_operations {
|
||||
struct inode *(*alloc_inode)(struct super_block *sb);
|
||||
void (*destroy_inode)(struct inode *);
|
||||
@@ -2563,6 +2581,19 @@ static inline void file_end_write(struct file *file)
|
||||
__sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
|
||||
}
|
||||
|
||||
static inline int do_clone_file_range(struct file *file_in, loff_t pos_in,
|
||||
struct file *file_out, loff_t pos_out,
|
||||
u64 len)
|
||||
{
|
||||
int ret;
|
||||
|
||||
file_start_write(file_out);
|
||||
ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len);
|
||||
file_end_write(file_out);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* get_write_access() gets write permission for a file.
|
||||
* put_write_access() releases this write permission.
|
||||
@@ -2647,7 +2678,7 @@ static const char * const kernel_read_file_str[] = {
|
||||
|
||||
static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
|
||||
{
|
||||
if (id < 0 || id >= READING_MAX_ID)
|
||||
if ((unsigned)id >= READING_MAX_ID)
|
||||
return kernel_read_file_str[READING_UNKNOWN];
|
||||
|
||||
return kernel_read_file_str[id];
|
||||
@@ -2871,8 +2902,8 @@ extern int page_symlink(struct inode *inode, const char *symname, int len);
|
||||
extern const struct inode_operations page_symlink_inode_operations;
|
||||
extern void kfree_link(void *);
|
||||
extern void generic_fillattr(struct inode *, struct kstat *);
|
||||
int vfs_getattr_nosec(struct path *path, struct kstat *stat);
|
||||
extern int vfs_getattr(struct path *, struct kstat *);
|
||||
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
|
||||
extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
|
||||
void __inode_add_bytes(struct inode *inode, loff_t bytes);
|
||||
void inode_add_bytes(struct inode *inode, loff_t bytes);
|
||||
void __inode_sub_bytes(struct inode *inode, loff_t bytes);
|
||||
@@ -2885,10 +2916,29 @@ extern const struct inode_operations simple_symlink_inode_operations;
|
||||
|
||||
extern int iterate_dir(struct file *, struct dir_context *);
|
||||
|
||||
extern int vfs_stat(const char __user *, struct kstat *);
|
||||
extern int vfs_lstat(const char __user *, struct kstat *);
|
||||
extern int vfs_fstat(unsigned int, struct kstat *);
|
||||
extern int vfs_fstatat(int , const char __user *, struct kstat *, int);
|
||||
extern int vfs_statx(int, const char __user *, int, struct kstat *, u32);
|
||||
extern int vfs_statx_fd(unsigned int, struct kstat *, u32, unsigned int);
|
||||
|
||||
static inline int vfs_stat(const char __user *filename, struct kstat *stat)
|
||||
{
|
||||
return vfs_statx(AT_FDCWD, filename, 0, stat, STATX_BASIC_STATS);
|
||||
}
|
||||
static inline int vfs_lstat(const char __user *name, struct kstat *stat)
|
||||
{
|
||||
return vfs_statx(AT_FDCWD, name, AT_SYMLINK_NOFOLLOW,
|
||||
stat, STATX_BASIC_STATS);
|
||||
}
|
||||
static inline int vfs_fstatat(int dfd, const char __user *filename,
|
||||
struct kstat *stat, int flags)
|
||||
{
|
||||
return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS);
|
||||
}
|
||||
static inline int vfs_fstat(int fd, struct kstat *stat)
|
||||
{
|
||||
return vfs_statx_fd(fd, stat, STATX_BASIC_STATS, 0);
|
||||
}
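
The wrappers above all funnel into vfs_statx(); a caller that wants extra fields would pass a different request mask directly. A sketch, assuming STATX_BTIME as the example mask bit:

static int example_stat_birth_time(const char __user *filename, struct kstat *stat)
{
	return vfs_statx(AT_FDCWD, filename, AT_SYMLINK_NOFOLLOW,
			 stat, STATX_BTIME);
}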
|
||||
|
||||
|
||||
extern const char *vfs_get_link(struct dentry *, struct delayed_call *);
|
||||
extern int vfs_readlink(struct dentry *, char __user *, int);
|
||||
|
||||
@@ -2918,7 +2968,7 @@ extern int dcache_dir_close(struct inode *, struct file *);
|
||||
extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
|
||||
extern int dcache_readdir(struct file *, struct dir_context *);
|
||||
extern int simple_setattr(struct dentry *, struct iattr *);
|
||||
extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
|
||||
extern int simple_getattr(const struct path *, struct kstat *, u32, unsigned int);
|
||||
extern int simple_statfs(struct dentry *, struct kstatfs *);
|
||||
extern int simple_open(struct inode *inode, struct file *file);
|
||||
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
|
||||
|
50
include/linux/fsi.h
Normal file
@@ -0,0 +1,50 @@
|
||||
/* FSI device & driver interfaces
|
||||
*
|
||||
* Copyright (C) IBM Corporation 2016
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#ifndef LINUX_FSI_H
|
||||
#define LINUX_FSI_H
|
||||
|
||||
#include <linux/device.h>
|
||||
|
||||
struct fsi_device {
|
||||
struct device dev;
|
||||
u8 engine_type;
|
||||
u8 version;
|
||||
};
|
||||
|
||||
struct fsi_device_id {
|
||||
u8 engine_type;
|
||||
u8 version;
|
||||
};
|
||||
|
||||
#define FSI_VERSION_ANY 0
|
||||
|
||||
#define FSI_DEVICE(t) \
|
||||
.engine_type = (t), .version = FSI_VERSION_ANY,
|
||||
|
||||
#define FSI_DEVICE_VERSIONED(t, v) \
|
||||
.engine_type = (t), .version = (v),
|
||||
|
||||
|
||||
struct fsi_driver {
|
||||
struct device_driver drv;
|
||||
const struct fsi_device_id *id_table;
|
||||
};
|
||||
|
||||
#define to_fsi_dev(devp) container_of(devp, struct fsi_device, dev)
|
||||
#define to_fsi_drv(drvp) container_of(drvp, struct fsi_driver, drv)
|
||||
|
||||
extern struct bus_type fsi_bus_type;
|
||||
|
||||
#endif /* LINUX_FSI_H */
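
Illustrative only: a hypothetical client driver would match on an engine type via the id table; the engine-type value and the registration path are assumptions, not part of this header:

#define EXAMPLE_FSI_ENGINE_TYPE	0x05	/* assumed engine type value */

static const struct fsi_device_id example_fsi_ids[] = {
	{ FSI_DEVICE(EXAMPLE_FSI_ENGINE_TYPE) },
	{ 0 },
};

static struct fsi_driver example_fsi_driver = {
	.id_table = example_fsi_ids,
	.drv = {
		.name = "example-fsi-client",
		.bus = &fsi_bus_type,
	},
};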
|
@@ -73,7 +73,7 @@ struct diu_ad {
|
||||
/* Word 0(32-bit) in DDR memory */
|
||||
/* __u16 comp; */
|
||||
/* __u16 pixel_s:2; */
|
||||
/* __u16 pallete:1; */
|
||||
/* __u16 palette:1; */
|
||||
/* __u16 red_c:2; */
|
||||
/* __u16 green_c:2; */
|
||||
/* __u16 blue_c:2; */
|
||||
@@ -142,7 +142,7 @@ struct diu_ad {
|
||||
struct diu {
|
||||
__be32 desc[3];
|
||||
__be32 gamma;
|
||||
__be32 pallete;
|
||||
__be32 palette;
|
||||
__be32 cursor;
|
||||
__be32 curs_pos;
|
||||
__be32 diu_mode;
|
||||
|
@@ -16,6 +16,7 @@
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/user_namespace.h>
|
||||
|
||||
/*
|
||||
* IN_* from inotify.h lines up EXACTLY with FS_*, this is so we can easily
|
||||
@@ -170,7 +171,7 @@ struct fsnotify_group {
|
||||
struct inotify_group_private_data {
|
||||
spinlock_t idr_lock;
|
||||
struct idr idr;
|
||||
struct user_struct *user;
|
||||
struct ucounts *ucounts;
|
||||
} inotify_data;
|
||||
#endif
|
||||
#ifdef CONFIG_FANOTIFY
|
||||
|
@@ -167,13 +167,6 @@ struct blk_integrity {
|
||||
};
|
||||
|
||||
#endif /* CONFIG_BLK_DEV_INTEGRITY */
|
||||
struct disk_devt {
|
||||
atomic_t count;
|
||||
void (*release)(struct disk_devt *disk_devt);
|
||||
};
|
||||
|
||||
void put_disk_devt(struct disk_devt *disk_devt);
|
||||
void get_disk_devt(struct disk_devt *disk_devt);
|
||||
|
||||
struct gendisk {
|
||||
/* major, first_minor and minors are input parameters only,
|
||||
@@ -183,7 +176,6 @@ struct gendisk {
|
||||
int first_minor;
|
||||
int minors; /* maximum number of minors, =1 for
|
||||
* disks that can't be partitioned. */
|
||||
struct disk_devt *disk_devt;
|
||||
|
||||
char disk_name[DISK_NAME_LEN]; /* name of major driver */
|
||||
char *(*devnode)(struct gendisk *gd, umode_t *mode);
|
||||
|
@@ -541,7 +541,7 @@ static inline bool pm_suspended_storage(void)
|
||||
#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
|
||||
/* The below functions must be run on a range from a single zone. */
|
||||
extern int alloc_contig_range(unsigned long start, unsigned long end,
|
||||
unsigned migratetype);
|
||||
unsigned migratetype, gfp_t gfp_mask);
|
||||
extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
|
||||
#endif
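
Callers now pass the allocation context explicitly; a CMA-style call might look like this (the pfn range is a placeholder, and MIGRATE_CMA assumes CONFIG_CMA):

static int example_grab_range(unsigned long start_pfn, unsigned long end_pfn)
{
	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_CMA, GFP_KERNEL);
}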
|
||||
|
||||
|
@@ -135,10 +135,15 @@ int desc_to_gpio(const struct gpio_desc *desc);
|
||||
struct fwnode_handle;
|
||||
|
||||
struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
|
||||
const char *propname);
|
||||
struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
|
||||
const char *con_id,
|
||||
struct fwnode_handle *child);
|
||||
const char *propname, int index,
|
||||
enum gpiod_flags dflags,
|
||||
const char *label);
|
||||
struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
|
||||
const char *con_id, int index,
|
||||
struct fwnode_handle *child,
|
||||
enum gpiod_flags flags,
|
||||
const char *label);
|
||||
|
||||
#else /* CONFIG_GPIOLIB */
|
||||
|
||||
static inline int gpiod_count(struct device *dev, const char *con_id)
|
||||
@@ -411,20 +416,38 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
|
||||
/* Child properties interface */
|
||||
struct fwnode_handle;
|
||||
|
||||
static inline struct gpio_desc *fwnode_get_named_gpiod(
|
||||
struct fwnode_handle *fwnode, const char *propname)
|
||||
static inline
|
||||
struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
|
||||
const char *propname, int index,
|
||||
enum gpiod_flags dflags,
|
||||
const char *label)
|
||||
{
|
||||
return ERR_PTR(-ENOSYS);
|
||||
}
|
||||
|
||||
static inline struct gpio_desc *devm_get_gpiod_from_child(
|
||||
struct device *dev, const char *con_id, struct fwnode_handle *child)
|
||||
static inline
|
||||
struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
|
||||
const char *con_id, int index,
|
||||
struct fwnode_handle *child,
|
||||
enum gpiod_flags flags,
|
||||
const char *label)
|
||||
{
|
||||
return ERR_PTR(-ENOSYS);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_GPIOLIB */
|
||||
|
||||
static inline
|
||||
struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev,
|
||||
const char *con_id,
|
||||
struct fwnode_handle *child,
|
||||
enum gpiod_flags flags,
|
||||
const char *label)
|
||||
{
|
||||
return devm_fwnode_get_index_gpiod_from_child(dev, con_id, 0, child,
|
||||
flags, label);
|
||||
}
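
A sketch of the convenience wrapper in use; the "reset" con_id, the child node and the label are assumptions:

static int example_get_child_reset(struct device *dev, struct fwnode_handle *child)
{
	struct gpio_desc *reset;

	reset = devm_fwnode_get_gpiod_from_child(dev, "reset", child,
						 GPIOD_OUT_LOW, "example-reset");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_set_value_cansleep(reset, 1);	/* assert the (assumed) reset line */
	return 0;
}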
|
||||
|
||||
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
|
||||
|
||||
int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
|
||||
|
@@ -232,6 +232,7 @@ struct hid_sensor_common {
|
||||
atomic_t data_ready;
|
||||
atomic_t user_requested_state;
|
||||
struct iio_trigger *trigger;
|
||||
int timestamp_ns_scale;
|
||||
struct hid_sensor_hub_attribute_info poll;
|
||||
struct hid_sensor_hub_attribute_info report_state;
|
||||
struct hid_sensor_hub_attribute_info power_state;
|
||||
@@ -271,4 +272,7 @@ int hid_sensor_format_scale(u32 usage_id,
|
||||
|
||||
s32 hid_sensor_read_poll_value(struct hid_sensor_common *st);
|
||||
|
||||
int64_t hid_sensor_convert_timestamp(struct hid_sensor_common *st,
|
||||
int64_t raw_value);
|
||||
|
||||
#endif
|
||||
|
@@ -52,6 +52,9 @@
|
||||
#define HID_USAGE_SENSOR_ANGL_VELOCITY_Y_AXIS 0x200458
|
||||
#define HID_USAGE_SENSOR_ANGL_VELOCITY_Z_AXIS 0x200459
|
||||
|
||||
/* Gravity vector */
|
||||
#define HID_USAGE_SENSOR_GRAVITY_VECTOR 0x20007B
|
||||
|
||||
/* ORIENTATION: Compass 3D: (200083) */
|
||||
#define HID_USAGE_SENSOR_COMPASS_3D 0x200083
|
||||
#define HID_USAGE_SENSOR_DATA_ORIENTATION 0x200470
|
||||
@@ -95,6 +98,7 @@
|
||||
#define HID_USAGE_SENSOR_TIME_HOUR 0x200525
|
||||
#define HID_USAGE_SENSOR_TIME_MINUTE 0x200526
|
||||
#define HID_USAGE_SENSOR_TIME_SECOND 0x200527
|
||||
#define HID_USAGE_SENSOR_TIME_TIMESTAMP 0x200529
|
||||
|
||||
/* Units */
|
||||
#define HID_USAGE_SENSOR_UNITS_NOT_SPECIFIED 0x00
|
||||
|
@@ -19,7 +19,6 @@
|
||||
#include <linux/ktime.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/timer.h>
|
||||
#include <linux/timerqueue.h>
|
||||
|
@@ -6,6 +6,18 @@ extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
||||
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
|
||||
struct vm_area_struct *vma);
|
||||
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
|
||||
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
||||
pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
|
||||
struct vm_area_struct *vma);
|
||||
|
||||
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
|
||||
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
|
||||
#else
|
||||
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
|
||||
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
|
||||
unsigned long addr,
|
||||
@@ -17,6 +29,9 @@ extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
|
||||
extern int zap_huge_pmd(struct mmu_gather *tlb,
|
||||
struct vm_area_struct *vma,
|
||||
pmd_t *pmd, unsigned long addr);
|
||||
extern int zap_huge_pud(struct mmu_gather *tlb,
|
||||
struct vm_area_struct *vma,
|
||||
pud_t *pud, unsigned long addr);
|
||||
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
|
||||
unsigned long addr, unsigned long end,
|
||||
unsigned char *vec);
|
||||
@@ -26,13 +41,16 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
|
||||
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
|
||||
unsigned long addr, pgprot_t newprot,
|
||||
int prot_numa);
|
||||
int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
|
||||
pfn_t pfn, bool write);
|
||||
int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
|
||||
pmd_t *pmd, pfn_t pfn, bool write);
|
||||
int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
|
||||
pud_t *pud, pfn_t pfn, bool write);
|
||||
enum transparent_hugepage_flag {
|
||||
TRANSPARENT_HUGEPAGE_FLAG,
|
||||
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
|
||||
TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
|
||||
TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
|
||||
TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
|
||||
TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
|
||||
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
|
||||
TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
|
||||
@@ -57,13 +75,14 @@ extern struct kobj_attribute shmem_enabled_attr;
|
||||
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
|
||||
|
||||
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
||||
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
|
||||
pmd_t *pmd, int flags);
|
||||
|
||||
#define HPAGE_PMD_SHIFT PMD_SHIFT
|
||||
#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT)
|
||||
#define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))
|
||||
|
||||
#define HPAGE_PUD_SHIFT PUD_SHIFT
|
||||
#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
|
||||
#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
|
||||
|
||||
extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
|
||||
|
||||
#define transparent_hugepage_enabled(__vma) \
|
||||
@@ -117,6 +136,17 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
|
||||
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
|
||||
bool freeze, struct page *page);
|
||||
|
||||
void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
|
||||
unsigned long address);
|
||||
|
||||
#define split_huge_pud(__vma, __pud, __address) \
|
||||
do { \
|
||||
pud_t *____pud = (__pud); \
|
||||
if (pud_trans_huge(*____pud) \
|
||||
|| pud_devmap(*____pud)) \
|
||||
__split_huge_pud(__vma, __pud, __address); \
|
||||
} while (0)
|
||||
|
||||
extern int hugepage_madvise(struct vm_area_struct *vma,
|
||||
unsigned long *vm_flags, int advice);
|
||||
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
|
||||
@@ -125,6 +155,8 @@ extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
|
||||
long adjust_next);
|
||||
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
|
||||
struct vm_area_struct *vma);
|
||||
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
|
||||
struct vm_area_struct *vma);
|
||||
/* mmap_sem must be held on entry */
|
||||
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
|
||||
struct vm_area_struct *vma)
|
||||
@@ -135,6 +167,15 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
|
||||
else
|
||||
return NULL;
|
||||
}
|
||||
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
|
||||
if (pud_trans_huge(*pud) || pud_devmap(*pud))
|
||||
return __pud_trans_huge_lock(pud, vma);
|
||||
else
|
||||
return NULL;
|
||||
}
|
||||
static inline int hpage_nr_pages(struct page *page)
|
||||
{
|
||||
if (unlikely(PageTransHuge(page)))
|
||||
@@ -142,6 +183,11 @@ static inline int hpage_nr_pages(struct page *page)
|
||||
return 1;
|
||||
}
|
||||
|
||||
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
|
||||
pmd_t *pmd, int flags);
|
||||
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
|
||||
pud_t *pud, int flags);
|
||||
|
||||
extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
|
||||
|
||||
extern struct page *huge_zero_page;
|
||||
@@ -156,6 +202,11 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
|
||||
return is_huge_zero_page(pmd_page(pmd));
|
||||
}
|
||||
|
||||
static inline bool is_huge_zero_pud(pud_t pud)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
struct page *mm_get_huge_zero_page(struct mm_struct *mm);
|
||||
void mm_put_huge_zero_page(struct mm_struct *mm);
|
||||
|
||||
@@ -166,6 +217,10 @@ void mm_put_huge_zero_page(struct mm_struct *mm);
|
||||
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
|
||||
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })
|
||||
|
||||
#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
|
||||
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
|
||||
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
|
||||
|
||||
#define hpage_nr_pages(x) 1
|
||||
|
||||
#define transparent_hugepage_enabled(__vma) 0
|
||||
@@ -194,6 +249,9 @@ static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
|
||||
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
|
||||
unsigned long address, bool freeze, struct page *page) {}
|
||||
|
||||
#define split_huge_pud(__vma, __pmd, __address) \
|
||||
do { } while (0)
|
||||
|
||||
static inline int hugepage_madvise(struct vm_area_struct *vma,
|
||||
unsigned long *vm_flags, int advice)
|
||||
{
|
||||
@@ -211,6 +269,11 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
|
||||
{
|
||||
@@ -222,6 +285,11 @@ static inline bool is_huge_zero_page(struct page *page)
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool is_huge_zero_pud(pud_t pud)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void mm_put_huge_zero_page(struct mm_struct *mm)
|
||||
{
|
||||
return;
|
||||
@@ -232,6 +300,12 @@ static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
|
||||
unsigned long addr, pud_t *pud, int flags)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
|
||||
|
||||
#endif /* _LINUX_HUGE_MM_H */
|
||||
|
@@ -65,7 +65,8 @@ int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int);
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
@@ -81,6 +82,11 @@ void hugetlb_show_meminfo(void);
|
||||
unsigned long hugetlb_total_pages(void);
|
||||
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
||||
unsigned long address, unsigned int flags);
|
||||
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
|
||||
struct vm_area_struct *dst_vma,
|
||||
unsigned long dst_addr,
|
||||
unsigned long src_addr,
|
||||
struct page **pagep);
|
||||
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
|
||||
struct vm_area_struct *vma,
|
||||
vm_flags_t vm_flags);
|
||||
@@ -116,7 +122,7 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
|
||||
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
|
||||
pud_t *pud, int flags);
|
||||
int pmd_huge(pmd_t pmd);
|
||||
int pud_huge(pud_t pmd);
|
||||
int pud_huge(pud_t pud);
|
||||
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
|
||||
unsigned long address, unsigned long end, pgprot_t newprot);
|
||||
|
||||
@@ -131,7 +137,7 @@ static inline unsigned long hugetlb_total_pages(void)
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
@@ -149,6 +155,8 @@ static inline void hugetlb_show_meminfo(void)
|
||||
#define is_hugepage_only_range(mm, addr, len) 0
|
||||
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
|
||||
#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; })
|
||||
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
|
||||
src_addr, pagep) ({ BUG(); 0; })
|
||||
#define huge_pte_offset(mm, address) 0
|
||||
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
|
||||
{
|
||||
@@ -189,6 +197,9 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
|
||||
#ifndef pgd_huge
|
||||
#define pgd_huge(x) 0
|
||||
#endif
|
||||
#ifndef p4d_huge
|
||||
#define p4d_huge(x) 0
|
||||
#endif
|
||||
|
||||
#ifndef pgd_write
|
||||
static inline int pgd_write(pgd_t pgd)
|
||||
|
@@ -32,11 +32,10 @@
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>

#include <linux/interrupt.h>

#define MAX_PAGE_BUFFER_COUNT				32
#define MAX_MULTIPAGE_BUFFER_COUNT			32 /* 128K */
@@ -139,8 +138,8 @@ struct hv_ring_buffer_info {
|
||||
* for the specified ring buffer
|
||||
*/
|
||||
static inline void
|
||||
hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
|
||||
u32 *read, u32 *write)
|
||||
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
|
||||
u32 *read, u32 *write)
|
||||
{
|
||||
u32 read_loc, write_loc, dsize;
|
||||
|
||||
@@ -154,7 +153,7 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
|
||||
*read = dsize - *write;
|
||||
}
|
||||
|
||||
static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi)
|
||||
static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
|
||||
{
|
||||
u32 read_loc, write_loc, dsize, read;
|
||||
|
||||
@@ -168,7 +167,7 @@ static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi)
|
||||
return read;
|
||||
}
|
||||
|
||||
static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
|
||||
static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
|
||||
{
|
||||
u32 read_loc, write_loc, dsize, write;
|
||||
|
||||
@@ -641,6 +640,7 @@ struct vmbus_channel_msginfo {
|
||||
|
||||
/* Synchronize the request/response if needed */
|
||||
struct completion waitevent;
|
||||
struct vmbus_channel *waiting_channel;
|
||||
union {
|
||||
struct vmbus_channel_version_supported version_supported;
|
||||
struct vmbus_channel_open_result open_result;
|
||||
@@ -683,11 +683,6 @@ struct hv_input_signal_event_buffer {
|
||||
struct hv_input_signal_event event;
|
||||
};
|
||||
|
||||
enum hv_signal_policy {
|
||||
HV_SIGNAL_POLICY_DEFAULT = 0,
|
||||
HV_SIGNAL_POLICY_EXPLICIT,
|
||||
};
|
||||
|
||||
enum hv_numa_policy {
|
||||
HV_BALANCED = 0,
|
||||
HV_LOCALIZED,
|
||||
@@ -747,26 +742,27 @@ struct vmbus_channel {
|
||||
|
||||
struct vmbus_close_msg close_msg;
|
||||
|
||||
/* Channel callback are invoked in this workqueue context */
|
||||
/* HANDLE dataWorkQueue; */
|
||||
|
||||
/* Channel callback's invoked in softirq context */
|
||||
struct tasklet_struct callback_event;
|
||||
void (*onchannel_callback)(void *context);
|
||||
void *channel_callback_context;
|
||||
|
||||
/*
|
||||
* A channel can be marked for efficient (batched)
|
||||
* reading:
|
||||
* If batched_reading is set to "true", we read until the
|
||||
* channel is empty and hold off interrupts from the host
|
||||
* during the entire read process.
|
||||
* If batched_reading is set to "false", the client is not
|
||||
* going to perform batched reading.
|
||||
*
|
||||
* By default we will enable batched reading; specific
|
||||
* drivers that don't want this behavior can turn it off.
|
||||
* A channel can be marked for one of three modes of reading:
|
||||
 * BATCHED - callback called from tasklet and should read
|
||||
* channel until empty. Interrupts from the host
|
||||
* are masked while read is in process (default).
|
||||
* DIRECT - callback called from tasklet (softirq).
|
||||
* ISR - callback called in interrupt context and must
|
||||
* invoke its own deferred processing.
|
||||
* Host interrupts are disabled and must be re-enabled
|
||||
* when ring is empty.
|
||||
*/
|
||||
|
||||
bool batched_reading;
|
||||
enum hv_callback_mode {
|
||||
HV_CALL_BATCHED,
|
||||
HV_CALL_DIRECT,
|
||||
HV_CALL_ISR
|
||||
} callback_mode;
|
||||
|
||||
bool is_dedicated_interrupt;
|
||||
struct hv_input_signal_event_buffer sig_buf;
|
||||
@@ -849,23 +845,6 @@ struct vmbus_channel {
|
||||
* link up channels based on their CPU affinity.
|
||||
*/
|
||||
struct list_head percpu_list;
|
||||
/*
|
||||
* Host signaling policy: The default policy will be
|
||||
* based on the ring buffer state. We will also support
|
||||
* a policy where the client driver can have explicit
|
||||
* signaling control.
|
||||
*/
|
||||
enum hv_signal_policy signal_policy;
|
||||
/*
|
||||
* On the channel send side, many of the VMBUS
|
||||
* device drivers explicity serialize access to the
|
||||
* outgoing ring buffer. Give more control to the
|
||||
* VMBUS device drivers in terms how to serialize
|
||||
* accesss to the outgoing ring buffer.
|
||||
* The default behavior will be to aquire the
|
||||
* ring lock to preserve the current behavior.
|
||||
*/
|
||||
bool acquire_ring_lock;
|
||||
/*
|
||||
* For performance critical channels (storage, networking
|
||||
* etc,), Hyper-V has a mechanism to enhance the throughput
|
||||
@@ -906,32 +885,22 @@ struct vmbus_channel {
|
||||
|
||||
};
|
||||
|
||||
static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
|
||||
{
|
||||
c->acquire_ring_lock = state;
|
||||
}
|
||||
|
||||
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
|
||||
{
|
||||
return !!(c->offermsg.offer.chn_flags &
|
||||
VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
|
||||
}
|
||||
|
||||
static inline void set_channel_signal_state(struct vmbus_channel *c,
|
||||
enum hv_signal_policy policy)
|
||||
{
|
||||
c->signal_policy = policy;
|
||||
}
|
||||
|
||||
static inline void set_channel_affinity_state(struct vmbus_channel *c,
|
||||
enum hv_numa_policy policy)
|
||||
{
|
||||
c->affinity_policy = policy;
|
||||
}
|
||||
|
||||
static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
|
||||
static inline void set_channel_read_mode(struct vmbus_channel *c,
|
||||
enum hv_callback_mode mode)
|
||||
{
|
||||
c->batched_reading = state;
|
||||
c->callback_mode = mode;
|
||||
}
|
||||
|
||||
static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
|
||||
@@ -1054,8 +1023,7 @@ extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
|
||||
u32 bufferLen,
|
||||
u64 requestid,
|
||||
enum vmbus_packet_type type,
|
||||
u32 flags,
|
||||
bool kick_q);
|
||||
u32 flags);
|
||||
|
||||
extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
|
||||
struct hv_page_buffer pagebuffers[],
|
||||
@@ -1070,8 +1038,7 @@ extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
|
||||
void *buffer,
|
||||
u32 bufferlen,
|
||||
u64 requestid,
|
||||
u32 flags,
|
||||
bool kick_q);
|
||||
u32 flags);
|
||||
|
||||
extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
|
||||
struct hv_multipage_buffer *mpb,
|
||||
@@ -1458,9 +1425,10 @@ struct hyperv_service_callback {
|
||||
};
|
||||
|
||||
#define MAX_SRV_VER 0x7ffffff
|
||||
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
|
||||
struct icmsg_negotiate *, u8 *, int,
|
||||
int);
|
||||
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
|
||||
const int *fw_version, int fw_vercnt,
|
||||
const int *srv_version, int srv_vercnt,
|
||||
int *nego_fw_version, int *nego_srv_version);
|
||||
|
||||
void hv_event_tasklet_disable(struct vmbus_channel *channel);
|
||||
void hv_event_tasklet_enable(struct vmbus_channel *channel);
|
||||
@@ -1480,9 +1448,9 @@ void vmbus_set_event(struct vmbus_channel *channel);
|
||||
|
||||
/* Get the start of the ring buffer. */
|
||||
static inline void *
|
||||
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
|
||||
hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
|
||||
{
|
||||
return (void *)ring_info->ring_buffer->buffer;
|
||||
return ring_info->ring_buffer->buffer;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1544,6 +1512,36 @@ init_cached_read_index(struct vmbus_channel *channel)
|
||||
rbi->cached_read_index = rbi->ring_buffer->read_index;
|
||||
}
|
||||
|
||||
/*
|
||||
* Mask off host interrupt callback notifications
|
||||
*/
|
||||
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
|
||||
{
|
||||
rbi->ring_buffer->interrupt_mask = 1;
|
||||
|
||||
/* make sure mask update is not reordered */
|
||||
virt_mb();
|
||||
}
|
||||
|
||||
/*
|
||||
* Re-enable host callback and return number of outstanding bytes
|
||||
*/
|
||||
static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
|
||||
{
|
||||
|
||||
rbi->ring_buffer->interrupt_mask = 0;
|
||||
|
||||
/* make sure mask update is not reordered */
|
||||
virt_mb();
|
||||
|
||||
/*
|
||||
* Now check to see if the ring buffer is still empty.
|
||||
* If it is not, we raced and we need to process new
|
||||
* incoming messages.
|
||||
*/
|
||||
return hv_get_bytes_to_read(rbi);
|
||||
}
|
||||
|
||||
/*
|
||||
* An API to support in-place processing of incoming VMBUS packets.
|
||||
*/
|
||||
|
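The hv_begin_read()/hv_end_read() helpers added above mask and re-enable host interrupt notifications around a batched read. A minimal sketch of how a driver's channel callback might use them; the device structure, its rbi field, and my_consume_packets() are hypothetical and not part of this patch:

/* Sketch only: assumes a driver-private context holding the ring buffer info. */
static void my_channel_callback(void *context)
{
	struct my_device *dev = context;		/* hypothetical driver state */
	struct hv_ring_buffer_info *rbi = dev->rbi;	/* hypothetical field */

	hv_begin_read(rbi);			/* mask host notifications */
	while (hv_get_bytes_to_read(rbi))
		my_consume_packets(dev);	/* hypothetical helper */

	/* Re-enable notifications; a non-zero return means new data raced in. */
	if (hv_end_read(rbi))
		my_consume_packets(dev);
}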
@@ -30,6 +30,7 @@
|
||||
#include <linux/device.h> /* for struct device */
|
||||
#include <linux/sched.h> /* for completion */
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/rtmutex.h>
|
||||
#include <linux/irqdomain.h> /* for Host Notify IRQ */
|
||||
#include <linux/of.h> /* for struct device_node */
|
||||
#include <linux/swab.h> /* for swab16 */
|
||||
@@ -283,6 +284,7 @@ enum i2c_slave_event {
|
||||
|
||||
extern int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb);
|
||||
extern int i2c_slave_unregister(struct i2c_client *client);
|
||||
extern bool i2c_detect_slave_mode(struct device *dev);
|
||||
|
||||
static inline int i2c_slave_event(struct i2c_client *client,
|
||||
enum i2c_slave_event event, u8 *val)
|
||||
|
@@ -1,20 +0,0 @@
|
||||
/* Header file for Freescale MPR121 Capacitive Touch Sensor */
|
||||
|
||||
#ifndef _MPR121_TOUCHKEY_H
|
||||
#define _MPR121_TOUCHKEY_H
|
||||
|
||||
/**
|
||||
* struct mpr121_platform_data - platform data for mpr121 sensor
|
||||
* @keymap: pointer to array of KEY_* values representing keymap
|
||||
* @keymap_size: size of the keymap
|
||||
* @wakeup: configure the button as a wake-up source
|
||||
* @vdd_uv: VDD voltage in uV
|
||||
*/
|
||||
struct mpr121_platform_data {
|
||||
const unsigned short *keymap;
|
||||
unsigned int keymap_size;
|
||||
bool wakeup;
|
||||
int vdd_uv;
|
||||
};
|
||||
|
||||
#endif /* _MPR121_TOUCHKEY_H */
|
@@ -12,47 +12,29 @@
|
||||
#ifndef __IDR_H__
|
||||
#define __IDR_H__
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/rcupdate.h>
|
||||
|
||||
/*
|
||||
* Using 6 bits at each layer allows us to allocate 7 layers out of each page.
|
||||
* 8 bits only gave us 3 layers out of every pair of pages, which is less
|
||||
* efficient except for trees with a largest element between 192-255 inclusive.
|
||||
*/
|
||||
#define IDR_BITS 6
|
||||
#define IDR_SIZE (1 << IDR_BITS)
|
||||
#define IDR_MASK ((1 << IDR_BITS)-1)
|
||||
|
||||
struct idr_layer {
|
||||
int prefix; /* the ID prefix of this idr_layer */
|
||||
int layer; /* distance from leaf */
|
||||
struct idr_layer __rcu *ary[1<<IDR_BITS];
|
||||
int count; /* When zero, we can release it */
|
||||
union {
|
||||
/* A zero bit means "space here" */
|
||||
DECLARE_BITMAP(bitmap, IDR_SIZE);
|
||||
struct rcu_head rcu_head;
|
||||
};
|
||||
};
|
||||
#include <linux/radix-tree.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/percpu.h>
|
||||
|
||||
struct idr {
|
||||
struct idr_layer __rcu *hint; /* the last layer allocated from */
|
||||
struct idr_layer __rcu *top;
|
||||
int layers; /* only valid w/o concurrent changes */
|
||||
int cur; /* current pos for cyclic allocation */
|
||||
spinlock_t lock;
|
||||
int id_free_cnt;
|
||||
struct idr_layer *id_free;
|
||||
struct radix_tree_root idr_rt;
|
||||
unsigned int idr_next;
|
||||
};
|
||||
|
||||
#define IDR_INIT(name) \
|
||||
/*
|
||||
* The IDR API does not expose the tagging functionality of the radix tree
|
||||
* to users. Use tag 0 to track whether a node has free space below it.
|
||||
*/
|
||||
#define IDR_FREE 0
|
||||
|
||||
/* Set the IDR flag and the IDR_FREE tag */
|
||||
#define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT))
|
||||
|
||||
#define IDR_INIT \
|
||||
{ \
|
||||
.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
|
||||
.idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER) \
|
||||
}
|
||||
#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
|
||||
#define DEFINE_IDR(name) struct idr name = IDR_INIT
|
||||
|
||||
/**
|
||||
* idr_get_cursor - Return the current position of the cyclic allocator
|
||||
@@ -62,9 +44,9 @@ struct idr {
|
||||
* idr_alloc_cyclic() if it is free (otherwise the search will start from
|
||||
* this position).
|
||||
*/
|
||||
static inline unsigned int idr_get_cursor(struct idr *idr)
|
||||
static inline unsigned int idr_get_cursor(const struct idr *idr)
|
||||
{
|
||||
return READ_ONCE(idr->cur);
|
||||
return READ_ONCE(idr->idr_next);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -77,7 +59,7 @@ static inline unsigned int idr_get_cursor(struct idr *idr)
|
||||
*/
|
||||
static inline void idr_set_cursor(struct idr *idr, unsigned int val)
|
||||
{
|
||||
WRITE_ONCE(idr->cur, val);
|
||||
WRITE_ONCE(idr->idr_next, val);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -97,22 +79,31 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val)
|
||||
* period).
|
||||
*/
|
||||
|
||||
/*
|
||||
* This is what we export.
|
||||
*/
|
||||
|
||||
void *idr_find_slowpath(struct idr *idp, int id);
|
||||
void idr_preload(gfp_t gfp_mask);
|
||||
int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
|
||||
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask);
|
||||
int idr_for_each(struct idr *idp,
|
||||
int idr_alloc(struct idr *, void *entry, int start, int end, gfp_t);
|
||||
int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t);
|
||||
int idr_for_each(const struct idr *,
|
||||
int (*fn)(int id, void *p, void *data), void *data);
|
||||
void *idr_get_next(struct idr *idp, int *nextid);
|
||||
void *idr_replace(struct idr *idp, void *ptr, int id);
|
||||
void idr_remove(struct idr *idp, int id);
|
||||
void idr_destroy(struct idr *idp);
|
||||
void idr_init(struct idr *idp);
|
||||
bool idr_is_empty(struct idr *idp);
|
||||
void *idr_get_next(struct idr *, int *nextid);
|
||||
void *idr_replace(struct idr *, void *, int id);
|
||||
void idr_destroy(struct idr *);
|
||||
|
||||
static inline void *idr_remove(struct idr *idr, int id)
|
||||
{
|
||||
return radix_tree_delete_item(&idr->idr_rt, id, NULL);
|
||||
}
|
||||
|
||||
static inline void idr_init(struct idr *idr)
|
||||
{
|
||||
INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
|
||||
idr->idr_next = 0;
|
||||
}
|
||||
|
||||
static inline bool idr_is_empty(const struct idr *idr)
|
||||
{
|
||||
return radix_tree_empty(&idr->idr_rt) &&
|
||||
radix_tree_tagged(&idr->idr_rt, IDR_FREE);
|
||||
}
|
||||
|
||||
/**
|
||||
* idr_preload_end - end preload section started with idr_preload()
|
||||
@@ -137,19 +128,14 @@ static inline void idr_preload_end(void)
|
||||
* This function can be called under rcu_read_lock(), given that the leaf
|
||||
* pointers lifetimes are correctly managed.
|
||||
*/
|
||||
static inline void *idr_find(struct idr *idr, int id)
|
||||
static inline void *idr_find(const struct idr *idr, int id)
|
||||
{
|
||||
struct idr_layer *hint = rcu_dereference_raw(idr->hint);
|
||||
|
||||
if (hint && (id & ~IDR_MASK) == hint->prefix)
|
||||
return rcu_dereference_raw(hint->ary[id & IDR_MASK]);
|
||||
|
||||
return idr_find_slowpath(idr, id);
|
||||
return radix_tree_lookup(&idr->idr_rt, id);
|
||||
}
|
||||
|
||||
/**
|
||||
* idr_for_each_entry - iterate over an idr's elements of a given type
|
||||
* @idp: idr handle
|
||||
* @idr: idr handle
|
||||
* @entry: the type * to use as cursor
|
||||
* @id: id entry's key
|
||||
*
|
||||
@@ -157,57 +143,60 @@ static inline void *idr_find(struct idr *idr, int id)
|
||||
 * after normal termination @entry is left with the value NULL. This
|
||||
* is convenient for a "not found" value.
|
||||
*/
|
||||
#define idr_for_each_entry(idp, entry, id) \
|
||||
for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
|
||||
#define idr_for_each_entry(idr, entry, id) \
|
||||
for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
|
||||
|
||||
/**
|
||||
* idr_for_each_entry - continue iteration over an idr's elements of a given type
|
||||
* @idp: idr handle
|
||||
* idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
|
||||
* @idr: idr handle
|
||||
* @entry: the type * to use as cursor
|
||||
* @id: id entry's key
|
||||
*
|
||||
* Continue to iterate over list of given type, continuing after
|
||||
* the current position.
|
||||
*/
|
||||
#define idr_for_each_entry_continue(idp, entry, id) \
|
||||
for ((entry) = idr_get_next((idp), &(id)); \
|
||||
#define idr_for_each_entry_continue(idr, entry, id) \
|
||||
for ((entry) = idr_get_next((idr), &(id)); \
|
||||
entry; \
|
||||
++id, (entry) = idr_get_next((idp), &(id)))
|
||||
++id, (entry) = idr_get_next((idr), &(id)))
|
||||
|
||||
/*
|
||||
* IDA - IDR based id allocator, use when translation from id to
|
||||
* pointer isn't necessary.
|
||||
*
|
||||
* IDA_BITMAP_LONGS is calculated to be one less to accommodate
|
||||
* ida_bitmap->nr_busy so that the whole struct fits in 128 bytes.
|
||||
*/
|
||||
#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */
|
||||
#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long) - 1)
|
||||
#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long))
|
||||
#define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8)
|
||||
|
||||
struct ida_bitmap {
|
||||
long nr_busy;
|
||||
unsigned long bitmap[IDA_BITMAP_LONGS];
|
||||
};
|
||||
|
||||
DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap);
|
||||
|
||||
struct ida {
|
||||
struct idr idr;
|
||||
struct ida_bitmap *free_bitmap;
|
||||
struct radix_tree_root ida_rt;
|
||||
};
|
||||
|
||||
#define IDA_INIT(name) { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
|
||||
#define DEFINE_IDA(name) struct ida name = IDA_INIT(name)
|
||||
#define IDA_INIT { \
|
||||
.ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT), \
|
||||
}
|
||||
#define DEFINE_IDA(name) struct ida name = IDA_INIT
|
||||
|
||||
int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
|
||||
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
|
||||
void ida_remove(struct ida *ida, int id);
|
||||
void ida_destroy(struct ida *ida);
|
||||
void ida_init(struct ida *ida);
|
||||
|
||||
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
|
||||
gfp_t gfp_mask);
|
||||
void ida_simple_remove(struct ida *ida, unsigned int id);
|
||||
|
||||
static inline void ida_init(struct ida *ida)
|
||||
{
|
||||
INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT);
|
||||
}
|
||||
|
||||
/**
|
||||
* ida_get_new - allocate new ID
|
||||
* @ida: idr handle
|
||||
@@ -220,11 +209,8 @@ static inline int ida_get_new(struct ida *ida, int *p_id)
|
||||
return ida_get_new_above(ida, 0, p_id);
|
||||
}
|
||||
|
||||
static inline bool ida_is_empty(struct ida *ida)
|
||||
static inline bool ida_is_empty(const struct ida *ida)
|
||||
{
|
||||
return idr_is_empty(&ida->idr);
|
||||
return radix_tree_empty(&ida->ida_rt);
|
||||
}
|
||||
|
||||
void __init idr_init_cache(void);
|
||||
|
||||
#endif /* __IDR_H__ */
|
||||
|
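With the radix-tree rewrite above, the exported IDR API (idr_alloc(), idr_find(), idr_remove(), idr_for_each_entry()) is meant to stay source-compatible for most callers. A minimal usage sketch under that assumption; struct my_obj and the surrounding locking (omitted here) are hypothetical:

static DEFINE_IDR(my_idr);	/* hypothetical allocator instance */

static int my_register(struct my_obj *obj)
{
	int id;

	idr_preload(GFP_KERNEL);
	id = idr_alloc(&my_idr, obj, 1, 0, GFP_NOWAIT);	/* ids >= 1, no upper bound */
	idr_preload_end();
	if (id < 0)
		return id;
	obj->id = id;
	return 0;
}

static struct my_obj *my_lookup(int id)
{
	return idr_find(&my_idr, id);	/* now a plain radix_tree_lookup() */
}

static void my_unregister(struct my_obj *obj)
{
	idr_remove(&my_idr, obj->id);
}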
@@ -185,6 +185,8 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
|
||||
|
||||
/* number of user priorities 802.11 uses */
|
||||
#define IEEE80211_NUM_UPS 8
|
||||
/* number of ACs */
|
||||
#define IEEE80211_NUM_ACS 4
|
||||
|
||||
#define IEEE80211_QOS_CTL_LEN 2
|
||||
/* 1d tag mask */
|
||||
@@ -1041,8 +1043,9 @@ struct ieee80211_mgmt {
|
||||
} u;
|
||||
} __packed __aligned(2);
|
||||
|
||||
/* Supported Rates value encodings in 802.11n-2009 7.3.2.2 */
|
||||
/* Supported rates membership selectors */
|
||||
#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
|
||||
#define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126
|
||||
|
||||
/* mgmt header + 1 byte category code */
|
||||
#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
|
||||
@@ -2322,31 +2325,33 @@ enum ieee80211_sa_query_action {
|
||||
};
|
||||
|
||||
|
||||
/* cipher suite selectors */
|
||||
#define WLAN_CIPHER_SUITE_USE_GROUP 0x000FAC00
|
||||
#define WLAN_CIPHER_SUITE_WEP40 0x000FAC01
|
||||
#define WLAN_CIPHER_SUITE_TKIP 0x000FAC02
|
||||
/* reserved: 0x000FAC03 */
|
||||
#define WLAN_CIPHER_SUITE_CCMP 0x000FAC04
|
||||
#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05
|
||||
#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06
|
||||
#define WLAN_CIPHER_SUITE_GCMP 0x000FAC08
|
||||
#define WLAN_CIPHER_SUITE_GCMP_256 0x000FAC09
|
||||
#define WLAN_CIPHER_SUITE_CCMP_256 0x000FAC0A
|
||||
#define WLAN_CIPHER_SUITE_BIP_GMAC_128 0x000FAC0B
|
||||
#define WLAN_CIPHER_SUITE_BIP_GMAC_256 0x000FAC0C
|
||||
#define WLAN_CIPHER_SUITE_BIP_CMAC_256 0x000FAC0D
|
||||
#define SUITE(oui, id) (((oui) << 8) | (id))
|
||||
|
||||
#define WLAN_CIPHER_SUITE_SMS4 0x00147201
|
||||
/* cipher suite selectors */
|
||||
#define WLAN_CIPHER_SUITE_USE_GROUP SUITE(0x000FAC, 0)
|
||||
#define WLAN_CIPHER_SUITE_WEP40 SUITE(0x000FAC, 1)
|
||||
#define WLAN_CIPHER_SUITE_TKIP SUITE(0x000FAC, 2)
|
||||
/* reserved: SUITE(0x000FAC, 3) */
|
||||
#define WLAN_CIPHER_SUITE_CCMP SUITE(0x000FAC, 4)
|
||||
#define WLAN_CIPHER_SUITE_WEP104 SUITE(0x000FAC, 5)
|
||||
#define WLAN_CIPHER_SUITE_AES_CMAC SUITE(0x000FAC, 6)
|
||||
#define WLAN_CIPHER_SUITE_GCMP SUITE(0x000FAC, 8)
|
||||
#define WLAN_CIPHER_SUITE_GCMP_256 SUITE(0x000FAC, 9)
|
||||
#define WLAN_CIPHER_SUITE_CCMP_256 SUITE(0x000FAC, 10)
|
||||
#define WLAN_CIPHER_SUITE_BIP_GMAC_128 SUITE(0x000FAC, 11)
|
||||
#define WLAN_CIPHER_SUITE_BIP_GMAC_256 SUITE(0x000FAC, 12)
|
||||
#define WLAN_CIPHER_SUITE_BIP_CMAC_256 SUITE(0x000FAC, 13)
|
||||
|
||||
#define WLAN_CIPHER_SUITE_SMS4 SUITE(0x001472, 1)
|
||||
|
||||
/* AKM suite selectors */
|
||||
#define WLAN_AKM_SUITE_8021X 0x000FAC01
|
||||
#define WLAN_AKM_SUITE_PSK 0x000FAC02
|
||||
#define WLAN_AKM_SUITE_8021X_SHA256 0x000FAC05
|
||||
#define WLAN_AKM_SUITE_PSK_SHA256 0x000FAC06
|
||||
#define WLAN_AKM_SUITE_TDLS 0x000FAC07
|
||||
#define WLAN_AKM_SUITE_SAE 0x000FAC08
|
||||
#define WLAN_AKM_SUITE_FT_OVER_SAE 0x000FAC09
|
||||
#define WLAN_AKM_SUITE_8021X SUITE(0x000FAC, 1)
|
||||
#define WLAN_AKM_SUITE_PSK SUITE(0x000FAC, 2)
|
||||
#define WLAN_AKM_SUITE_8021X_SHA256 SUITE(0x000FAC, 5)
|
||||
#define WLAN_AKM_SUITE_PSK_SHA256 SUITE(0x000FAC, 6)
|
||||
#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7)
|
||||
#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8)
|
||||
#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9)
|
||||
|
||||
#define WLAN_MAX_KEY_LEN 32
|
||||
|
||||
|
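The SUITE() helper above simply packs the 24-bit OUI and the one-byte suite type into the 32-bit selector, so the new definitions expand to the same values as the old literals. A small self-check sketch (wlan_suite_selftest() is hypothetical; BUILD_BUG_ON comes from linux/bug.h):

static inline void wlan_suite_selftest(void)
{
	BUILD_BUG_ON(WLAN_CIPHER_SUITE_CCMP != 0x000FAC04);	/* (0x000FAC << 8) | 4 */
	BUILD_BUG_ON(WLAN_AKM_SUITE_PSK     != 0x000FAC02);	/* (0x000FAC << 8) | 2 */
	BUILD_BUG_ON(WLAN_CIPHER_SUITE_SMS4 != 0x00147201);	/* (0x001472 << 8) | 1 */
}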
@@ -46,6 +46,8 @@ struct br_ip_list {
|
||||
#define BR_LEARNING_SYNC BIT(9)
|
||||
#define BR_PROXYARP_WIFI BIT(10)
|
||||
#define BR_MCAST_FLOOD BIT(11)
|
||||
#define BR_MULTICAST_TO_UNICAST BIT(12)
|
||||
#define BR_VLAN_TUNNEL BIT(13)
|
||||
|
||||
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
|
||||
|
||||
|
@@ -66,8 +66,6 @@ struct dlci_local
|
||||
|
||||
struct frad_local
|
||||
{
|
||||
struct net_device_stats stats;
|
||||
|
||||
/* devices which this FRAD is slaved to */
|
||||
struct net_device *master[CONFIG_DLCI_MAX];
|
||||
short dlci[CONFIG_DLCI_MAX];
|
||||
|
@@ -9,19 +9,6 @@
|
||||
#include <net/netlink.h>
|
||||
#include <linux/u64_stats_sync.h>
|
||||
|
||||
#if IS_ENABLED(CONFIG_MACVTAP)
|
||||
struct socket *macvtap_get_socket(struct file *);
|
||||
#else
|
||||
#include <linux/err.h>
|
||||
#include <linux/errno.h>
|
||||
struct file;
|
||||
struct socket;
|
||||
static inline struct socket *macvtap_get_socket(struct file *f)
|
||||
{
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
#endif /* CONFIG_MACVTAP */
|
||||
|
||||
struct macvlan_port;
|
||||
struct macvtap_queue;
|
||||
|
||||
@@ -29,7 +16,7 @@ struct macvtap_queue;
|
||||
* Maximum times a macvtap device can be opened. This can be used to
|
||||
* configure the number of receive queue, e.g. for multiqueue virtio.
|
||||
*/
|
||||
#define MAX_MACVTAP_QUEUES 256
|
||||
#define MAX_TAP_QUEUES 256
|
||||
|
||||
#define MACVLAN_MC_FILTER_BITS 8
|
||||
#define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS)
|
||||
@@ -49,7 +36,7 @@ struct macvlan_dev {
|
||||
enum macvlan_mode mode;
|
||||
u16 flags;
|
||||
/* This array tracks active taps. */
|
||||
struct macvtap_queue __rcu *taps[MAX_MACVTAP_QUEUES];
|
||||
struct tap_queue __rcu *taps[MAX_TAP_QUEUES];
|
||||
/* This list tracks all taps (both enabled and disabled) */
|
||||
struct list_head queue_list;
|
||||
int numvtaps;
|
||||
|
include/linux/if_tap.h (new file, 75 lines)
@@ -0,0 +1,75 @@
|
||||
#ifndef _LINUX_IF_TAP_H_
|
||||
#define _LINUX_IF_TAP_H_
|
||||
|
||||
#if IS_ENABLED(CONFIG_TAP)
|
||||
struct socket *tap_get_socket(struct file *);
|
||||
#else
|
||||
#include <linux/err.h>
|
||||
#include <linux/errno.h>
|
||||
struct file;
|
||||
struct socket;
|
||||
static inline struct socket *tap_get_socket(struct file *f)
|
||||
{
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
#endif /* CONFIG_TAP */
|
||||
|
||||
#include <net/sock.h>
|
||||
#include <linux/skb_array.h>
|
||||
|
||||
#define MAX_TAP_QUEUES 256
|
||||
|
||||
struct tap_queue;
|
||||
|
||||
struct tap_dev {
|
||||
struct net_device *dev;
|
||||
u16 flags;
|
||||
/* This array tracks active taps. */
|
||||
struct tap_queue __rcu *taps[MAX_TAP_QUEUES];
|
||||
/* This list tracks all taps (both enabled and disabled) */
|
||||
struct list_head queue_list;
|
||||
int numvtaps;
|
||||
int numqueues;
|
||||
netdev_features_t tap_features;
|
||||
int minor;
|
||||
|
||||
void (*update_features)(struct tap_dev *tap, netdev_features_t features);
|
||||
void (*count_tx_dropped)(struct tap_dev *tap);
|
||||
void (*count_rx_dropped)(struct tap_dev *tap);
|
||||
};
|
||||
|
||||
/*
|
||||
* A tap queue is the central object of tap module, it connects
|
||||
* an open character device to virtual interface. There can be
|
||||
* multiple queues on one interface, which map back to queues
|
||||
* implemented in hardware on the underlying device.
|
||||
*
|
||||
* tap_proto is used to allocate queues through the sock allocation
|
||||
* mechanism.
|
||||
*
|
||||
*/
|
||||
|
||||
struct tap_queue {
|
||||
struct sock sk;
|
||||
struct socket sock;
|
||||
struct socket_wq wq;
|
||||
int vnet_hdr_sz;
|
||||
struct tap_dev __rcu *tap;
|
||||
struct file *file;
|
||||
unsigned int flags;
|
||||
u16 queue_index;
|
||||
bool enabled;
|
||||
struct list_head next;
|
||||
struct skb_array skb_array;
|
||||
};
|
||||
|
||||
rx_handler_result_t tap_handle_frame(struct sk_buff **pskb);
|
||||
void tap_del_queues(struct tap_dev *tap);
|
||||
int tap_get_minor(dev_t major, struct tap_dev *tap);
|
||||
void tap_free_minor(dev_t major, struct tap_dev *tap);
|
||||
int tap_queue_resize(struct tap_dev *tap);
|
||||
int tap_create_cdev(struct cdev *tap_cdev,
|
||||
dev_t *tap_major, const char *device_name);
|
||||
void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev);
|
||||
|
||||
#endif /*_LINUX_IF_TAP_H_*/
|
@@ -11,139 +11,15 @@
|
||||
#define _IIO_BUFFER_GENERIC_H_
|
||||
#include <linux/sysfs.h>
|
||||
#include <linux/iio/iio.h>
|
||||
#include <linux/kref.h>
|
||||
|
||||
#ifdef CONFIG_IIO_BUFFER
|
||||
|
||||
struct iio_buffer;
|
||||
|
||||
/**
|
||||
* INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be
|
||||
* configured. It has a fixed value which will be buffer specific.
|
||||
*/
|
||||
#define INDIO_BUFFER_FLAG_FIXED_WATERMARK BIT(0)
|
||||
void iio_buffer_set_attrs(struct iio_buffer *buffer,
|
||||
const struct attribute **attrs);
|
||||
|
||||
/**
|
||||
* struct iio_buffer_access_funcs - access functions for buffers.
|
||||
* @store_to: actually store stuff to the buffer
|
||||
* @read_first_n: try to get a specified number of bytes (must exist)
|
||||
* @data_available: indicates how much data is available for reading from
|
||||
* the buffer.
|
||||
* @request_update: if a parameter change has been marked, update underlying
|
||||
* storage.
|
||||
* @set_bytes_per_datum:set number of bytes per datum
|
||||
* @set_length: set number of datums in buffer
|
||||
* @enable: called if the buffer is attached to a device and the
|
||||
* device starts sampling. Calls are balanced with
|
||||
* @disable.
|
||||
* @disable: called if the buffer is attached to a device and the
|
||||
 * device stops sampling. Calls are balanced with @enable.
|
||||
* @release: called when the last reference to the buffer is dropped,
|
||||
* should free all resources allocated by the buffer.
|
||||
* @modes: Supported operating modes by this buffer type
|
||||
* @flags: A bitmask combination of INDIO_BUFFER_FLAG_*
|
||||
*
|
||||
* The purpose of this structure is to make the buffer element
|
||||
* modular as event for a given driver, different usecases may require
|
||||
* different buffer designs (space efficiency vs speed for example).
|
||||
*
|
||||
* It is worth noting that a given buffer implementation may only support a
|
||||
* small proportion of these functions. The core code 'should' cope fine with
|
||||
* any of them not existing.
|
||||
**/
|
||||
struct iio_buffer_access_funcs {
|
||||
int (*store_to)(struct iio_buffer *buffer, const void *data);
|
||||
int (*read_first_n)(struct iio_buffer *buffer,
|
||||
size_t n,
|
||||
char __user *buf);
|
||||
size_t (*data_available)(struct iio_buffer *buffer);
|
||||
|
||||
int (*request_update)(struct iio_buffer *buffer);
|
||||
|
||||
int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
|
||||
int (*set_length)(struct iio_buffer *buffer, int length);
|
||||
|
||||
int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
|
||||
int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
|
||||
|
||||
void (*release)(struct iio_buffer *buffer);
|
||||
|
||||
unsigned int modes;
|
||||
unsigned int flags;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iio_buffer - general buffer structure
|
||||
* @length: [DEVICE] number of datums in buffer
|
||||
* @bytes_per_datum: [DEVICE] size of individual datum including timestamp
|
||||
* @scan_el_attrs: [DRIVER] control of scan elements if that scan mode
|
||||
* control method is used
|
||||
* @scan_mask: [INTERN] bitmask used in masking scan mode elements
|
||||
* @scan_timestamp: [INTERN] does the scan mode include a timestamp
|
||||
* @access: [DRIVER] buffer access functions associated with the
|
||||
* implementation.
|
||||
* @scan_el_dev_attr_list:[INTERN] list of scan element related attributes.
|
||||
* @buffer_group: [INTERN] attributes of the buffer group
|
||||
* @scan_el_group: [DRIVER] attribute group for those attributes not
|
||||
* created from the iio_chan_info array.
|
||||
* @pollq: [INTERN] wait queue to allow for polling on the buffer.
|
||||
* @stufftoread: [INTERN] flag to indicate new data.
|
||||
* @attrs: [INTERN] standard attributes of the buffer
|
||||
* @demux_list: [INTERN] list of operations required to demux the scan.
|
||||
* @demux_bounce: [INTERN] buffer for doing gather from incoming scan.
|
||||
* @buffer_list: [INTERN] entry in the devices list of current buffers.
|
||||
* @ref: [INTERN] reference count of the buffer.
|
||||
* @watermark: [INTERN] number of datums to wait for poll/read.
|
||||
*/
|
||||
struct iio_buffer {
|
||||
int length;
|
||||
int bytes_per_datum;
|
||||
struct attribute_group *scan_el_attrs;
|
||||
long *scan_mask;
|
||||
bool scan_timestamp;
|
||||
const struct iio_buffer_access_funcs *access;
|
||||
struct list_head scan_el_dev_attr_list;
|
||||
struct attribute_group buffer_group;
|
||||
struct attribute_group scan_el_group;
|
||||
wait_queue_head_t pollq;
|
||||
bool stufftoread;
|
||||
const struct attribute **attrs;
|
||||
struct list_head demux_list;
|
||||
void *demux_bounce;
|
||||
struct list_head buffer_list;
|
||||
struct kref ref;
|
||||
unsigned int watermark;
|
||||
};
|
||||
|
||||
/**
|
||||
* iio_update_buffers() - add or remove buffer from active list
|
||||
* @indio_dev: device to add buffer to
|
||||
* @insert_buffer: buffer to insert
|
||||
* @remove_buffer: buffer_to_remove
|
||||
*
|
||||
* Note this will tear down the all buffering and build it up again
|
||||
*/
|
||||
int iio_update_buffers(struct iio_dev *indio_dev,
|
||||
struct iio_buffer *insert_buffer,
|
||||
struct iio_buffer *remove_buffer);
|
||||
|
||||
/**
|
||||
* iio_buffer_init() - Initialize the buffer structure
|
||||
* @buffer: buffer to be initialized
|
||||
**/
|
||||
void iio_buffer_init(struct iio_buffer *buffer);
|
||||
|
||||
int iio_scan_mask_query(struct iio_dev *indio_dev,
|
||||
struct iio_buffer *buffer, int bit);
|
||||
|
||||
/**
|
||||
* iio_push_to_buffers() - push to a registered buffer.
|
||||
* @indio_dev: iio_dev structure for device.
|
||||
* @data: Full scan.
|
||||
*/
|
||||
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data);
|
||||
|
||||
/*
|
||||
/**
|
||||
* iio_push_to_buffers_with_timestamp() - push data and timestamp to buffers
|
||||
* @indio_dev: iio_dev structure for device.
|
||||
* @data: sample data
|
||||
@@ -168,34 +44,10 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
|
||||
return iio_push_to_buffers(indio_dev, data);
|
||||
}
|
||||
|
||||
int iio_update_demux(struct iio_dev *indio_dev);
|
||||
|
||||
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
|
||||
const unsigned long *mask);
|
||||
const unsigned long *mask);
|
||||
|
||||
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer);
|
||||
void iio_buffer_put(struct iio_buffer *buffer);
|
||||
|
||||
/**
|
||||
* iio_device_attach_buffer - Attach a buffer to a IIO device
|
||||
* @indio_dev: The device the buffer should be attached to
|
||||
* @buffer: The buffer to attach to the device
|
||||
*
|
||||
* This function attaches a buffer to a IIO device. The buffer stays attached to
|
||||
* the device until the device is freed. The function should only be called at
|
||||
* most once per device.
|
||||
*/
|
||||
static inline void iio_device_attach_buffer(struct iio_dev *indio_dev,
|
||||
struct iio_buffer *buffer)
|
||||
{
|
||||
indio_dev->buffer = iio_buffer_get(buffer);
|
||||
}
|
||||
|
||||
#else /* CONFIG_IIO_BUFFER */
|
||||
|
||||
static inline void iio_buffer_get(struct iio_buffer *buffer) {}
|
||||
static inline void iio_buffer_put(struct iio_buffer *buffer) {}
|
||||
|
||||
#endif /* CONFIG_IIO_BUFFER */
|
||||
void iio_device_attach_buffer(struct iio_dev *indio_dev,
|
||||
struct iio_buffer *buffer);
|
||||
|
||||
#endif /* _IIO_BUFFER_GENERIC_H_ */
|
||||
|
include/linux/iio/buffer_impl.h (new file, 162 lines)
@@ -0,0 +1,162 @@
|
||||
#ifndef _IIO_BUFFER_GENERIC_IMPL_H_
|
||||
#define _IIO_BUFFER_GENERIC_IMPL_H_
|
||||
#include <linux/sysfs.h>
|
||||
#include <linux/kref.h>
|
||||
|
||||
#ifdef CONFIG_IIO_BUFFER
|
||||
|
||||
struct iio_dev;
|
||||
struct iio_buffer;
|
||||
|
||||
/**
|
||||
* INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be
|
||||
* configured. It has a fixed value which will be buffer specific.
|
||||
*/
|
||||
#define INDIO_BUFFER_FLAG_FIXED_WATERMARK BIT(0)
|
||||
|
||||
/**
|
||||
* struct iio_buffer_access_funcs - access functions for buffers.
|
||||
* @store_to: actually store stuff to the buffer
|
||||
* @read_first_n: try to get a specified number of bytes (must exist)
|
||||
* @data_available: indicates how much data is available for reading from
|
||||
* the buffer.
|
||||
* @request_update: if a parameter change has been marked, update underlying
|
||||
* storage.
|
||||
* @set_bytes_per_datum:set number of bytes per datum
|
||||
* @set_length: set number of datums in buffer
|
||||
* @enable: called if the buffer is attached to a device and the
|
||||
* device starts sampling. Calls are balanced with
|
||||
* @disable.
|
||||
* @disable: called if the buffer is attached to a device and the
|
||||
 * device stops sampling. Calls are balanced with @enable.
|
||||
* @release: called when the last reference to the buffer is dropped,
|
||||
* should free all resources allocated by the buffer.
|
||||
* @modes: Supported operating modes by this buffer type
|
||||
* @flags: A bitmask combination of INDIO_BUFFER_FLAG_*
|
||||
*
|
||||
* The purpose of this structure is to make the buffer element
|
||||
* modular as event for a given driver, different usecases may require
|
||||
* different buffer designs (space efficiency vs speed for example).
|
||||
*
|
||||
* It is worth noting that a given buffer implementation may only support a
|
||||
* small proportion of these functions. The core code 'should' cope fine with
|
||||
* any of them not existing.
|
||||
**/
|
||||
struct iio_buffer_access_funcs {
|
||||
int (*store_to)(struct iio_buffer *buffer, const void *data);
|
||||
int (*read_first_n)(struct iio_buffer *buffer,
|
||||
size_t n,
|
||||
char __user *buf);
|
||||
size_t (*data_available)(struct iio_buffer *buffer);
|
||||
|
||||
int (*request_update)(struct iio_buffer *buffer);
|
||||
|
||||
int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
|
||||
int (*set_length)(struct iio_buffer *buffer, int length);
|
||||
|
||||
int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
|
||||
int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
|
||||
|
||||
void (*release)(struct iio_buffer *buffer);
|
||||
|
||||
unsigned int modes;
|
||||
unsigned int flags;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iio_buffer - general buffer structure
|
||||
*
|
||||
* Note that the internals of this structure should only be of interest to
|
||||
* those writing new buffer implementations.
|
||||
*/
|
||||
struct iio_buffer {
|
||||
/** @length: Number of datums in buffer. */
|
||||
int length;
|
||||
|
||||
/** @bytes_per_datum: Size of individual datum including timestamp. */
|
||||
int bytes_per_datum;
|
||||
|
||||
/**
|
||||
* @access: Buffer access functions associated with the
|
||||
* implementation.
|
||||
*/
|
||||
const struct iio_buffer_access_funcs *access;
|
||||
|
||||
/** @scan_mask: Bitmask used in masking scan mode elements. */
|
||||
long *scan_mask;
|
||||
|
||||
/** @demux_list: List of operations required to demux the scan. */
|
||||
struct list_head demux_list;
|
||||
|
||||
/** @pollq: Wait queue to allow for polling on the buffer. */
|
||||
wait_queue_head_t pollq;
|
||||
|
||||
/** @watermark: Number of datums to wait for poll/read. */
|
||||
unsigned int watermark;
|
||||
|
||||
/* private: */
|
||||
/*
|
||||
* @scan_el_attrs: Control of scan elements if that scan mode
|
||||
* control method is used.
|
||||
*/
|
||||
struct attribute_group *scan_el_attrs;
|
||||
|
||||
/* @scan_timestamp: Does the scan mode include a timestamp. */
|
||||
bool scan_timestamp;
|
||||
|
||||
/* @scan_el_dev_attr_list: List of scan element related attributes. */
|
||||
struct list_head scan_el_dev_attr_list;
|
||||
|
||||
/* @buffer_group: Attributes of the buffer group. */
|
||||
struct attribute_group buffer_group;
|
||||
|
||||
/*
|
||||
* @scan_el_group: Attribute group for those attributes not
|
||||
* created from the iio_chan_info array.
|
||||
*/
|
||||
struct attribute_group scan_el_group;
|
||||
|
||||
/* @stufftoread: Flag to indicate new data. */
|
||||
bool stufftoread;
|
||||
|
||||
/* @attrs: Standard attributes of the buffer. */
|
||||
const struct attribute **attrs;
|
||||
|
||||
/* @demux_bounce: Buffer for doing gather from incoming scan. */
|
||||
void *demux_bounce;
|
||||
|
||||
/* @buffer_list: Entry in the devices list of current buffers. */
|
||||
struct list_head buffer_list;
|
||||
|
||||
/* @ref: Reference count of the buffer. */
|
||||
struct kref ref;
|
||||
};
|
||||
|
||||
/**
|
||||
* iio_update_buffers() - add or remove buffer from active list
|
||||
* @indio_dev: device to add buffer to
|
||||
* @insert_buffer: buffer to insert
|
||||
* @remove_buffer: buffer_to_remove
|
||||
*
|
||||
* Note this will tear down the all buffering and build it up again
|
||||
*/
|
||||
int iio_update_buffers(struct iio_dev *indio_dev,
|
||||
struct iio_buffer *insert_buffer,
|
||||
struct iio_buffer *remove_buffer);
|
||||
|
||||
/**
|
||||
* iio_buffer_init() - Initialize the buffer structure
|
||||
* @buffer: buffer to be initialized
|
||||
**/
|
||||
void iio_buffer_init(struct iio_buffer *buffer);
|
||||
|
||||
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer);
|
||||
void iio_buffer_put(struct iio_buffer *buffer);
|
||||
|
||||
#else /* CONFIG_IIO_BUFFER */
|
||||
|
||||
static inline void iio_buffer_get(struct iio_buffer *buffer) {}
|
||||
static inline void iio_buffer_put(struct iio_buffer *buffer) {}
|
||||
|
||||
#endif /* CONFIG_IIO_BUFFER */
|
||||
#endif /* _IIO_BUFFER_GENERIC_IMPL_H_ */
|
@@ -28,4 +28,13 @@ static inline void st_sensors_of_i2c_probe(struct i2c_client *client,
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
int st_sensors_match_acpi_device(struct device *dev);
|
||||
#else
|
||||
static inline int st_sensors_match_acpi_device(struct device *dev)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* ST_SENSORS_I2C_H */
|
||||
|
@@ -1,9 +1,8 @@
|
||||
#ifndef __LINUX_IIO_KFIFO_BUF_H__
|
||||
#define __LINUX_IIO_KFIFO_BUF_H__
|
||||
|
||||
#include <linux/kfifo.h>
|
||||
#include <linux/iio/iio.h>
|
||||
#include <linux/iio/buffer.h>
|
||||
struct iio_buffer;
|
||||
struct device;
|
||||
|
||||
struct iio_buffer *iio_kfifo_allocate(void);
|
||||
void iio_kfifo_free(struct iio_buffer *r);
|
||||
|
include/linux/iio/timer/stm32-timer-trigger.h (new file, 62 lines)
@@ -0,0 +1,62 @@
|
||||
/*
|
||||
* Copyright (C) STMicroelectronics 2016
|
||||
*
|
||||
* Author: Benjamin Gaignard <benjamin.gaignard@st.com>
|
||||
*
|
||||
* License terms: GNU General Public License (GPL), version 2
|
||||
*/
|
||||
|
||||
#ifndef _STM32_TIMER_TRIGGER_H_
|
||||
#define _STM32_TIMER_TRIGGER_H_
|
||||
|
||||
#define TIM1_TRGO "tim1_trgo"
|
||||
#define TIM1_CH1 "tim1_ch1"
|
||||
#define TIM1_CH2 "tim1_ch2"
|
||||
#define TIM1_CH3 "tim1_ch3"
|
||||
#define TIM1_CH4 "tim1_ch4"
|
||||
|
||||
#define TIM2_TRGO "tim2_trgo"
|
||||
#define TIM2_CH1 "tim2_ch1"
|
||||
#define TIM2_CH2 "tim2_ch2"
|
||||
#define TIM2_CH3 "tim2_ch3"
|
||||
#define TIM2_CH4 "tim2_ch4"
|
||||
|
||||
#define TIM3_TRGO "tim3_trgo"
|
||||
#define TIM3_CH1 "tim3_ch1"
|
||||
#define TIM3_CH2 "tim3_ch2"
|
||||
#define TIM3_CH3 "tim3_ch3"
|
||||
#define TIM3_CH4 "tim3_ch4"
|
||||
|
||||
#define TIM4_TRGO "tim4_trgo"
|
||||
#define TIM4_CH1 "tim4_ch1"
|
||||
#define TIM4_CH2 "tim4_ch2"
|
||||
#define TIM4_CH3 "tim4_ch3"
|
||||
#define TIM4_CH4 "tim4_ch4"
|
||||
|
||||
#define TIM5_TRGO "tim5_trgo"
|
||||
#define TIM5_CH1 "tim5_ch1"
|
||||
#define TIM5_CH2 "tim5_ch2"
|
||||
#define TIM5_CH3 "tim5_ch3"
|
||||
#define TIM5_CH4 "tim5_ch4"
|
||||
|
||||
#define TIM6_TRGO "tim6_trgo"
|
||||
|
||||
#define TIM7_TRGO "tim7_trgo"
|
||||
|
||||
#define TIM8_TRGO "tim8_trgo"
|
||||
#define TIM8_CH1 "tim8_ch1"
|
||||
#define TIM8_CH2 "tim8_ch2"
|
||||
#define TIM8_CH3 "tim8_ch3"
|
||||
#define TIM8_CH4 "tim8_ch4"
|
||||
|
||||
#define TIM9_TRGO "tim9_trgo"
|
||||
#define TIM9_CH1 "tim9_ch1"
|
||||
#define TIM9_CH2 "tim9_ch2"
|
||||
|
||||
#define TIM12_TRGO "tim12_trgo"
|
||||
#define TIM12_CH1 "tim12_ch1"
|
||||
#define TIM12_CH2 "tim12_ch2"
|
||||
|
||||
bool is_stm32_timer_trigger(struct iio_trigger *trig);
|
||||
|
||||
#endif
|
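is_stm32_timer_trigger() lets an ADC/DAC driver confirm that the trigger it was handed really is one of the timer triggers named above. A hedged sketch of a validate_trigger callback using it; the driver name is hypothetical, the callback signature is the standard IIO one:

#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/iio/timer/stm32-timer-trigger.h>

/* Sketch: only accept STM32 timer triggers (e.g. TIM1_TRGO) for this device. */
static int my_adc_validate_trigger(struct iio_dev *indio_dev,
				   struct iio_trigger *trig)
{
	return is_stm32_timer_trigger(trig) ? 0 : -EINVAL;
}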
@@ -126,10 +126,10 @@ void prepare_namespace(void);
void __init load_default_modules(void);
int __init init_rootfs(void);

#if defined(CONFIG_DEBUG_RODATA) || defined(CONFIG_DEBUG_SET_MODULE_RONX)
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
extern bool rodata_enabled;
#endif
#ifdef CONFIG_DEBUG_RODATA
#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void);
#endif
@@ -12,8 +12,10 @@
|
||||
#include <linux/securebits.h>
|
||||
#include <linux/seqlock.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include <linux/sched/autogroup.h>
|
||||
#include <net/net_namespace.h>
|
||||
#include <linux/sched/rt.h>
|
||||
#include <linux/mm_types.h>
|
||||
|
||||
#include <asm/thread_info.h>
|
||||
|
||||
@@ -149,8 +151,6 @@ extern struct group_info init_groups;
|
||||
|
||||
extern struct cred init_cred;
|
||||
|
||||
extern struct task_group root_task_group;
|
||||
|
||||
#ifdef CONFIG_CGROUP_SCHED
|
||||
# define INIT_CGROUP_SCHED(tsk) \
|
||||
.sched_task_group = &root_task_group,
|
||||
|
@@ -80,24 +80,9 @@ int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
|
||||
unsigned int rows, unsigned int cols,
|
||||
unsigned short *keymap,
|
||||
struct input_dev *input_dev);
|
||||
int matrix_keypad_parse_properties(struct device *dev,
|
||||
unsigned int *rows, unsigned int *cols);
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
/**
|
||||
* matrix_keypad_parse_of_params() - Read parameters from matrix-keypad node
|
||||
*
|
||||
* @dev: Device containing of_node
|
||||
* @rows: Returns number of matrix rows
|
||||
* @cols: Returns number of matrix columns
|
||||
* @return 0 if OK, <0 on error
|
||||
*/
|
||||
int matrix_keypad_parse_of_params(struct device *dev,
|
||||
unsigned int *rows, unsigned int *cols);
|
||||
#else
|
||||
static inline int matrix_keypad_parse_of_params(struct device *dev,
|
||||
unsigned int *rows, unsigned int *cols)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
#endif /* CONFIG_OF */
|
||||
#define matrix_keypad_parse_of_params matrix_keypad_parse_properties
|
||||
|
||||
#endif /* _MATRIX_KEYPAD_H */
|
||||
|
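matrix_keypad_parse_properties() is the device-property based replacement for the old OF-only helper, and the compatibility #define above keeps existing callers building. A sketch of a probe path using it; the function name and the keymap allocation step are hypothetical, error handling trimmed:

static int my_keypad_probe(struct device *dev, struct input_dev *input)
{
	unsigned int rows, cols;
	int err;

	err = matrix_keypad_parse_properties(dev, &rows, &cols);
	if (err)
		return err;

	/* ... allocate rows * cols keymap entries, then build the keymap ... */
	return 0;
}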
@@ -1,44 +0,0 @@
|
||||
/*
|
||||
* TCA8418 keypad platform support
|
||||
*
|
||||
* Copyright (C) 2011 Fuel7, Inc. All rights reserved.
|
||||
*
|
||||
* Author: Kyle Manna <kyle.manna@fuel7.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public
|
||||
* License v2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public
|
||||
* License along with this program; if not, write to the
|
||||
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
|
||||
* Boston, MA 021110-1307, USA.
|
||||
*
|
||||
* If you can't comply with GPLv2, alternative licensing terms may be
|
||||
* arranged. Please contact Fuel7, Inc. (http://fuel7.com/) for proprietary
|
||||
* alternative licensing inquiries.
|
||||
*/
|
||||
|
||||
#ifndef _TCA8418_KEYPAD_H
|
||||
#define _TCA8418_KEYPAD_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/input/matrix_keypad.h>
|
||||
|
||||
#define TCA8418_I2C_ADDR 0x34
|
||||
#define TCA8418_NAME "tca8418_keypad"
|
||||
|
||||
struct tca8418_keypad_platform_data {
|
||||
const struct matrix_keymap_data *keymap_data;
|
||||
unsigned rows;
|
||||
unsigned cols;
|
||||
bool rep;
|
||||
bool irq_is_gpio;
|
||||
};
|
||||
|
||||
#endif
|
@@ -72,17 +72,16 @@ struct iomap_ops {
};

ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
		struct iomap_ops *ops);
		const struct iomap_ops *ops);
int iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		struct iomap_ops *ops);
		const struct iomap_ops *ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
		bool *did_zero, struct iomap_ops *ops);
		bool *did_zero, const struct iomap_ops *ops);
int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		struct iomap_ops *ops);
int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
		struct iomap_ops *ops);
		const struct iomap_ops *ops);
int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops);
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		loff_t start, loff_t len, struct iomap_ops *ops);
		loff_t start, loff_t len, const struct iomap_ops *ops);

/*
 * Flags for direct I/O ->end_io:
@@ -92,6 +91,6 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
typedef int (iomap_dio_end_io_t)(struct kiocb *iocb, ssize_t ret,
		unsigned flags);
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		struct iomap_ops *ops, iomap_dio_end_io_t end_io);
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io);

#endif /* LINUX_IOMAP_H */
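A minimal sketch of what the constification above allows; the ops table and the call site are illustrative only, and the iomap_ops field initializers are elided since the struct body is not shown in this hunk.

/* Illustrative only: a filesystem can now keep its ops table read-only and
 * still hand it to the iomap helpers, e.g. iomap_zero_range() above. */
static const struct iomap_ops example_iomap_ops = {
	/* field initializers elided; struct iomap_ops not shown here */
};

static int example_zero(struct inode *inode, loff_t pos, loff_t len)
{
	bool did_zero;

	return iomap_zero_range(inode, pos, len, &did_zero, &example_iomap_ops);
}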
@@ -17,7 +17,7 @@

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
@@ -100,7 +100,7 @@ struct ipmi_user_hndl {

/* Create a new user of the IPMI layer on the given interface number. */
int ipmi_create_user(unsigned int if_num,
		     struct ipmi_user_hndl *handler,
		     const struct ipmi_user_hndl *handler,
		     void *handler_data,
		     ipmi_user_t *user);
@@ -69,6 +69,7 @@ struct ipv6_devconf {
	__s32		seg6_require_hmac;
#endif
	__u32		enhanced_dad;
	__u32		addr_gen_mode;

	struct ctl_table_header *sysctl_header;
};
@@ -349,8 +349,32 @@
/*
 * CPU interface registers
 */
#define ICC_CTLR_EL1_EOImode_drop_dir	(0U << 1)
#define ICC_CTLR_EL1_EOImode_drop	(1U << 1)
#define ICC_CTLR_EL1_EOImode_SHIFT	(1)
#define ICC_CTLR_EL1_EOImode_drop_dir	(0U << ICC_CTLR_EL1_EOImode_SHIFT)
#define ICC_CTLR_EL1_EOImode_drop	(1U << ICC_CTLR_EL1_EOImode_SHIFT)
#define ICC_CTLR_EL1_EOImode_MASK	(1 << ICC_CTLR_EL1_EOImode_SHIFT)
#define ICC_CTLR_EL1_CBPR_SHIFT		0
#define ICC_CTLR_EL1_CBPR_MASK		(1 << ICC_CTLR_EL1_CBPR_SHIFT)
#define ICC_CTLR_EL1_PRI_BITS_SHIFT	8
#define ICC_CTLR_EL1_PRI_BITS_MASK	(0x7 << ICC_CTLR_EL1_PRI_BITS_SHIFT)
#define ICC_CTLR_EL1_ID_BITS_SHIFT	11
#define ICC_CTLR_EL1_ID_BITS_MASK	(0x7 << ICC_CTLR_EL1_ID_BITS_SHIFT)
#define ICC_CTLR_EL1_SEIS_SHIFT		14
#define ICC_CTLR_EL1_SEIS_MASK		(0x1 << ICC_CTLR_EL1_SEIS_SHIFT)
#define ICC_CTLR_EL1_A3V_SHIFT		15
#define ICC_CTLR_EL1_A3V_MASK		(0x1 << ICC_CTLR_EL1_A3V_SHIFT)
#define ICC_PMR_EL1_SHIFT		0
#define ICC_PMR_EL1_MASK		(0xff << ICC_PMR_EL1_SHIFT)
#define ICC_BPR0_EL1_SHIFT		0
#define ICC_BPR0_EL1_MASK		(0x7 << ICC_BPR0_EL1_SHIFT)
#define ICC_BPR1_EL1_SHIFT		0
#define ICC_BPR1_EL1_MASK		(0x7 << ICC_BPR1_EL1_SHIFT)
#define ICC_IGRPEN0_EL1_SHIFT		0
#define ICC_IGRPEN0_EL1_MASK		(1 << ICC_IGRPEN0_EL1_SHIFT)
#define ICC_IGRPEN1_EL1_SHIFT		0
#define ICC_IGRPEN1_EL1_MASK		(1 << ICC_IGRPEN1_EL1_SHIFT)
#define ICC_SRE_EL1_DIB			(1U << 2)
#define ICC_SRE_EL1_DFB			(1U << 1)
#define ICC_SRE_EL1_SRE			(1U << 0)

/*
@@ -379,14 +403,29 @@
#define ICH_HCR_EN			(1 << 0)
#define ICH_HCR_UIE			(1 << 1)

#define ICH_VMCR_CTLR_SHIFT		0
#define ICH_VMCR_CTLR_MASK		(0x21f << ICH_VMCR_CTLR_SHIFT)
#define ICH_VMCR_CBPR_SHIFT		4
#define ICH_VMCR_CBPR_MASK		(1 << ICH_VMCR_CBPR_SHIFT)
#define ICH_VMCR_EOIM_SHIFT		9
#define ICH_VMCR_EOIM_MASK		(1 << ICH_VMCR_EOIM_SHIFT)
#define ICH_VMCR_BPR1_SHIFT		18
#define ICH_VMCR_BPR1_MASK		(7 << ICH_VMCR_BPR1_SHIFT)
#define ICH_VMCR_BPR0_SHIFT		21
#define ICH_VMCR_BPR0_MASK		(7 << ICH_VMCR_BPR0_SHIFT)
#define ICH_VMCR_PMR_SHIFT		24
#define ICH_VMCR_PMR_MASK		(0xffUL << ICH_VMCR_PMR_SHIFT)
#define ICH_VMCR_ENG0_SHIFT		0
#define ICH_VMCR_ENG0_MASK		(1 << ICH_VMCR_ENG0_SHIFT)
#define ICH_VMCR_ENG1_SHIFT		1
#define ICH_VMCR_ENG1_MASK		(1 << ICH_VMCR_ENG1_SHIFT)

#define ICH_VTR_PRI_BITS_SHIFT		29
#define ICH_VTR_PRI_BITS_MASK		(7 << ICH_VTR_PRI_BITS_SHIFT)
#define ICH_VTR_ID_BITS_SHIFT		23
#define ICH_VTR_ID_BITS_MASK		(7 << ICH_VTR_ID_BITS_SHIFT)
#define ICH_VTR_SEIS_SHIFT		22
#define ICH_VTR_SEIS_MASK		(1 << ICH_VTR_SEIS_SHIFT)
#define ICH_VTR_A3V_SHIFT		21
#define ICH_VTR_A3V_MASK		(1 << ICH_VTR_A3V_SHIFT)

#define ICC_IAR1_EL1_SPURIOUS		0x3ff
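As a reading aid for the new shift/mask pairs, a hypothetical field extractor; the helper name and the source of the register value are invented, only the two macros come from the hunk above.

/* Hypothetical: pull the PRIbits field out of a previously read ICC_CTLR_EL1
 * value using the mask/shift pair defined above. */
static inline unsigned int example_icc_pri_bits(u64 ctlr)
{
	return (ctlr & ICC_CTLR_EL1_PRI_BITS_MASK) >> ICC_CTLR_EL1_PRI_BITS_SHIFT;
}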
@@ -524,6 +524,10 @@ static inline struct irq_domain *irq_find_matching_fwnode(
{
	return NULL;
}
static inline bool irq_domain_check_msi_remap(void)
{
	return false;
}
#endif /* !CONFIG_IRQ_DOMAIN */

#endif /* _LINUX_IRQDOMAIN_H */
@@ -89,11 +89,24 @@ extern bool static_key_initialized;

struct static_key {
	atomic_t enabled;
/* Set lsb bit to 1 if branch is default true, 0 ot */
	struct jump_entry *entries;
#ifdef CONFIG_MODULES
	struct static_key_mod *next;
#endif
/*
 * Note:
 *   To make anonymous unions work with old compilers, the static
 *   initialization of them requires brackets. This creates a dependency
 *   on the order of the struct with the initializers. If any fields
 *   are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need
 *   to be modified.
 *
 * bit 0 => 1 if key is initially true
 *	    0 if initially false
 * bit 1 => 1 if points to struct static_key_mod
 *	    0 if points to struct jump_entry
 */
	union {
		unsigned long type;
		struct jump_entry *entries;
		struct static_key_mod *next;
	};
};

#else
@@ -118,9 +131,10 @@ struct module;

#ifdef HAVE_JUMP_LABEL

#define JUMP_TYPE_FALSE		0UL
#define JUMP_TYPE_TRUE		1UL
#define JUMP_TYPE_MASK		1UL
#define JUMP_TYPE_FALSE		0UL
#define JUMP_TYPE_TRUE		1UL
#define JUMP_TYPE_LINKED	2UL
#define JUMP_TYPE_MASK		3UL

static __always_inline bool static_key_false(struct static_key *key)
{
@@ -159,10 +173,10 @@ extern void static_key_disable(struct static_key *key);
 */
#define STATIC_KEY_INIT_TRUE				\
	{ .enabled = { 1 },				\
	  .entries = (void *)JUMP_TYPE_TRUE }
	  { .entries = (void *)JUMP_TYPE_TRUE } }
#define STATIC_KEY_INIT_FALSE				\
	{ .enabled = { 0 },				\
	  .entries = (void *)JUMP_TYPE_FALSE }
	  { .entries = (void *)JUMP_TYPE_FALSE } }

#else /* !HAVE_JUMP_LABEL */
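To make the bit 0 / bit 1 encoding described in the struct comment concrete, two hypothetical decoders; the helper names are invented, and they assume the HAVE_JUMP_LABEL layout of struct static_key with the JUMP_TYPE_* values above.

/* Hypothetical decoding of the anonymous union, per the comment above:
 * bit 0 of ->type says whether the key starts out true, bit 1 says whether
 * the pointer part refers to a struct static_key_mod chain. */
static inline bool example_key_initially_true(const struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool example_key_is_linked(const struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}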
@@ -1,12 +1,12 @@
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/sched.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

@@ -19,6 +19,7 @@ extern unsigned char kasan_zero_page[PAGE_SIZE];
extern pte_t kasan_zero_pte[PTRS_PER_PTE];
extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
extern pud_t kasan_zero_pud[PTRS_PER_PUD];
extern p4d_t kasan_zero_p4d[PTRS_PER_P4D];

void kasan_populate_zero_shadow(const void *shadow_start,
				const void *shadow_end);
@@ -30,16 +31,10 @@ static inline void *kasan_mem_to_shadow(const void *addr)
}

/* Enable reporting bugs after kasan_disable_current() */
static inline void kasan_enable_current(void)
{
	current->kasan_depth++;
}
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
static inline void kasan_disable_current(void)
{
	current->kasan_depth--;
}
extern void kasan_disable_current(void);

void kasan_unpoison_shadow(const void *address, size_t size);

@@ -52,7 +47,7 @@ void kasan_free_pages(struct page *page, unsigned int order);
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
			unsigned long *flags);
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_destroy(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);

void kasan_poison_slab(struct page *page);
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
@@ -98,7 +93,7 @@ static inline void kasan_cache_create(struct kmem_cache *cache,
				       size_t *size,
				       unsigned long *flags) {}
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_destroy(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}

static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
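The comments above describe a per-task reporting toggle; a short hypothetical usage sketch (the surrounding function and the deliberately poisoned read are invented, the two calls are the ones declared above).

/* Hypothetical: temporarily suppress KASAN reports around an access that is
 * known to touch poisoned memory on purpose. */
static void example_peek_poisoned(const char *p)
{
	kasan_disable_current();	/* stop reporting for this task */
	(void)READ_ONCE(*p);		/* access that would otherwise be flagged */
	kasan_enable_current();		/* re-enable reporting */
}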
@@ -8,7 +8,7 @@

/*
 * The use of "&&" / "||" is limited in certain expressions.
 * The followings enable to calculate "and" / "or" with macro expansion only.
 * The following enable to calculate "and" / "or" with macro expansion only.
 */
#define __and(x, y)			___and(x, y)
#define ___and(x, y)			____and(__ARG_PLACEHOLDER_##x, y)
@@ -100,16 +100,18 @@
)

/*
 * Divide positive or negative dividend by positive divisor and round
 * to closest integer. Result is undefined for negative divisors and
 * for negative dividends if the divisor variable type is unsigned.
 * Divide positive or negative dividend by positive or negative divisor
 * and round to closest integer. Result is undefined for negative
 * divisors if the dividend variable type is unsigned and for negative
 * dividends if the divisor variable type is unsigned.
 */
#define DIV_ROUND_CLOSEST(x, divisor)(			\
{							\
	typeof(x) __x = x;				\
	typeof(divisor) __d = divisor;			\
	(((typeof(x))-1) > 0 ||				\
	 ((typeof(divisor))-1) > 0 || (__x) > 0) ?	\
	 ((typeof(divisor))-1) > 0 ||			\
	 (((__x) > 0) == ((__d) > 0))) ?		\
		(((__x) + ((__d) / 2)) / (__d)) :	\
		(((__x) - ((__d) / 2)) / (__d));	\
}							\
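Editor's illustration of the extended rounding, not taken from the patch: with the sign comparison above, the half-adjustment now also moves away from zero when the quotient is negative, where the old macro was only defined for positive divisors.

/* Worked values for the updated DIV_ROUND_CLOSEST() above. */
static inline void div_round_closest_examples(void)
{
	int a = DIV_ROUND_CLOSEST(7, 2);	/* (7 + 1) / 2   ==  4 */
	int b = DIV_ROUND_CLOSEST(-7, 2);	/* (-7 - 1) / 2  == -4 */
	int c = DIV_ROUND_CLOSEST(7, -2);	/* (7 + 1) / -2  == -4 */
	int d = DIV_ROUND_CLOSEST(-7, -2);	/* (-7 - 1) / -2 ==  4 */

	(void)a; (void)b; (void)c; (void)d;
}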
@@ -46,6 +46,7 @@ enum kernfs_node_flag {
	KERNFS_SUICIDAL		= 0x0400,
	KERNFS_SUICIDED		= 0x0800,
	KERNFS_EMPTY_DIR	= 0x1000,
	KERNFS_HAS_RELEASE	= 0x2000,
};

/* @flags for kernfs_create_root() */
@@ -175,6 +176,7 @@ struct kernfs_open_file {
	/* published fields */
	struct kernfs_node	*kn;
	struct file		*file;
	struct seq_file		*seq_file;
	void			*priv;

	/* private fields, do not use outside kernfs proper */
@@ -185,11 +187,19 @@ struct kernfs_open_file {
	char			*prealloc_buf;

	size_t			atomic_write_len;
	bool			mmapped;
	bool			mmapped:1;
	bool			released:1;
	const struct vm_operations_struct *vm_ops;
};

struct kernfs_ops {
	/*
	 * Optional open/release methods. Both are called with
	 * @of->seq_file populated.
	 */
	int (*open)(struct kernfs_open_file *of);
	void (*release)(struct kernfs_open_file *of);

	/*
	 * Read is handled by either seq_file or raw_read().
	 *
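A hypothetical ops table wiring up the new optional hooks; the function bodies and names are invented, only the two prototypes come from struct kernfs_ops above.

/* Hypothetical: pair the optional open/release hooks, e.g. to take and drop
 * a reference on a backing object for the lifetime of the open file. */
static int example_kernfs_open(struct kernfs_open_file *of)
{
	/* of->seq_file is already populated when this is called, per the
	 * comment above */
	return 0;
}

static void example_kernfs_release(struct kernfs_open_file *of)
{
	/* undo whatever open() set up; see the KERNFS_HAS_RELEASE flag above */
}

static const struct kernfs_ops example_kernfs_ops = {
	.open		= example_kernfs_open,
	.release	= example_kernfs_release,
};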
@@ -354,7 +354,10 @@ static inline bool key_is_instantiated(const struct key *key)
		!test_bit(KEY_FLAG_NEGATIVE, &key->flags);
}

#define rcu_dereference_key(KEY)					\
#define dereference_key_rcu(KEY)					\
	(rcu_dereference((KEY)->payload.rcu_data0))

#define dereference_key_locked(KEY)					\
	(rcu_dereference_protected((KEY)->payload.rcu_data0,		\
				   rwsem_is_locked(&((struct key *)(KEY))->sem)))
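A hypothetical reader pair showing where each renamed macro applies; the wrapper functions are invented, and only the two macros above are taken from the header.

/* Hypothetical: the RCU variant is for readers inside rcu_read_lock(), the
 * locked variant for code that holds key->sem (the rwsem checked above). */
static void *example_read_payload_rcu(struct key *key)
{
	return dereference_key_rcu(key);
}

static void *example_read_payload_locked(struct key *key)
{
	return dereference_key_locked(key);
}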
@@ -1,7 +1,8 @@
#ifndef _LINUX_KHUGEPAGED_H
#define _LINUX_KHUGEPAGED_H

#include <linux/sched.h>		/* MMF_VM_HUGEPAGE */
#include <linux/sched/coredump.h>	/* MMF_VM_HUGEPAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern struct attribute_group khugepaged_attr_group;
Some files were not shown because too many files have changed in this diff.