Merge drm/drm-next into drm-intel-next-queued

Pull in v4.20-rc3 via drm-next.

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Committed by Jani Nikula, 2018-11-20 13:14:08 +02:00
10556 changed files with 522082 additions and 239320 deletions


@@ -831,8 +831,6 @@ static inline int acpi_dma_configure(struct device *dev,
return 0;
}
static inline void acpi_dma_deconfigure(struct device *dev) { }
#define ACPI_PTR(_ptr) (NULL)
static inline void acpi_device_set_enumerated(struct acpi_device *adev)
@@ -1074,6 +1072,15 @@ static inline int acpi_node_get_property_reference(
NR_FWNODE_REFERENCE_ARGS, args);
}
static inline bool acpi_dev_has_props(const struct acpi_device *adev)
{
return !list_empty(&adev->data.properties);
}
struct acpi_device_properties *
acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
const union acpi_object *properties);
int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
void **valptr);
int acpi_dev_prop_read_single(struct acpi_device *adev,

include/linux/adxl.h (new file, 18 lines)

@@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Address translation interface via ACPI DSM.
* Copyright (C) 2018 Intel Corporation
*/
#ifndef _LINUX_ADXL_H
#define _LINUX_ADXL_H
#ifdef CONFIG_ACPI_ADXL
const char * const *adxl_get_component_names(void);
int adxl_decode(u64 addr, u64 component_values[]);
#else
static inline const char * const *adxl_get_component_names(void) { return NULL; }
static inline int adxl_decode(u64 addr, u64 component_values[]) { return -EOPNOTSUPP; }
#endif
#endif /* _LINUX_ADXL_H */
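For orientation, a minimal sketch of how a consumer (an EDAC driver, say) might use this new interface. It is not part of the commit; the eight-slot values array and the assumption that the name array is NULL-terminated are illustrative only.

/* Hypothetical caller; values[8] and the NULL-terminated name list are assumptions. */
static int adxl_dump_components(u64 sys_addr)
{
        const char * const *names = adxl_get_component_names();
        u64 values[8];
        int i, ret;

        if (!names)
                return -EOPNOTSUPP;

        ret = adxl_decode(sys_addr, values);
        if (ret)
                return ret;

        for (i = 0; names[i]; i++)
                pr_info("adxl: %s = %llu\n", names[i], values[i]);
        return 0;
}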


@@ -18,20 +18,13 @@
* mask into a value to be binary (or set some other custom bits
* in MMCIPWR) OR'ed and written into the MMCIPWR register of the
* block. May also control external power based on the power_mode.
* @status: if no GPIO read function was given to the block in
* gpio_wp (below) this function will be called to determine
* whether a card is present in the MMC slot or not
* @gpio_wp: read this GPIO pin to see if the card is write protected
* @gpio_cd: read this GPIO pin to detect card insertion
* @cd_invert: true if the gpio_cd pin value is active low
* @status: if no GPIO line was given to the block, this function will
* be called to determine whether a card is present in the MMC slot or not
*/
struct mmci_platform_data {
unsigned int ocr_mask;
int (*ios_handler)(struct device *, struct mmc_ios *);
unsigned int (*status)(struct device *);
int gpio_wp;
int gpio_cd;
bool cd_invert;
};
#endif


@@ -1,63 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _AMIFD_H
#define _AMIFD_H
/* Definitions for the Amiga floppy driver */
#include <linux/fd.h>
#define FD_MAX_UNITS 4 /* Max. Number of drives */
#define FLOPPY_MAX_SECTORS 22 /* Max. Number of sectors per track */
#ifndef ASSEMBLER
struct fd_data_type {
char *name; /* description of data type */
int sects; /* sectors per track */
#ifdef __STDC__
int (*read_fkt)(int);
void (*write_fkt)(int);
#else
int (*read_fkt)(); /* read whole track */
void (*write_fkt)(); /* write whole track */
#endif
};
/*
** Floppy type descriptions
*/
struct fd_drive_type {
unsigned long code; /* code returned from drive */
char *name; /* description of drive */
unsigned int tracks; /* number of tracks */
unsigned int heads; /* number of heads */
unsigned int read_size; /* raw read size for one track */
unsigned int write_size; /* raw write size for one track */
unsigned int sect_mult; /* sectors and gap multiplier (HD = 2) */
unsigned int precomp1; /* start track for precomp 1 */
unsigned int precomp2; /* start track for precomp 2 */
unsigned int step_delay; /* time (in ms) for delay after step */
unsigned int settle_time; /* time to settle after dir change */
unsigned int side_time; /* time needed to change sides */
};
struct amiga_floppy_struct {
struct fd_drive_type *type; /* type of floppy for this unit */
struct fd_data_type *dtype; /* type of floppy for this unit */
int track; /* current track (-1 == unknown) */
unsigned char *trackbuf; /* current track (kmalloc()'d) */
int blocks; /* total # blocks on disk */
int changed; /* true when not known */
int disk; /* disk in drive (-1 == unknown) */
int motor; /* true when motor is at speed */
int busy; /* true when drive is active */
int dirty; /* true when trackbuf is not on disk */
int status; /* current error code for unit */
struct gendisk *gendisk;
};
#endif
#endif


@@ -1,82 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_AMIFDREG_H
#define _LINUX_AMIFDREG_H
/*
** CIAAPRA bits (read only)
*/
#define DSKRDY (0x1<<5) /* disk ready when low */
#define DSKTRACK0 (0x1<<4) /* head at track zero when low */
#define DSKPROT (0x1<<3) /* disk protected when low */
#define DSKCHANGE (0x1<<2) /* low when disk removed */
/*
** CIAAPRB bits (read/write)
*/
#define DSKMOTOR (0x1<<7) /* motor on when low */
#define DSKSEL3 (0x1<<6) /* select drive 3 when low */
#define DSKSEL2 (0x1<<5) /* select drive 2 when low */
#define DSKSEL1 (0x1<<4) /* select drive 1 when low */
#define DSKSEL0 (0x1<<3) /* select drive 0 when low */
#define DSKSIDE (0x1<<2) /* side selection: 0 = upper, 1 = lower */
#define DSKDIREC (0x1<<1) /* step direction: 0=in, 1=out (to trk 0) */
#define DSKSTEP (0x1) /* pulse low to step head 1 track */
/*
** DSKBYTR bits (read only)
*/
#define DSKBYT (1<<15) /* register contains valid byte when set */
#define DMAON (1<<14) /* disk DMA enabled */
#define DISKWRITE (1<<13) /* disk write bit in DSKLEN enabled */
#define WORDEQUAL (1<<12) /* DSKSYNC register match when true */
/* bits 7-0 are data */
/*
** ADKCON/ADKCONR bits
*/
#ifndef SETCLR
#define ADK_SETCLR (1<<15) /* control bit */
#endif
#define ADK_PRECOMP1 (1<<14) /* precompensation selection */
#define ADK_PRECOMP0 (1<<13) /* 00=none, 01=140ns, 10=280ns, 11=500ns */
#define ADK_MFMPREC (1<<12) /* 0=GCR precomp., 1=MFM precomp. */
#define ADK_WORDSYNC (1<<10) /* enable DSKSYNC auto DMA */
#define ADK_MSBSYNC (1<<9) /* when 1, enable sync on MSbit (for GCR) */
#define ADK_FAST (1<<8) /* bit cell: 0=2us (GCR), 1=1us (MFM) */
/*
** DSKLEN bits
*/
#define DSKLEN_DMAEN (1<<15)
#define DSKLEN_WRITE (1<<14)
/*
** INTENA/INTREQ bits
*/
#define DSKINDEX (0x1<<4) /* DSKINDEX bit */
/*
** Misc
*/
#define MFM_SYNC 0x4489 /* standard MFM sync value */
/* Values for FD_COMMAND */
#define FD_RECALIBRATE 0x07 /* move to track 0 */
#define FD_SEEK 0x0F /* seek track */
#define FD_READ 0xE6 /* read with MT, MFM, SKip deleted */
#define FD_WRITE 0xC5 /* write with MT, MFM */
#define FD_SENSEI 0x08 /* Sense Interrupt Status */
#define FD_SPECIFY 0x03 /* specify HUT etc */
#define FD_FORMAT 0x4D /* format one track */
#define FD_VERSION 0x10 /* get version code */
#define FD_CONFIGURE 0x13 /* configure FIFO operation */
#define FD_PERPENDICULAR 0x12 /* perpendicular r/w mode */
#endif /* _LINUX_AMIFDREG_H */


@@ -9,6 +9,7 @@
#include <linux/percpu.h>
void topology_normalize_cpu_scale(void);
int topology_update_cpu_topology(void);
struct device_node;
bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);


@@ -62,13 +62,19 @@
/* Error Codes */
enum virtchnl_status_code {
VIRTCHNL_STATUS_SUCCESS = 0,
VIRTCHNL_ERR_PARAM = -5,
VIRTCHNL_STATUS_ERR_PARAM = -5,
VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
VIRTCHNL_STATUS_NOT_SUPPORTED = -64,
VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
};
/* Backward compatibility */
#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
@@ -252,6 +258,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
/* Define below the capability flags that are not offloads */
#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
VIRTCHNL_VF_OFFLOAD_VLAN | \
VIRTCHNL_VF_OFFLOAD_RSS_PF)
@@ -573,7 +581,7 @@ struct virtchnl_filter {
enum virtchnl_flow_type flow_type;
enum virtchnl_action action;
u32 action_meta;
__u8 field_flags;
u8 field_flags;
};
VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
@@ -596,10 +604,23 @@ enum virtchnl_event_codes {
struct virtchnl_pf_event {
enum virtchnl_event_codes event;
union {
/* If the PF driver does not support the new speed reporting
* capabilities then use link_event else use link_event_adv to
* get the speed and link information. The ability to understand
* new speeds is indicated by setting the capability flag
* VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
* in virtchnl_vf_resource struct and can be used to determine
* which link event struct to use below.
*/
struct {
enum virtchnl_link_speed link_speed;
bool link_status;
} link_event;
struct {
/* link_speed provided in Mbps */
u32 link_speed;
u8 link_status;
} link_event_adv;
} event_data;
int severity;
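To make the selection logic described in the comment above concrete, a hedged sketch of a VF driver helper; vf_res as the VF's cached struct virtchnl_vf_resource is an assumption, not part of this header.

/* Illustrative only: pick the union member based on negotiated capabilities. */
static u32 vf_link_speed(struct virtchnl_vf_resource *vf_res,
                         struct virtchnl_pf_event *pfe)
{
        if (vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
                return pfe->event_data.link_event_adv.link_speed; /* plain Mbps */

        /* legacy path: a VIRTCHNL_LINK_SPEED_* enum value, not Mbps */
        return pfe->event_data.link_event.link_speed;
}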
@@ -816,7 +837,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
default:
return VIRTCHNL_ERR_PARAM;
return VIRTCHNL_STATUS_ERR_PARAM;
}
/* few more checks */
if (err_msg_format || valid_len != msglen)


@@ -78,7 +78,7 @@ struct linux_binprm {
/* Function parameter for binfmt->coredump */
struct coredump_params {
const siginfo_t *siginfo;
const kernel_siginfo_t *siginfo;
struct pt_regs *regs;
struct file *file;
unsigned long limit;


@@ -21,12 +21,8 @@
#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>
#ifdef CONFIG_BLOCK
#include <asm/io.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
@@ -132,32 +128,6 @@ static inline bool bio_full(struct bio *bio)
return bio->bi_vcnt >= bio->bi_max_vecs;
}
/*
* will die
*/
#define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
/*
* merge helpers etc
*/
/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
/*
* allow arch override, for eg virtualized architectures (put in asm/io.h)
*/
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
/*
* drivers should _never_ use the all version - the bio may have been split
* before it got to the driver and the driver won't own all of it
@@ -170,27 +140,11 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
{
iter->bi_sector += bytes >> 9;
if (bio_no_advance_iter(bio)) {
if (bio_no_advance_iter(bio))
iter->bi_size -= bytes;
iter->bi_done += bytes;
} else {
else
bvec_iter_advance(bio->bi_io_vec, iter, bytes);
/* TODO: It is reasonable to complete bio with error here. */
}
}
static inline bool bio_rewind_iter(struct bio *bio, struct bvec_iter *iter,
unsigned int bytes)
{
iter->bi_sector -= bytes >> 9;
if (bio_no_advance_iter(bio)) {
iter->bi_size += bytes;
iter->bi_done -= bytes;
return true;
}
return bvec_iter_rewind(bio->bi_io_vec, iter, bytes);
}
#define __bio_for_each_segment(bvl, bio, iter, start) \
@@ -353,6 +307,8 @@ struct bio_integrity_payload {
unsigned short bip_max_vcnt; /* integrity bio_vec slots */
unsigned short bip_flags; /* control flags */
struct bvec_iter bio_iter; /* for rewinding parent bio */
struct work_struct bip_work; /* I/O completion */
struct bio_vec *bip_vec;


@@ -28,8 +28,8 @@
* The available bitmap operations and their rough meaning in the
* case that the bitmap is a single unsigned long are thus:
*
* Note that nbits should be always a compile time evaluable constant.
* Otherwise many inlines will generate horrible code.
* The generated code is more efficient when nbits is known at
* compile-time and at most BITS_PER_LONG.
*
* ::
*
@@ -204,38 +204,31 @@ extern int bitmap_print_to_pagebuf(bool list, char *buf,
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
/*
* The static inlines below do not handle constant nbits==0 correctly,
* so make such users (should any ever turn up) call the out-of-line
* versions.
*/
#define small_const_nbits(nbits) \
(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)
static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = 0UL;
else {
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memset(dst, 0, len);
}
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memset(dst, 0, len);
}
static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = ~0UL;
else {
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memset(dst, 0xff, len);
}
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memset(dst, 0xff, len);
}
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src;
else {
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memcpy(dst, src, len);
}
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memcpy(dst, src, len);
}
/*
@@ -398,7 +391,7 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
}
static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
unsigned int shift, int nbits)
unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
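The simplification above relies on the compiler folding memset()/memcpy() with a small constant length into single-word stores, so no small_const_nbits() fast path is needed there. A minimal sketch of the typical call pattern, with an illustrative constant:

#define MY_NBITS 64 /* illustrative compile-time constant */

static void my_bitmap_init(unsigned long *dst, const unsigned long *src)
{
        bitmap_zero(dst, MY_NBITS);   /* constant nbits: the memset folds away */
        bitmap_copy(dst, src, MY_NBITS);
}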


@@ -236,33 +236,33 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
#ifdef __KERNEL__
#ifndef set_mask_bits
#define set_mask_bits(ptr, _mask, _bits) \
#define set_mask_bits(ptr, mask, bits) \
({ \
const typeof(*ptr) mask = (_mask), bits = (_bits); \
typeof(*ptr) old, new; \
const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \
typeof(*(ptr)) old__, new__; \
\
do { \
old = READ_ONCE(*ptr); \
new = (old & ~mask) | bits; \
} while (cmpxchg(ptr, old, new) != old); \
old__ = READ_ONCE(*(ptr)); \
new__ = (old__ & ~mask__) | bits__; \
} while (cmpxchg(ptr, old__, new__) != old__); \
\
new; \
new__; \
})
#endif
#ifndef bit_clear_unless
#define bit_clear_unless(ptr, _clear, _test) \
#define bit_clear_unless(ptr, clear, test) \
({ \
const typeof(*ptr) clear = (_clear), test = (_test); \
typeof(*ptr) old, new; \
const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
typeof(*(ptr)) old__, new__; \
\
do { \
old = READ_ONCE(*ptr); \
new = old & ~clear; \
} while (!(old & test) && \
cmpxchg(ptr, old, new) != old); \
old__ = READ_ONCE(*(ptr)); \
new__ = old__ & ~clear__; \
} while (!(old__ & test__) && \
cmpxchg(ptr, old__, new__) != old__); \
\
!(old & test); \
!(old__ & test__); \
})
#endif
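The double-underscore locals matter because callers may pass expressions that themselves reference variables named mask or bits. A usage sketch (the function and nibble values are illustrative, not from this commit):

/* Atomically replace the low nibble of *flags with 0x5. */
static unsigned long set_low_nibble(unsigned long *flags)
{
        return set_mask_bits(flags, 0xfUL, 0x5UL);
}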


@@ -203,6 +203,10 @@ enum {
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q);
struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
const struct blk_mq_ops *ops,
unsigned int queue_depth,
unsigned int set_flags);
int blk_mq_register_dev(struct device *, struct request_queue *);
void blk_mq_unregister_dev(struct device *, struct request_queue *);
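A hedged sketch of the intended use of the new single-queue helper; my_mq_ops and MY_QUEUE_DEPTH are placeholders:

static struct blk_mq_tag_set my_tag_set;

static struct request_queue *my_create_queue(void)
{
        /* sets up the tag set and a queue with one hardware queue;
         * returns an ERR_PTR() on failure */
        return blk_mq_init_sq_queue(&my_tag_set, &my_mq_ops, MY_QUEUE_DEPTH,
                                    BLK_MQ_F_SHOULD_MERGE);
}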

include/linux/blk-pm.h (new file, 24 lines)

@@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_PM_H_
#define _BLK_PM_H_
struct device;
struct request_queue;
/*
* block layer runtime pm functions
*/
#ifdef CONFIG_PM
extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
extern void blk_set_runtime_active(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
struct device *dev) {}
#endif
#endif /* _BLK_PM_H_ */
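A hypothetical runtime-PM callback showing the intended pre/post pairing; dev_to_my_queue() and my_hw_suspend() are assumed driver helpers:

static int my_runtime_suspend(struct device *dev)
{
        struct request_queue *q = dev_to_my_queue(dev); /* assumed helper */
        int err;

        err = blk_pre_runtime_suspend(q);
        if (err)
                return err;
        err = my_hw_suspend(dev); /* assumed hardware hook */
        blk_post_runtime_suspend(q, err);
        return err;
}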


@@ -284,8 +284,6 @@ enum req_opf {
REQ_OP_FLUSH = 2,
/* discard sectors */
REQ_OP_DISCARD = 3,
/* get zone information */
REQ_OP_ZONE_REPORT = 4,
/* securely erase sectors */
REQ_OP_SECURE_ERASE = 5,
/* reset a zone write pointer */


@@ -108,7 +108,7 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_QUIET ((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
/* account I/O stat */
/* account into disk and partition IO statistics */
#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
/* request came from our alloc pool */
#define RQF_ALLOCED ((__force req_flags_t)(1 << 14))
@@ -116,7 +116,7 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_PM ((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED ((__force req_flags_t)(1 << 16))
/* IO stats tracking on */
/* track IO completion time */
#define RQF_STATS ((__force req_flags_t)(1 << 17))
/* Look at ->special_vec for the actual data payload instead of the
bio chain. */
@@ -396,16 +396,13 @@ struct queue_limits {
#ifdef CONFIG_BLK_DEV_ZONED
struct blk_zone_report_hdr {
unsigned int nr_zones;
u8 padding[60];
};
extern unsigned int blkdev_nr_zones(struct block_device *bdev);
extern int blkdev_report_zones(struct block_device *bdev,
sector_t sector, struct blk_zone *zones,
unsigned int *nr_zones, gfp_t gfp_mask);
extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
sector_t nr_sectors, gfp_t gfp_mask);
extern int blk_revalidate_disk_zones(struct gendisk *disk);
extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
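Note that *nr_zones is an in/out parameter: the capacity of the zones[] array on entry and the number of zones actually reported on return. A caller sketch with illustrative sizes:

static int my_dump_zones(struct block_device *bdev, sector_t sector)
{
        struct blk_zone zones[16]; /* illustrative capacity */
        unsigned int nr_zones = ARRAY_SIZE(zones);
        int ret;

        ret = blkdev_report_zones(bdev, sector, zones, &nr_zones, GFP_KERNEL);
        if (ret)
                return ret;
        pr_info("%u zones reported\n", nr_zones);
        return 0;
}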
@@ -414,6 +411,16 @@ extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int blkdev_nr_zones(struct block_device *bdev)
{
return 0;
}
static inline int blk_revalidate_disk_zones(struct gendisk *disk)
{
return 0;
}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
fmode_t mode, unsigned int cmd,
unsigned long arg)
@@ -504,6 +511,12 @@ struct request_queue {
* various queue flags, see QUEUE_* below
*/
unsigned long queue_flags;
/*
* Number of contexts that have called blk_set_pm_only(). If this
* counter is above zero then only RQF_PM and RQF_PREEMPT requests are
* processed.
*/
atomic_t pm_only;
/*
* ida allocated id for this queue. Used to index queues from
@@ -679,7 +692,7 @@ struct request_queue {
#define QUEUE_FLAG_FAIL_IO 7 /* fake timeout */
#define QUEUE_FLAG_NONROT 9 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT 10 /* do IO stats */
#define QUEUE_FLAG_IO_STAT 10 /* do disk/partitions IO accounting */
#define QUEUE_FLAG_DISCARD 11 /* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES 12 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 13 /* Contributes to random pool */
@@ -693,12 +706,12 @@ struct request_queue {
#define QUEUE_FLAG_FUA 21 /* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ 22 /* flush not queueable */
#define QUEUE_FLAG_DAX 23 /* device supports DAX */
#define QUEUE_FLAG_STATS 24 /* track rq completion times */
#define QUEUE_FLAG_STATS 24 /* track IO start and completion times */
#define QUEUE_FLAG_POLL_STATS 25 /* collecting stats for hybrid polling */
#define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */
#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */
#define QUEUE_FLAG_PREEMPT_ONLY 29 /* only process REQ_PREEMPT requests */
#define QUEUE_FLAG_PCI_P2PDMA 29 /* device supports PCI p2p requests */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_SAME_COMP) | \
@@ -731,17 +744,18 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_scsi_passthrough(q) \
test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q) \
test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_preempt_only(q) \
test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
extern int blk_set_preempt_only(struct request_queue *q);
extern void blk_clear_preempt_only(struct request_queue *q);
extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);
static inline int queue_in_flight(struct request_queue *q)
{
@@ -799,6 +813,11 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
}
#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
return blk_queue_is_zoned(q) ? q->nr_zones : 0;
}
static inline unsigned int blk_queue_zone_no(struct request_queue *q,
sector_t sector)
{
@@ -814,6 +833,11 @@ static inline bool blk_queue_zone_is_seq(struct request_queue *q,
return false;
return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */
static inline bool rq_is_sync(struct request *rq)
@@ -1280,29 +1304,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);
/*
* block layer runtime pm functions
*/
#ifdef CONFIG_PM
extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
extern void blk_set_runtime_active(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
struct device *dev) {}
static inline int blk_pre_runtime_suspend(struct request_queue *q)
{
return -ENOSYS;
}
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
static inline void blk_set_runtime_active(struct request_queue *q) {}
#endif
/*
* blk_plug permits building a queue of related requests by holding the I/O
* fragments for a short period. This allows merging of sequential requests
@@ -1676,94 +1677,6 @@ static inline void put_dev_sector(Sector p)
put_page(p.v);
}
static inline bool __bvec_gap_to_prev(struct request_queue *q,
struct bio_vec *bprv, unsigned int offset)
{
return offset ||
((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}
/*
* Check if adding a bio_vec after bprv with offset would create a gap in
* the SG list. Most drivers don't care about this, but some do.
*/
static inline bool bvec_gap_to_prev(struct request_queue *q,
struct bio_vec *bprv, unsigned int offset)
{
if (!queue_virt_boundary(q))
return false;
return __bvec_gap_to_prev(q, bprv, offset);
}
/*
* Check if the two bvecs from two bios can be merged to one segment.
* If yes, no need to check gap between the two bios since the 1st bio
* and the 1st bvec in the 2nd bio can be handled in one segment.
*/
static inline bool bios_segs_mergeable(struct request_queue *q,
struct bio *prev, struct bio_vec *prev_last_bv,
struct bio_vec *next_first_bv)
{
if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv))
return false;
if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv))
return false;
if (prev->bi_seg_back_size + next_first_bv->bv_len >
queue_max_segment_size(q))
return false;
return true;
}
static inline bool bio_will_gap(struct request_queue *q,
struct request *prev_rq,
struct bio *prev,
struct bio *next)
{
if (bio_has_data(prev) && queue_virt_boundary(q)) {
struct bio_vec pb, nb;
/*
* don't merge if the 1st bio starts with non-zero
* offset, otherwise it is quite difficult to respect
* sg gap limit. We work hard to merge a huge number of small
* single bios in case of mkfs.
*/
if (prev_rq)
bio_get_first_bvec(prev_rq->bio, &pb);
else
bio_get_first_bvec(prev, &pb);
if (pb.bv_offset)
return true;
/*
* We don't need to worry about the situation that the
* merged segment ends in unaligned virt boundary:
*
* - if 'pb' ends aligned, the merged segment ends aligned
* - if 'pb' ends unaligned, the next bio must include
* one single bvec of 'nb', otherwise the 'nb' can't
* merge with 'pb'
*/
bio_get_last_bvec(prev, &pb);
bio_get_first_bvec(next, &nb);
if (!bios_segs_mergeable(q, prev, &pb, &nb))
return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}
return false;
}
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
return bio_will_gap(req->q, req, req->biotail, bio);
}
static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
return bio_will_gap(req->q, NULL, bio, req->bio);
}
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
@@ -1843,26 +1756,6 @@ queue_max_integrity_segments(struct request_queue *q)
return q->limits.max_integrity_segments;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
struct bio *next)
{
struct bio_integrity_payload *bip = bio_integrity(req->bio);
struct bio_integrity_payload *bip_next = bio_integrity(next);
return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
bip_next->bip_vec[0].bv_offset);
}
static inline bool integrity_req_gap_front_merge(struct request *req,
struct bio *bio)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
bip_next->bip_vec[0].bv_offset);
}
/**
* bio_integrity_intervals - Return number of integrity intervals for a bio
* @bi: blk_integrity profile for device
@@ -1947,17 +1840,6 @@ static inline bool blk_integrity_merge_bio(struct request_queue *rq,
return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
struct bio *next)
{
return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
struct bio *bio)
{
return false;
}
static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
unsigned int sectors)
{
@@ -1987,6 +1869,9 @@ struct block_device_operations {
int (*getgeo)(struct block_device *, struct hd_geometry *);
/* this callback is with swap_lock and sometimes page table lock held */
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
int (*report_zones)(struct gendisk *, sector_t sector,
struct blk_zone *zones, unsigned int *nr_zones,
gfp_t gfp_mask);
struct module *owner;
const struct pr_ops *pr_ops;
};
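A hypothetical wiring of the new method into a zoned driver's operations; my_report_zones is an assumed callback matching the prototype above:

static const struct block_device_operations my_bd_ops = {
        .owner         = THIS_MODULE,
        .report_zones  = my_report_zones, /* assumed driver callback */
};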


@@ -1,404 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
*/
#ifndef _LINUX_BOOTMEM_H
#define _LINUX_BOOTMEM_H
#include <linux/mmzone.h>
#include <linux/mm_types.h>
#include <asm/dma.h>
#include <asm/processor.h>
/*
* simple boot-time physical memory area allocator.
*/
extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;
/*
* highest page
*/
extern unsigned long max_pfn;
/*
* highest possible page
*/
extern unsigned long long max_possible_pfn;
#ifndef CONFIG_NO_BOOTMEM
/**
* struct bootmem_data - per-node information used by the bootmem allocator
* @node_min_pfn: the starting physical address of the node's memory
* @node_low_pfn: the end physical address of the directly addressable memory
* @node_bootmem_map: is a bitmap pointer - the bits represent all physical
* memory pages (including holes) on the node.
* @last_end_off: the offset within the page of the end of the last allocation;
* if 0, the page used is full
* @hint_idx: the PFN of the page used with the last allocation;
* when used together with the @last_end_off field,
* a test can be made to see if allocations can be merged
* with the page used for the last allocation rather than
* using up a full new page.
* @list: list entry in the linked list ordered by the memory addresses
*/
typedef struct bootmem_data {
unsigned long node_min_pfn;
unsigned long node_low_pfn;
void *node_bootmem_map;
unsigned long last_end_off;
unsigned long hint_idx;
struct list_head list;
} bootmem_data_t;
extern bootmem_data_t bootmem_node_data[];
#endif
extern unsigned long bootmem_bootmap_pages(unsigned long);
extern unsigned long init_bootmem_node(pg_data_t *pgdat,
unsigned long freepfn,
unsigned long startpfn,
unsigned long endpfn);
extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
extern unsigned long free_all_bootmem(void);
extern void reset_node_managed_pages(pg_data_t *pgdat);
extern void reset_all_zones_managed_pages(void);
extern void free_bootmem_node(pg_data_t *pgdat,
unsigned long addr,
unsigned long size);
extern void free_bootmem(unsigned long physaddr, unsigned long size);
extern void free_bootmem_late(unsigned long physaddr, unsigned long size);
/*
* Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
* the architecture-specific code should honor this).
*
* If flags is BOOTMEM_DEFAULT, then the return value is always 0 (success).
* If flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the memory
* already was reserved.
*/
#define BOOTMEM_DEFAULT 0
#define BOOTMEM_EXCLUSIVE (1<<0)
extern int reserve_bootmem(unsigned long addr,
unsigned long size,
int flags);
extern int reserve_bootmem_node(pg_data_t *pgdat,
unsigned long physaddr,
unsigned long size,
int flags);
extern void *__alloc_bootmem(unsigned long size,
unsigned long align,
unsigned long goal);
extern void *__alloc_bootmem_nopanic(unsigned long size,
unsigned long align,
unsigned long goal) __malloc;
extern void *__alloc_bootmem_node(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal) __malloc;
void *__alloc_bootmem_node_high(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal) __malloc;
extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal) __malloc;
void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal,
unsigned long limit) __malloc;
extern void *__alloc_bootmem_low(unsigned long size,
unsigned long align,
unsigned long goal) __malloc;
void *__alloc_bootmem_low_nopanic(unsigned long size,
unsigned long align,
unsigned long goal) __malloc;
extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal) __malloc;
#ifdef CONFIG_NO_BOOTMEM
/* We are using top down, so it is safe to use 0 here */
#define BOOTMEM_LOW_LIMIT 0
#else
#define BOOTMEM_LOW_LIMIT __pa(MAX_DMA_ADDRESS)
#endif
#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
#endif
#define alloc_bootmem(x) \
__alloc_bootmem(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_align(x, align) \
__alloc_bootmem(x, align, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_nopanic(x) \
__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_pages(x) \
__alloc_bootmem(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_pages_nopanic(x) \
__alloc_bootmem_nopanic(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_node(pgdat, x) \
__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_node_nopanic(pgdat, x) \
__alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_pages_node(pgdat, x) \
__alloc_bootmem_node(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
__alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_low(x) \
__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
#define alloc_bootmem_low_pages_nopanic(x) \
__alloc_bootmem_low_nopanic(x, PAGE_SIZE, 0)
#define alloc_bootmem_low_pages(x) \
__alloc_bootmem_low(x, PAGE_SIZE, 0)
#define alloc_bootmem_low_pages_node(pgdat, x) \
__alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
#if defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM)
/* FIXME: use MEMBLOCK_ALLOC_* variants here */
#define BOOTMEM_ALLOC_ACCESSIBLE 0
#define BOOTMEM_ALLOC_ANYWHERE (~(phys_addr_t)0)
/* FIXME: Move to memblock.h at a point where we remove nobootmem.c */
void *memblock_virt_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr,
phys_addr_t max_addr, int nid);
void *memblock_virt_alloc_try_nid_nopanic(phys_addr_t size,
phys_addr_t align, phys_addr_t min_addr,
phys_addr_t max_addr, int nid);
void *memblock_virt_alloc_try_nid(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr, int nid);
void __memblock_free_early(phys_addr_t base, phys_addr_t size);
void __memblock_free_late(phys_addr_t base, phys_addr_t size);
static inline void * __init memblock_virt_alloc(
phys_addr_t size, phys_addr_t align)
{
return memblock_virt_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT,
BOOTMEM_ALLOC_ACCESSIBLE,
NUMA_NO_NODE);
}
static inline void * __init memblock_virt_alloc_raw(
phys_addr_t size, phys_addr_t align)
{
return memblock_virt_alloc_try_nid_raw(size, align, BOOTMEM_LOW_LIMIT,
BOOTMEM_ALLOC_ACCESSIBLE,
NUMA_NO_NODE);
}
static inline void * __init memblock_virt_alloc_nopanic(
phys_addr_t size, phys_addr_t align)
{
return memblock_virt_alloc_try_nid_nopanic(size, align,
BOOTMEM_LOW_LIMIT,
BOOTMEM_ALLOC_ACCESSIBLE,
NUMA_NO_NODE);
}
static inline void * __init memblock_virt_alloc_low(
phys_addr_t size, phys_addr_t align)
{
return memblock_virt_alloc_try_nid(size, align,
BOOTMEM_LOW_LIMIT,
ARCH_LOW_ADDRESS_LIMIT,
NUMA_NO_NODE);
}
static inline void * __init memblock_virt_alloc_low_nopanic(
phys_addr_t size, phys_addr_t align)
{
return memblock_virt_alloc_try_nid_nopanic(size, align,
BOOTMEM_LOW_LIMIT,
ARCH_LOW_ADDRESS_LIMIT,
NUMA_NO_NODE);
}
static inline void * __init memblock_virt_alloc_from_nopanic(
phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
{
return memblock_virt_alloc_try_nid_nopanic(size, align, min_addr,
BOOTMEM_ALLOC_ACCESSIBLE,
NUMA_NO_NODE);
}
static inline void * __init memblock_virt_alloc_node(
phys_addr_t size, int nid)
{
return memblock_virt_alloc_try_nid(size, 0, BOOTMEM_LOW_LIMIT,
BOOTMEM_ALLOC_ACCESSIBLE, nid);
}
static inline void * __init memblock_virt_alloc_node_nopanic(
phys_addr_t size, int nid)
{
return memblock_virt_alloc_try_nid_nopanic(size, 0, BOOTMEM_LOW_LIMIT,
BOOTMEM_ALLOC_ACCESSIBLE,
nid);
}
static inline void __init memblock_free_early(
phys_addr_t base, phys_addr_t size)
{
__memblock_free_early(base, size);
}
static inline void __init memblock_free_early_nid(
phys_addr_t base, phys_addr_t size, int nid)
{
__memblock_free_early(base, size);
}
static inline void __init memblock_free_late(
phys_addr_t base, phys_addr_t size)
{
__memblock_free_late(base, size);
}
#else
#define BOOTMEM_ALLOC_ACCESSIBLE 0
/* Fall back to all the existing bootmem APIs */
static inline void * __init memblock_virt_alloc(
phys_addr_t size, phys_addr_t align)
{
if (!align)
align = SMP_CACHE_BYTES;
return __alloc_bootmem(size, align, BOOTMEM_LOW_LIMIT);
}
static inline void * __init memblock_virt_alloc_raw(
phys_addr_t size, phys_addr_t align)
{
if (!align)
align = SMP_CACHE_BYTES;
return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
}
static inline void * __init memblock_virt_alloc_nopanic(
phys_addr_t size, phys_addr_t align)
{
if (!align)
align = SMP_CACHE_BYTES;
return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
}
static inline void * __init memblock_virt_alloc_low(
phys_addr_t size, phys_addr_t align)
{
if (!align)
align = SMP_CACHE_BYTES;
return __alloc_bootmem_low(size, align, 0);
}
static inline void * __init memblock_virt_alloc_low_nopanic(
phys_addr_t size, phys_addr_t align)
{
if (!align)
align = SMP_CACHE_BYTES;
return __alloc_bootmem_low_nopanic(size, align, 0);
}
static inline void * __init memblock_virt_alloc_from_nopanic(
phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
{
return __alloc_bootmem_nopanic(size, align, min_addr);
}
static inline void * __init memblock_virt_alloc_node(
phys_addr_t size, int nid)
{
return __alloc_bootmem_node(NODE_DATA(nid), size, SMP_CACHE_BYTES,
BOOTMEM_LOW_LIMIT);
}
static inline void * __init memblock_virt_alloc_node_nopanic(
phys_addr_t size, int nid)
{
return __alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
SMP_CACHE_BYTES,
BOOTMEM_LOW_LIMIT);
}
static inline void * __init memblock_virt_alloc_try_nid(phys_addr_t size,
phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid)
{
return __alloc_bootmem_node_high(NODE_DATA(nid), size, align,
min_addr);
}
static inline void * __init memblock_virt_alloc_try_nid_raw(
phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr, int nid)
{
return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align,
min_addr, max_addr);
}
static inline void * __init memblock_virt_alloc_try_nid_nopanic(
phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr, int nid)
{
return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align,
min_addr, max_addr);
}
static inline void __init memblock_free_early(
phys_addr_t base, phys_addr_t size)
{
free_bootmem(base, size);
}
static inline void __init memblock_free_early_nid(
phys_addr_t base, phys_addr_t size, int nid)
{
free_bootmem_node(NODE_DATA(nid), base, size);
}
static inline void __init memblock_free_late(
phys_addr_t base, phys_addr_t size)
{
free_bootmem_late(base, size);
}
#endif /* defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM) */
extern void *alloc_large_system_hash(const char *tablename,
unsigned long bucketsize,
unsigned long numentries,
int scale,
int flags,
unsigned int *_hash_shift,
unsigned int *_hash_mask,
unsigned long low_limit,
unsigned long high_limit);
#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
* shift passed via *_hash_shift */
#define HASH_ZERO 0x00000004 /* Zero allocated hash table */
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
* sufficient vmalloc space.
*/
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist; /* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif
#endif /* _LINUX_BOOTMEM_H */


@@ -2,6 +2,7 @@
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H
#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
@@ -22,7 +23,11 @@ struct bpf_cgroup_storage;
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
DECLARE_PER_CPU(void*, bpf_cgroup_storage);
DECLARE_PER_CPU(struct bpf_cgroup_storage*,
bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
#define for_each_cgroup_storage_type(stype) \
for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
struct bpf_cgroup_storage_map;
@@ -32,7 +37,10 @@ struct bpf_storage_buffer {
};
struct bpf_cgroup_storage {
struct bpf_storage_buffer *buf;
union {
struct bpf_storage_buffer *buf;
void __percpu *percpu_buf;
};
struct bpf_cgroup_storage_map *map;
struct bpf_cgroup_storage_key key;
struct list_head list;
@@ -43,7 +51,7 @@ struct bpf_cgroup_storage {
struct bpf_prog_list {
struct list_head node;
struct bpf_prog *prog;
struct bpf_cgroup_storage *storage;
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};
struct bpf_prog_array;
@@ -101,18 +109,26 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
short access, enum bpf_attach_type type);
static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage)
static inline enum bpf_cgroup_storage_type cgroup_storage_type(
struct bpf_map *map)
{
struct bpf_storage_buffer *buf;
if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
return BPF_CGROUP_STORAGE_PERCPU;
if (!storage)
return;
buf = READ_ONCE(storage->buf);
this_cpu_write(bpf_cgroup_storage, &buf->data[0]);
return BPF_CGROUP_STORAGE_SHARED;
}
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog);
static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
*storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
enum bpf_cgroup_storage_type stype;
for_each_cgroup_storage_type(stype)
this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
}
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
struct cgroup *cgroup,
@@ -121,6 +137,10 @@ void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);
int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
void *value, u64 flags);
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
@@ -265,15 +285,24 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
return -EINVAL;
}
static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) {}
static inline void bpf_cgroup_storage_set(
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
struct bpf_prog *prog) { return 0; }
struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return 0; }
static inline void bpf_cgroup_storage_free(
struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
void *value) {
return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
void *key, void *value, u64 flags) {
return 0;
}
#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
@@ -293,6 +322,8 @@ static inline void bpf_cgroup_storage_free(
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
#define for_each_cgroup_storage_type(stype) for (; false; )
#endif /* CONFIG_CGROUP_BPF */
#endif /* _BPF_CGROUP_H */
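A hedged sketch of how an attach path might populate both storage flavors for one program, mirroring struct bpf_prog_list::storage above; error unwinding is omitted:

static int my_alloc_storages(struct bpf_prog *prog,
                             struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
        enum bpf_cgroup_storage_type stype;

        for_each_cgroup_storage_type(stype) {
                storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
                if (IS_ERR(storage[stype]))
                        return PTR_ERR(storage[stype]); /* unwind omitted */
        }
        bpf_cgroup_storage_set(storage);
        return 0;
}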


@@ -39,6 +39,9 @@ struct bpf_map_ops {
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
int (*map_delete_elem)(struct bpf_map *map, void *key);
int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
int (*map_pop_elem)(struct bpf_map *map, void *value);
int (*map_peek_elem)(struct bpf_map *map, void *value);
/* funcs called by prog_array and perf_event_array map */
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
@@ -138,6 +141,7 @@ enum bpf_arg_type {
ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */
ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */
/* the following constraints used to prototype bpf_memcmp() and other
* functions that access data on eBPF program stack
@@ -154,6 +158,7 @@ enum bpf_arg_type {
ARG_PTR_TO_CTX, /* pointer to context */
ARG_ANYTHING, /* any (initialized) argument is ok */
ARG_PTR_TO_SOCKET, /* pointer to bpf_sock */
};
/* type of values returned from helper functions */
@@ -162,6 +167,7 @@ enum bpf_return_type {
RET_VOID, /* function doesn't return anything */
RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */
};
/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
@@ -212,6 +218,9 @@ enum bpf_reg_type {
PTR_TO_PACKET_META, /* skb->data - meta_len */
PTR_TO_PACKET, /* reg points to skb->data */
PTR_TO_PACKET_END, /* skb->data + headlen */
PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */
PTR_TO_SOCKET, /* reg points to struct bpf_sock */
PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */
};
/* The information passed from prog-specific *_is_valid_access
@@ -258,6 +267,7 @@ struct bpf_verifier_ops {
struct bpf_prog_offload_ops {
int (*insn_hook)(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx);
int (*finalize)(struct bpf_verifier_env *env);
};
struct bpf_prog_offload {
@@ -271,6 +281,14 @@ struct bpf_prog_offload {
u32 jited_len;
};
enum bpf_cgroup_storage_type {
BPF_CGROUP_STORAGE_SHARED,
BPF_CGROUP_STORAGE_PERCPU,
__BPF_CGROUP_STORAGE_MAX
};
#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
struct bpf_prog_aux {
atomic_t refcnt;
u32 used_map_cnt;
@@ -288,7 +306,7 @@ struct bpf_prog_aux {
struct bpf_prog *prog;
struct user_struct *user;
u64 load_time; /* ns since boottime */
struct bpf_map *cgroup_storage;
struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
void *security;
@@ -334,6 +352,11 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
const struct bpf_insn *src,
struct bpf_insn *dst,
struct bpf_prog *prog,
u32 *target_size);
u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
@@ -357,7 +380,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
*/
struct bpf_prog_array_item {
struct bpf_prog *prog;
struct bpf_cgroup_storage *cgroup_storage;
struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};
struct bpf_prog_array {
@@ -718,33 +741,18 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET)
struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key);
int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
int sockmap_get_from_fd(const union bpf_attr *attr, int type,
struct bpf_prog *prog);
#if defined(CONFIG_BPF_STREAM_PARSER)
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
#else
static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
return NULL;
}
static inline struct sock *__sock_hash_lookup_elem(struct bpf_map *map,
void *key)
{
return NULL;
}
static inline int sock_map_prog(struct bpf_map *map,
struct bpf_prog *prog,
u32 type)
static inline int sock_map_prog_update(struct bpf_map *map,
struct bpf_prog *prog, u32 which)
{
return -EOPNOTSUPP;
}
static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type,
struct bpf_prog *prog)
static inline int sock_map_get_from_fd(const union bpf_attr *attr,
struct bpf_prog *prog)
{
return -EINVAL;
}
@@ -806,6 +814,9 @@ static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
@@ -820,6 +831,10 @@ extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
@@ -827,4 +842,29 @@ extern const struct bpf_func_proto bpf_get_local_storage_proto;
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
#if defined(CONFIG_NET)
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog,
u32 *target_size);
#else
static inline bool bpf_sock_is_valid_access(int off, int size,
enum bpf_access_type type,
struct bpf_insn_access_aux *info)
{
return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog,
u32 *target_size)
{
return 0;
}
#endif
#endif /* _LINUX_BPF_H */
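For the new map_push_elem/map_pop_elem/map_peek_elem callbacks added to bpf_map_ops above, a sketch of how a queue-like map implementation would wire them; the my_* functions are placeholders:

static const struct bpf_map_ops my_queue_like_ops = {
        .map_push_elem = my_push_elem,
        .map_pop_elem  = my_pop_elem,
        .map_peek_elem = my_peek_elem,
};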


@@ -16,6 +16,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local)
BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb)
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg)
BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector)
#endif
#ifdef CONFIG_BPF_EVENTS
BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe)
@@ -42,6 +43,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops)
#endif
#ifdef CONFIG_CGROUP_BPF
BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, cgroup_storage_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops)
@@ -49,13 +51,13 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, htab_lru_percpu_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_LPM_TRIE, trie_map_ops)
#ifdef CONFIG_PERF_EVENTS
BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_trace_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
#ifdef CONFIG_NET
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_INET)
#if defined(CONFIG_BPF_STREAM_PARSER)
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops)
#endif
@@ -67,3 +69,5 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
#endif
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_QUEUE, queue_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops)


@@ -41,6 +41,7 @@ enum bpf_reg_liveness {
};
struct bpf_reg_state {
/* Ordering of fields matters. See states_equal() */
enum bpf_reg_type type;
union {
/* valid when type == PTR_TO_PACKET */
@@ -50,6 +51,9 @@ struct bpf_reg_state {
* PTR_TO_MAP_VALUE_OR_NULL
*/
struct bpf_map *map_ptr;
/* Max size from any of the above. */
unsigned long raw;
};
/* Fixed part of pointer offset, pointer types only */
s32 off;
@@ -57,9 +61,10 @@ struct bpf_reg_state {
* offset, so they can share range knowledge.
* For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
* came from, when one is tested for != NULL.
* For PTR_TO_SOCKET this is used to share which pointers retain the
* same reference to the socket, to determine proper reference freeing.
*/
u32 id;
/* Ordering of fields matters. See states_equal() */
/* For scalar types (SCALAR_VALUE), this represents our knowledge of
* the actual value.
* For pointer types, this represents the variable part of the offset
@@ -76,15 +81,15 @@ struct bpf_reg_state {
s64 smax_value; /* maximum possible (s64)value */
u64 umin_value; /* minimum possible (u64)value */
u64 umax_value; /* maximum possible (u64)value */
/* parentage chain for liveness checking */
struct bpf_reg_state *parent;
/* Inside the callee two registers can be both PTR_TO_STACK like
* R1=fp-8 and R2=fp-8, but one of them points to this function stack
* while another to the caller's stack. To differentiate them 'frameno'
* is used which is an index in bpf_verifier_state->frame[] array
* pointing to bpf_func_state.
* This field must be second to last, for states_equal() reasons.
*/
u32 frameno;
/* This field must be last, for states_equal() reasons. */
enum bpf_reg_liveness live;
};
@@ -102,12 +107,22 @@ struct bpf_stack_state {
u8 slot_type[BPF_REG_SIZE];
};
struct bpf_reference_state {
/* Track each reference created with a unique id, even if the same
* instruction creates the reference multiple times (eg, via CALL).
*/
int id;
/* Instruction where the allocation of this reference occurred. This
* is used purely to inform the user of a reference leak.
*/
int insn_idx;
};
/* state of the program:
* type of all registers and stack info
*/
struct bpf_func_state {
struct bpf_reg_state regs[MAX_BPF_REG];
struct bpf_verifier_state *parent;
/* index of call instruction that called into this func */
int callsite;
/* stack frame number of this function state from pov of
@@ -120,7 +135,9 @@ struct bpf_func_state {
*/
u32 subprogno;
/* should be second to last. See copy_func_state() */
/* The following fields should be last. See copy_func_state() */
int acquired_refs;
struct bpf_reference_state *refs;
int allocated_stack;
struct bpf_stack_state *stack;
};
@@ -129,10 +146,20 @@ struct bpf_func_state {
struct bpf_verifier_state {
/* call stack tracking */
struct bpf_func_state *frame[MAX_CALL_FRAMES];
struct bpf_verifier_state *parent;
u32 curframe;
};
#define bpf_get_spilled_reg(slot, frame) \
(((slot < frame->allocated_stack / BPF_REG_SIZE) && \
(frame->stack[slot].slot_type[0] == STACK_SPILL)) \
? &frame->stack[slot].spilled_ptr : NULL)
/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg) \
for (iter = 0, reg = bpf_get_spilled_reg(iter, frame); \
iter < frame->allocated_stack / BPF_REG_SIZE; \
iter++, reg = bpf_get_spilled_reg(iter, frame))
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
struct bpf_verifier_state state;
@@ -206,15 +233,21 @@ __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
const char *fmt, ...);
static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *cur = env->cur_state;
return cur->frame[cur->curframe]->regs;
return cur->frame[cur->curframe];
}
static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
return cur_func(env)->regs;
}
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
#endif /* _LINUX_BPF_VERIFIER_H */

View File

@@ -242,7 +242,7 @@ int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline int block_page_mkwrite_return(int err)
static inline vm_fault_t block_page_mkwrite_return(int err)
{
if (err == 0)
return VM_FAULT_LOCKED;

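/*
 * Hedged sketch of the intended call pattern: a filesystem's ->page_mkwrite
 * handler (my_page_mkwrite and my_get_block are hypothetical) funnels the
 * errno from block_page_mkwrite() through the helper to obtain a vm_fault_t.
 */
static vm_fault_t my_page_mkwrite(struct vm_fault *vmf)
{
	int err = block_page_mkwrite(vmf->vma, vmf, my_get_block);

	return block_page_mkwrite_return(err);
}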
View File

@@ -40,8 +40,6 @@ struct bvec_iter {
unsigned int bi_idx; /* current index into bvl_vec */
unsigned int bi_done; /* number of bytes completed */
unsigned int bi_bvec_done; /* number of bytes completed in
current bvec */
};
@@ -85,7 +83,6 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv,
bytes -= len;
iter->bi_size -= len;
iter->bi_bvec_done += len;
iter->bi_done += len;
if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
iter->bi_bvec_done = 0;

View File

@@ -213,12 +213,6 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \
CEPH_FEATURE_CEPHX_V2)
#define CEPH_FEATURES_REQUIRED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR | \
CEPH_FEATURE_SUBSCRIBE2 | \
CEPH_FEATURE_RECONNECT_SEQ | \
CEPH_FEATURE_PGID64 | \
CEPH_FEATURE_PGPOOL3 | \
CEPH_FEATURE_OSDENC)
#define CEPH_FEATURES_REQUIRED_DEFAULT 0
#endif

View File

@@ -81,7 +81,13 @@ struct ceph_options {
#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024)
/*
* Handle the largest possible rbd object in one message.
* There is no limit on the size of cephfs objects, but cephfs I/O has
* to obey rsize and wsize mount options anyway.
*/
#define CEPH_MSG_MAX_DATA_LEN (32*1024*1024)
#define CEPH_AUTH_NAME_DEFAULT "guest"

View File

@@ -82,22 +82,6 @@ enum ceph_msg_data_type {
CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */
};
static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type)
{
switch (type) {
case CEPH_MSG_DATA_NONE:
case CEPH_MSG_DATA_PAGES:
case CEPH_MSG_DATA_PAGELIST:
#ifdef CONFIG_BLOCK
case CEPH_MSG_DATA_BIO:
#endif /* CONFIG_BLOCK */
case CEPH_MSG_DATA_BVECS:
return true;
default:
return false;
}
}
#ifdef CONFIG_BLOCK
struct ceph_bio_iter {
@@ -181,7 +165,6 @@ struct ceph_bvec_iter {
} while (0)
struct ceph_msg_data {
struct list_head links; /* ceph_msg->data */
enum ceph_msg_data_type type;
union {
#ifdef CONFIG_BLOCK
@@ -202,7 +185,6 @@ struct ceph_msg_data {
struct ceph_msg_data_cursor {
size_t total_resid; /* across all data items */
struct list_head *data_head; /* = &ceph_msg->data */
struct ceph_msg_data *data; /* current data item */
size_t resid; /* bytes not yet consumed */
@@ -240,7 +222,9 @@ struct ceph_msg {
struct ceph_buffer *middle;
size_t data_length;
struct list_head data;
struct ceph_msg_data *data;
int num_data_items;
int max_data_items;
struct ceph_msg_data_cursor cursor;
struct ceph_connection *con;
@@ -381,6 +365,8 @@ void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
struct ceph_bvec_iter *bvec_pos);
struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
gfp_t flags, bool can_fail);
extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
bool can_fail);

View File

@@ -13,14 +13,15 @@ struct ceph_msgpool {
mempool_t *pool;
int type; /* preallocated message type */
int front_len; /* preallocated payload size */
int max_data_items;
};
extern int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
int front_len, int size, bool blocking,
const char *name);
int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
int front_len, int max_data_items, int size,
const char *name);
extern void ceph_msgpool_destroy(struct ceph_msgpool *pool);
extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *,
int front_len);
struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len,
int max_data_items);
extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *);
#endif

View File

@@ -136,6 +136,13 @@ struct ceph_osd_req_op {
u64 expected_object_size;
u64 expected_write_size;
} alloc_hint;
struct {
u64 snapid;
u64 src_version;
u8 flags;
u32 src_fadvise_flags;
struct ceph_osd_data osd_data;
} copy_from;
};
};
@@ -444,9 +451,8 @@ extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
struct page **pages, u64 length,
u32 alignment, bool pages_from_pool,
bool own_pages);
extern int osd_req_op_cls_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode,
const char *class, const char *method);
int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
const char *class, const char *method);
extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
u16 opcode, const char *name, const void *value,
size_t size, u8 cmp_op, u8 cmp_mode);
@@ -511,6 +517,16 @@ extern int ceph_osdc_writepages(struct ceph_osd_client *osdc,
struct timespec64 *mtime,
struct page **pages, int nr_pages);
int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
u64 src_snapid, u64 src_version,
struct ceph_object_id *src_oid,
struct ceph_object_locator *src_oloc,
u32 src_fadvise_flags,
struct ceph_object_id *dst_oid,
struct ceph_object_locator *dst_oloc,
u32 dst_fadvise_flags,
u8 copy_from_flags);
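/*
 * Hedged call sketch: osdc, the object ids/locators and the snap/version
 * values are assumed to be prepared by the caller; the helper takes fully
 * formed source and destination descriptors plus per-side fadvise flags.
 */
ret = ceph_osdc_copy_from(osdc, src_snapid, src_version,
			  &src_oid, &src_oloc, 0 /* src fadvise flags */,
			  &dst_oid, &dst_oloc, 0 /* dst fadvise flags */,
			  CEPH_OSD_COPY_FROM_FLAG_FLUSH);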
/* watch/notify */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,

View File

@@ -23,16 +23,7 @@ struct ceph_pagelist_cursor {
size_t room; /* room remaining to reset to */
};
static inline void ceph_pagelist_init(struct ceph_pagelist *pl)
{
INIT_LIST_HEAD(&pl->head);
pl->mapped_tail = NULL;
pl->length = 0;
pl->room = 0;
INIT_LIST_HEAD(&pl->free_list);
pl->num_pages_free = 0;
refcount_set(&pl->refcnt, 1);
}
struct ceph_pagelist *ceph_pagelist_alloc(gfp_t gfp_flags);
extern void ceph_pagelist_release(struct ceph_pagelist *pl);

View File

@@ -410,6 +410,14 @@ enum {
enum {
CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */
CEPH_OSD_OP_FLAG_FAILOK = 2, /* continue despite failure */
CEPH_OSD_OP_FLAG_FADVISE_RANDOM = 0x4, /* the op is random */
CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL = 0x8, /* the op is sequential */
CEPH_OSD_OP_FLAG_FADVISE_WILLNEED = 0x10,/* data will be accessed in
the near future */
CEPH_OSD_OP_FLAG_FADVISE_DONTNEED = 0x20,/* data will not be accessed
in the near future */
CEPH_OSD_OP_FLAG_FADVISE_NOCACHE = 0x40,/* data will be accessed only
once by this client */
};
#define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/
@@ -431,6 +439,15 @@ enum {
CEPH_OSD_CMPXATTR_MODE_U64 = 2
};
enum {
CEPH_OSD_COPY_FROM_FLAG_FLUSH = 1, /* part of a flush operation */
CEPH_OSD_COPY_FROM_FLAG_IGNORE_OVERLAY = 2, /* ignore pool overlay */
CEPH_OSD_COPY_FROM_FLAG_IGNORE_CACHE = 4, /* ignore osd cache logic */
CEPH_OSD_COPY_FROM_FLAG_MAP_SNAP_CLONE = 8, /* map snap direct to
* cloneid */
CEPH_OSD_COPY_FROM_FLAG_RWORDERED = 16, /* order with write */
};
enum {
CEPH_OSD_WATCH_OP_UNWATCH = 0,
CEPH_OSD_WATCH_OP_LEGACY_WATCH = 1,
@@ -497,6 +514,17 @@ struct ceph_osd_op {
__le64 expected_object_size;
__le64 expected_write_size;
} __attribute__ ((packed)) alloc_hint;
struct {
__le64 snapid;
__le64 src_version;
__u8 flags; /* CEPH_OSD_COPY_FROM_FLAG_* */
/*
* CEPH_OSD_OP_FLAG_FADVISE_*: fadvise flags
* for src object, flags for dest object are in
* ceph_osd_op::flags.
*/
__le32 src_fadvise_flags;
} __attribute__ ((packed)) copy_from;
};
__le32 payload_len;
} __attribute__ ((packed));

View File

@@ -20,6 +20,7 @@
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
#include <linux/bpf-cgroup.h>
#include <linux/psi_types.h>
#ifdef CONFIG_CGROUPS
@@ -412,6 +413,7 @@ struct cgroup {
* specific task are charged to the dom_cgrp.
*/
struct cgroup *dom_cgrp;
struct cgroup *old_dom_cgrp; /* used while enabling threaded */
/* per-cpu recursive resource statistics */
struct cgroup_rstat_cpu __percpu *rstat_cpu;
@@ -435,6 +437,9 @@ struct cgroup {
/* used to schedule release agent */
struct work_struct release_agent_work;
/* used to track pressure stalls */
struct psi_group psi;
/* used to store eBPF programs */
struct cgroup_bpf bpf;

View File

@@ -567,20 +567,11 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
int ancestor_level)
{
struct cgroup *ptr;
if (cgrp->level < ancestor_level)
return NULL;
for (ptr = cgrp;
ptr && ptr->level > ancestor_level;
ptr = cgroup_parent(ptr))
;
if (ptr && ptr->level == ancestor_level)
return ptr;
return NULL;
while (cgrp && cgrp->level > ancestor_level)
cgrp = cgroup_parent(cgrp);
return cgrp;
}
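/*
 * Illustrative sketch (hypothetical helper): the simplified walk returns
 * the ancestor at exactly @ancestor_level, or NULL when the cgroup is not
 * that deep; level 1 asks for the top-level group below the root.
 */
static bool cgrp_below_root(struct cgroup *cgrp)
{
	return cgroup_ancestor(cgrp, 1) != NULL;
}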
/**
@@ -657,6 +648,11 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
pr_cont_kernfs_path(cgrp->kn);
}
static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
return &cgrp->psi;
}
static inline void cgroup_init_kthreadd(void)
{
/*
@@ -710,6 +706,16 @@ static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
return NULL;
}
static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
return NULL;
}
static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
return NULL;
}
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
struct cgroup *ancestor)
{

View File

@@ -119,6 +119,11 @@ struct clk_duty {
* Called with enable_lock held. This function must not
* sleep.
*
* @save_context: Save the context of the clock in preparation for poweroff.
*
* @restore_context: Restore the context of the clock after a restoration
* of power.
*
* @recalc_rate: Recalculate the rate of this clock, by querying hardware. The
* parent rate is an input parameter. It is up to the caller to
* ensure that the prepare_mutex is held across this call.
@@ -223,6 +228,8 @@ struct clk_ops {
void (*disable)(struct clk_hw *hw);
int (*is_enabled)(struct clk_hw *hw);
void (*disable_unused)(struct clk_hw *hw);
int (*save_context)(struct clk_hw *hw);
void (*restore_context)(struct clk_hw *hw);
unsigned long (*recalc_rate)(struct clk_hw *hw,
unsigned long parent_rate);
long (*round_rate)(struct clk_hw *hw, unsigned long rate,
@@ -1011,5 +1018,7 @@ static inline void clk_writel(u32 val, u32 __iomem *reg)
#endif /* platform dependent I/O accessors */
void clk_gate_restore_context(struct clk_hw *hw);
#endif /* CONFIG_COMMON_CLK */
#endif /* CLK_PROVIDER_H */

View File

@@ -312,7 +312,26 @@ struct clk *clk_get(struct device *dev, const char *id);
*/
int __must_check clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks);
/**
* clk_bulk_get_all - lookup and obtain all available references to clock
* producer.
* @dev: device for clock "consumer"
* @clks: pointer to the clk_bulk_data table of consumer
*
* This helper function allows drivers to get all clk consumers in one
* operation. If any of the clocks cannot be acquired, then any clocks
* that were obtained will be freed before returning to the caller.
*
* Returns a positive value for the number of clocks obtained, while the
* clock references are stored in the clk_bulk_data table in the @clks
* field. Returns 0 if there are none and a negative value if something
* failed.
*
* Drivers must assume that the clock source is not enabled.
*
* clk_bulk_get_all should not be called from within interrupt context.
*/
int __must_check clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks);
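/*
 * A minimal driver-side sketch, assuming a probe path with a valid struct
 * device (my_probe is hypothetical; error handling trimmed). The positive
 * return value feeds the other bulk calls and clk_bulk_put_all().
 */
static int my_probe(struct device *dev)
{
	struct clk_bulk_data *clks;
	int num_clks, ret;

	num_clks = clk_bulk_get_all(dev, &clks);
	if (num_clks < 0)
		return num_clks;

	ret = clk_bulk_prepare_enable(num_clks, clks);
	if (ret)
		clk_bulk_put_all(num_clks, clks);
	return ret;
}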
/**
* devm_clk_bulk_get - managed get multiple clk consumers
* @dev: device for clock "consumer"
@@ -327,6 +346,22 @@ int __must_check clk_bulk_get(struct device *dev, int num_clks,
*/
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks);
/**
* devm_clk_bulk_get_all - managed get multiple clk consumers
* @dev: device for clock "consumer"
* @clks: pointer to the clk_bulk_data table of consumer
*
* Returns a positive value for the number of clocks obtained, while the
* clock references are stored in the clk_bulk_data table in the @clks
* field. Returns 0 if there are none and a negative value if something
* failed.
*
* This helper function allows drivers to get several clk
* consumers in one operation with management, the clks will
* automatically be freed when the device is unbound.
*/
int __must_check devm_clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks);
/**
* devm_clk_get - lookup and obtain a managed reference to a clock producer.
@@ -487,6 +522,19 @@ void clk_put(struct clk *clk);
*/
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);
/**
* clk_bulk_put_all - "free" all the clock sources
* @num_clks: the number of clk_bulk_data
* @clks: the clk_bulk_data table of consumer
*
* Note: drivers must ensure that all clk_bulk_enable calls made on this
* clock source are balanced by clk_bulk_disable calls prior to calling
* this function.
*
* clk_bulk_put_all should not be called from within interrupt context.
*/
void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);
/**
* devm_clk_put - "free" a managed clock source
* @dev: device used to acquire the clock
@@ -629,6 +677,23 @@ struct clk *clk_get_parent(struct clk *clk);
*/
struct clk *clk_get_sys(const char *dev_id, const char *con_id);
/**
* clk_save_context - save clock context for poweroff
*
* Saves the context of the clock registers for power states in which the
* contents of the registers will be lost. Occurs deep within the suspend
* code so locking is not necessary.
*/
int clk_save_context(void);
/**
* clk_restore_context - restore clock context after poweroff
*
* This occurs with all clocks enabled. Occurs deep within the resume code
* so locking is not necessary.
*/
void clk_restore_context(void);
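/*
 * Sketch of the expected pairing from hypothetical platform power hooks:
 * save with the system quiesced on the way down, restore with clocks back
 * on during resume.
 */
static int my_pm_suspend(void)
{
	return clk_save_context();
}

static void my_pm_resume(void)
{
	clk_restore_context();
}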
#else /* !CONFIG_HAVE_CLK */
static inline struct clk *clk_get(struct device *dev, const char *id)
@@ -642,6 +707,12 @@ static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
return 0;
}
static inline int __must_check clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks)
{
return 0;
}
static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
return NULL;
@@ -653,6 +724,13 @@ static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clk
return 0;
}
static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks)
{
return 0;
}
static inline struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id)
{
@@ -663,6 +741,8 @@ static inline void clk_put(struct clk *clk) {}
static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}
static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}
static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
@@ -728,6 +808,14 @@ static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
return NULL;
}
static inline int clk_save_context(void)
{
return 0;
}
static inline void clk_restore_context(void) {}
#endif
/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */

View File

@@ -1,14 +1,10 @@
/*
/* SPDX-License-Identifier: GPL-2.0+
*
* Copyright 2013 Ideas On Board SPRL
* Copyright 2013, 2014 Horms Solutions Ltd.
*
* Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
* Contact: Simon Horman <horms@verge.net.au>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef __LINUX_CLK_RENESAS_H_

View File

@@ -159,6 +159,7 @@ struct clk_hw_omap {
const char *clkdm_name;
struct clockdomain *clkdm;
const struct clk_hw_omap_ops *ops;
u32 context;
};
/*
@@ -290,9 +291,15 @@ struct ti_clk_features {
#define TI_CLK_DPLL4_DENY_REPROGRAM BIT(1)
#define TI_CLK_DISABLE_CLKDM_CONTROL BIT(2)
#define TI_CLK_ERRATA_I810 BIT(3)
#define TI_CLK_CLKCTRL_COMPAT BIT(4)
void ti_clk_setup_features(struct ti_clk_features *features);
const struct ti_clk_features *ti_clk_get_features(void);
int omap3_noncore_dpll_save_context(struct clk_hw *hw);
void omap3_noncore_dpll_restore_context(struct clk_hw *hw);
int omap3_core_dpll_save_context(struct clk_hw *hw);
void omap3_core_dpll_restore_context(struct clk_hw *hw);
extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll;

View File

@@ -241,6 +241,11 @@ static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz
__clocksource_update_freq_scale(cs, 1000, khz);
}
#ifdef CONFIG_ARCH_CLOCKSOURCE_INIT
extern void clocksource_arch_init(struct clocksource *cs);
#else
static inline void clocksource_arch_init(struct clocksource *cs) { }
#endif
extern int timekeeping_notify(struct clocksource *clock);
@@ -257,9 +262,6 @@ extern int clocksource_i8253_init(void);
#define TIMER_OF_DECLARE(name, compat, fn) \
OF_DECLARE_1_RET(timer, name, compat, fn)
#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \
TIMER_OF_DECLARE(name, compat, fn)
#ifdef CONFIG_TIMER_PROBE
extern void timer_probe(void);
#else

View File

@@ -7,7 +7,7 @@
*/
#include <linux/types.h>
#include <linux/compat_time.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/param.h> /* for HZ */
@@ -103,6 +103,9 @@ typedef struct compat_sigaltstack {
compat_size_t ss_size;
} compat_stack_t;
#endif
#ifndef COMPAT_MINSIGSTKSZ
#define COMPAT_MINSIGSTKSZ MINSIGSTKSZ
#endif
#define compat_jiffies_to_clock_t(x) \
(((unsigned long)(x) * COMPAT_USER_HZ) / HZ)
@@ -110,19 +113,12 @@ typedef struct compat_sigaltstack {
typedef __compat_uid32_t compat_uid_t;
typedef __compat_gid32_t compat_gid_t;
typedef compat_ulong_t compat_aio_context_t;
struct compat_sel_arg_struct;
struct rusage;
struct compat_utimbuf {
compat_time_t actime;
compat_time_t modtime;
};
struct compat_itimerval {
struct compat_timeval it_interval;
struct compat_timeval it_value;
struct old_timeval32 it_interval;
struct old_timeval32 it_value;
};
struct itimerval;
@@ -146,7 +142,7 @@ struct compat_timex {
compat_long_t constant;
compat_long_t precision;
compat_long_t tolerance;
struct compat_timeval time;
struct old_timeval32 time;
compat_long_t tick;
compat_long_t ppsfreq;
compat_long_t jitter;
@@ -307,8 +303,8 @@ struct compat_rlimit {
};
struct compat_rusage {
struct compat_timeval ru_utime;
struct compat_timeval ru_stime;
struct old_timeval32 ru_utime;
struct old_timeval32 ru_stime;
compat_long_t ru_maxrss;
compat_long_t ru_ixrss;
compat_long_t ru_idrss;
@@ -452,13 +448,13 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
unsigned long bitmap_size);
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
unsigned long bitmap_size);
int copy_siginfo_from_user32(siginfo_t *to, const struct compat_siginfo __user *from);
int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from);
int copy_siginfo_from_user32(kernel_siginfo_t *to, const struct compat_siginfo __user *from);
int copy_siginfo_to_user32(struct compat_siginfo __user *to, const kernel_siginfo_t *from);
int get_compat_sigevent(struct sigevent *event,
const struct compat_sigevent __user *u_event);
static inline int compat_timeval_compare(struct compat_timeval *lhs,
struct compat_timeval *rhs)
static inline int old_timeval32_compare(struct old_timeval32 *lhs,
struct old_timeval32 *rhs)
{
if (lhs->tv_sec < rhs->tv_sec)
return -1;
@@ -467,8 +463,8 @@ static inline int compat_timeval_compare(struct compat_timeval *lhs,
return lhs->tv_usec - rhs->tv_usec;
}
static inline int compat_timespec_compare(struct compat_timespec *lhs,
struct compat_timespec *rhs)
static inline int old_timespec32_compare(struct old_timespec32 *lhs,
struct old_timespec32 *rhs)
{
if (lhs->tv_sec < rhs->tv_sec)
return -1;
@@ -492,8 +488,11 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
compat_sigset_t v;
switch (_NSIG_WORDS) {
case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
/* fall through */
case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
/* fall through */
case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
/* fall through */
case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
}
return copy_to_user(compat, &v, size) ? -EFAULT : 0;
@@ -552,12 +551,12 @@ asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id,
compat_long_t min_nr,
compat_long_t nr,
struct io_event __user *events,
struct compat_timespec __user *timeout);
struct old_timespec32 __user *timeout);
asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id,
compat_long_t min_nr,
compat_long_t nr,
struct io_event __user *events,
struct compat_timespec __user *timeout,
struct old_timespec32 __user *timeout,
const struct __compat_aio_sigset __user *usig);
/* fs/cookies.c */
@@ -642,11 +641,11 @@ asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp,
compat_ulong_t __user *exp,
struct compat_timespec __user *tsp,
struct old_timespec32 __user *tsp,
void __user *sig);
asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
unsigned int nfds,
struct compat_timespec __user *tsp,
struct old_timespec32 __user *tsp,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
@@ -671,15 +670,15 @@ asmlinkage long compat_sys_newfstat(unsigned int fd,
/* fs/timerfd.c */
asmlinkage long compat_sys_timerfd_gettime(int ufd,
struct compat_itimerspec __user *otmr);
struct old_itimerspec32 __user *otmr);
asmlinkage long compat_sys_timerfd_settime(int ufd, int flags,
const struct compat_itimerspec __user *utmr,
struct compat_itimerspec __user *otmr);
const struct old_itimerspec32 __user *utmr,
struct old_itimerspec32 __user *otmr);
/* fs/utimes.c */
asmlinkage long compat_sys_utimensat(unsigned int dfd,
const char __user *filename,
struct compat_timespec __user *t,
struct old_timespec32 __user *t,
int flags);
/* kernel/exit.c */
@@ -691,7 +690,7 @@ asmlinkage long compat_sys_waitid(int, compat_pid_t,
/* kernel/futex.c */
asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
struct compat_timespec __user *utime, u32 __user *uaddr2,
struct old_timespec32 __user *utime, u32 __user *uaddr2,
u32 val3);
asmlinkage long
compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
@@ -701,8 +700,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
compat_size_t __user *len_ptr);
/* kernel/hrtimer.c */
asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
struct compat_timespec __user *rmtp);
asmlinkage long compat_sys_nanosleep(struct old_timespec32 __user *rqtp,
struct old_timespec32 __user *rmtp);
/* kernel/itimer.c */
asmlinkage long compat_sys_getitimer(int which,
@@ -722,19 +721,19 @@ asmlinkage long compat_sys_timer_create(clockid_t which_clock,
struct compat_sigevent __user *timer_event_spec,
timer_t __user *created_timer_id);
asmlinkage long compat_sys_timer_gettime(timer_t timer_id,
struct compat_itimerspec __user *setting);
struct old_itimerspec32 __user *setting);
asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags,
struct compat_itimerspec __user *new,
struct compat_itimerspec __user *old);
struct old_itimerspec32 __user *new,
struct old_itimerspec32 __user *old);
asmlinkage long compat_sys_clock_settime(clockid_t which_clock,
struct compat_timespec __user *tp);
struct old_timespec32 __user *tp);
asmlinkage long compat_sys_clock_gettime(clockid_t which_clock,
struct compat_timespec __user *tp);
struct old_timespec32 __user *tp);
asmlinkage long compat_sys_clock_getres(clockid_t which_clock,
struct compat_timespec __user *tp);
struct old_timespec32 __user *tp);
asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
struct compat_timespec __user *rqtp,
struct compat_timespec __user *rmtp);
struct old_timespec32 __user *rqtp,
struct old_timespec32 __user *rmtp);
/* kernel/ptrace.c */
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
@@ -748,7 +747,7 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t __user *user_mask_ptr);
asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
struct compat_timespec __user *interval);
struct old_timespec32 __user *interval);
/* kernel/signal.c */
asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
@@ -768,7 +767,7 @@ asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset,
compat_size_t sigsetsize);
asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese,
struct compat_siginfo __user *uinfo,
struct compat_timespec __user *uts, compat_size_t sigsetsize);
struct old_timespec32 __user *uts, compat_size_t sigsetsize);
asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
/* No generic prototype for rt_sigreturn */
@@ -782,9 +781,9 @@ asmlinkage long compat_sys_setrlimit(unsigned int resource,
asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru);
/* kernel/time.c */
asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv,
struct timezone __user *tz);
asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv,
struct timezone __user *tz);
asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
@@ -798,11 +797,11 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name,
asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes,
const char __user *u_msg_ptr,
compat_size_t msg_len, unsigned int msg_prio,
const struct compat_timespec __user *u_abs_timeout);
const struct old_timespec32 __user *u_abs_timeout);
asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes,
char __user *u_msg_ptr,
compat_size_t msg_len, unsigned int __user *u_msg_prio,
const struct compat_timespec __user *u_abs_timeout);
const struct old_timespec32 __user *u_abs_timeout);
asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
const struct compat_sigevent __user *u_notification);
asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
@@ -819,7 +818,7 @@ asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
/* ipc/sem.c */
asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
asmlinkage long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
unsigned nsems, const struct compat_timespec __user *timeout);
unsigned nsems, const struct old_timespec32 __user *timeout);
/* ipc/shm.c */
asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr);
@@ -876,7 +875,7 @@ asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid,
struct compat_siginfo __user *uinfo);
asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
unsigned vlen, unsigned int flags,
struct compat_timespec __user *timeout);
struct old_timespec32 __user *timeout);
asmlinkage long compat_sys_wait4(compat_pid_t pid,
compat_uint_t __user *stat_addr, int options,
struct compat_rusage __user *ru);
@@ -928,7 +927,7 @@ asmlinkage long compat_sys_pwritev64v2(unsigned long fd,
asmlinkage long compat_sys_open(const char __user *filename, int flags,
umode_t mode);
asmlinkage long compat_sys_utimes(const char __user *filename,
struct compat_timeval __user *t);
struct old_timeval32 __user *t);
/* __ARCH_WANT_SYSCALL_NO_FLAGS */
asmlinkage long compat_sys_signalfd(int ufd,
@@ -942,15 +941,15 @@ asmlinkage long compat_sys_newlstat(const char __user *filename,
struct compat_stat __user *statbuf);
/* __ARCH_WANT_SYSCALL_DEPRECATED */
asmlinkage long compat_sys_time(compat_time_t __user *tloc);
asmlinkage long compat_sys_time(old_time32_t __user *tloc);
asmlinkage long compat_sys_utime(const char __user *filename,
struct compat_utimbuf __user *t);
struct old_utimbuf32 __user *t);
asmlinkage long compat_sys_futimesat(unsigned int dfd,
const char __user *filename,
struct compat_timeval __user *t);
struct old_timeval32 __user *t);
asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp, compat_ulong_t __user *exp,
struct compat_timeval __user *tvp);
struct old_timeval32 __user *tvp);
asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
unsigned flags);
@@ -983,7 +982,7 @@ asmlinkage long compat_sys_sigaction(int sig,
#endif
/* obsolete: kernel/time/time.c */
asmlinkage long compat_sys_stime(compat_time_t __user *tptr);
asmlinkage long compat_sys_stime(old_time32_t __user *tptr);
/* obsolete: net/socket.c */
asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
@@ -1002,15 +1001,15 @@ static inline bool in_compat_syscall(void) { return is_compat_task(); }
#endif
/**
* ns_to_compat_timeval - Compat version of ns_to_timeval
* ns_to_old_timeval32 - Compat version of ns_to_timeval
* @nsec: the nanoseconds value to be converted
*
* Returns the compat_timeval representation of the nsec parameter.
* Returns the old_timeval32 representation of the nsec parameter.
*/
static inline struct compat_timeval ns_to_compat_timeval(s64 nsec)
static inline struct old_timeval32 ns_to_old_timeval32(s64 nsec)
{
struct timeval tv;
struct compat_timeval ctv;
struct old_timeval32 ctv;
tv = ns_to_timeval(nsec);
ctv.tv_sec = tv.tv_sec;
@@ -1033,9 +1032,9 @@ int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
#else /* !CONFIG_COMPAT */
#define is_compat_task() (0)
#ifndef in_compat_syscall
/* Ensure no one redefines in_compat_syscall() under !CONFIG_COMPAT */
#define in_compat_syscall in_compat_syscall
static inline bool in_compat_syscall(void) { return false; }
#endif
#endif /* CONFIG_COMPAT */

View File

@@ -1,32 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_COMPAT_TIME_H
#define _LINUX_COMPAT_TIME_H
#include <linux/types.h>
#include <linux/time64.h>
typedef s32 compat_time_t;
struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
};
struct compat_timeval {
compat_time_t tv_sec;
s32 tv_usec;
};
struct compat_itimerspec {
struct compat_timespec it_interval;
struct compat_timespec it_value;
};
extern int compat_get_timespec64(struct timespec64 *, const void __user *);
extern int compat_put_timespec64(const struct timespec64 *, void __user *);
extern int get_compat_itimerspec64(struct itimerspec64 *its,
const struct compat_itimerspec __user *uits);
extern int put_compat_itimerspec64(const struct itimerspec64 *its,
struct compat_itimerspec __user *uits);
#endif /* _LINUX_COMPAT_TIME_H */

View File

@@ -21,8 +21,6 @@
#define __SANITIZE_ADDRESS__
#endif
#define __no_sanitize_address __attribute__((no_sanitize("address")))
/*
* Not all versions of clang implement the type-generic versions
* of the builtin overflow checkers. Fortunately, clang implements
@@ -41,6 +39,3 @@
* compilers, like ICC.
*/
#define barrier() __asm__ __volatile__("" : : : "memory")
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
#define __assume_aligned(a, ...) \
__attribute__((__assume_aligned__(a, ## __VA_ARGS__)))

View File

@@ -68,31 +68,20 @@
*/
#define uninitialized_var(x) x = x
#ifdef __CHECKER__
#define __must_be_array(a) 0
#else
/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
#endif
#ifdef RETPOLINE
#define __noretpoline __attribute__((indirect_branch("keep")))
#define __noretpoline __attribute__((__indirect_branch__("keep")))
#endif
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
#define __optimize(level) __attribute__((__optimize__(level)))
#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
#ifndef __CHECKER__
#define __compiletime_warning(message) __attribute__((warning(message)))
#define __compiletime_error(message) __attribute__((error(message)))
#define __compiletime_warning(message) __attribute__((__warning__(message)))
#define __compiletime_error(message) __attribute__((__error__(message)))
#ifdef LATENT_ENTROPY_PLUGIN
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
#define __latent_entropy __attribute__((latent_entropy))
#endif
#endif /* __CHECKER__ */
/*
* calling noreturn functions, __builtin_unreachable() and __builtin_trap()
@@ -107,10 +96,6 @@
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
* control elsewhere.
*
* Early snapshots of gcc 4.5 don't support this and we can't detect
* this in the preprocessor, but we can live with this because they're
* unreleased. Really, we need to have autoconf for the kernel.
*/
#define unreachable() \
do { \
@@ -119,9 +104,6 @@
__builtin_unreachable(); \
} while (0)
/* Mark a function definition as prohibited from being cloned. */
#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
#if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__)
#define __randomize_layout __attribute__((randomize_layout))
#define __no_randomize_layout __attribute__((no_randomize_layout))
@@ -130,32 +112,6 @@
#define randomized_struct_fields_end } __randomize_layout;
#endif
/*
* When used with Link Time Optimization, gcc can optimize away C functions or
* variables which are referenced only from assembly code. __visible tells the
* optimizer that something else uses this function or variable, thus preventing
* this.
*/
#define __visible __attribute__((externally_visible))
/* gcc version specific checks */
#if GCC_VERSION >= 40900 && !defined(__CHECKER__)
/*
* __assume_aligned(n, k): Tell the optimizer that the returned
* pointer can be assumed to be k modulo n. The second argument is
* optional (default 0), so we use a variadic macro to make the
* shorthand.
*
* Beware: Do not apply this to functions which may return
* ERR_PTRs. Also, it is probably unwise to apply it to functions
* returning extra information in the low bits (but in that case the
* compiler should see some alignment anyway, when the return value is
* massaged by 'flags = ptr & 3; ptr &= ~3;').
*/
#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
#endif
/*
* GCC 'asm goto' miscompiles certain code sequences:
*
@@ -187,32 +143,10 @@
#define KASAN_ABI_VERSION 3
#endif
#if GCC_VERSION >= 40902
/*
* Tell the compiler that address safety instrumentation (KASAN)
* should not be applied to that function.
* Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
*/
#define __no_sanitize_address __attribute__((no_sanitize_address))
#endif
#if GCC_VERSION >= 50100
/*
* Mark structures as requiring designated initializers.
* https://gcc.gnu.org/onlinedocs/gcc/Designated-Inits.html
*/
#define __designated_init __attribute__((designated_init))
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
#if !defined(__noclone)
#define __noclone /* not needed */
#endif
#if !defined(__no_sanitize_address)
#define __no_sanitize_address
#endif
/*
* Turn individual warnings and errors on and off locally, depending
* on version.

View File

@@ -29,17 +29,8 @@
*/
#define OPTIMIZER_HIDE_VAR(var) barrier()
/* Intel ECC compiler doesn't support __builtin_types_compatible_p() */
#define __must_be_array(a) 0
#endif
/* icc has this, but it's called _bswap16 */
#define __HAVE_BUILTIN_BSWAP16__
#define __builtin_bswap16 _bswap16
/* The following are for compatibility with GCC, from compiler-gcc.h,
* and may be redefined here because they should not be shared with other
* compilers, like clang.
*/
#define __visible __attribute__((externally_visible))

View File

@@ -23,8 +23,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define __branch_check__(x, expect, is_constant) ({ \
long ______r; \
static struct ftrace_likely_data \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_annotated_branch"))) \
__aligned(4) \
__section("_ftrace_annotated_branch") \
______f = { \
.data.func = __func__, \
.data.file = __FILE__, \
@@ -59,8 +59,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
({ \
int ______r; \
static struct ftrace_branch_data \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_branch"))) \
__aligned(4) \
__section("_ftrace_branch") \
______f = { \
.func = __func__, \
.file = __FILE__, \
@@ -99,22 +99,13 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
* unique, to convince GCC not to merge duplicate inline asm statements.
*/
#define annotate_reachable() ({ \
asm volatile("%c0:\n\t" \
".pushsection .discard.reachable\n\t" \
".long %c0b - .\n\t" \
".popsection\n\t" : : "i" (__COUNTER__)); \
asm volatile("ANNOTATE_REACHABLE counter=%c0" \
: : "i" (__COUNTER__)); \
})
#define annotate_unreachable() ({ \
asm volatile("%c0:\n\t" \
".pushsection .discard.unreachable\n\t" \
".long %c0b - .\n\t" \
".popsection\n\t" : : "i" (__COUNTER__)); \
asm volatile("ANNOTATE_UNREACHABLE counter=%c0" \
: : "i" (__COUNTER__)); \
})
#define ASM_UNREACHABLE \
"999:\n\t" \
".pushsection .discard.unreachable\n\t" \
".long 999b - .\n\t" \
".popsection\n\t"
#else
#define annotate_reachable()
#define annotate_unreachable()
@@ -124,7 +115,10 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
# define unreachable() do { \
annotate_unreachable(); \
__builtin_unreachable(); \
} while (0)
#endif
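/*
 * Usage sketch (hypothetical function whose callers guarantee mode is 0 or
 * 1): unreachable() documents that the switch is exhaustive and, via the
 * annotation, keeps objtool from warning about falling off the end.
 */
static int mode_to_prio(int mode)
{
	switch (mode) {
	case 0:
		return 10;
	case 1:
		return 20;
	}
	unreachable();
}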
/*
@@ -146,7 +140,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
extern typeof(sym) sym; \
static const unsigned long __kentry_##sym \
__used \
__attribute__((section("___kentry" "+" #sym ), used)) \
__section("___kentry" "+" #sym ) \
= (unsigned long)&sym;
#endif
@@ -195,7 +189,7 @@ void __read_once_size(const volatile void *p, void *res, int size)
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif
@@ -287,7 +281,7 @@ unsigned long read_word_at_a_time(const void *addr)
* visible to the compiler.
*/
#define __ADDRESSABLE(sym) \
static void * __attribute__((section(".discard.addressable"), used)) \
static void * __section(".discard.addressable") __used \
__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
/**
@@ -299,11 +293,46 @@ static inline void *offset_to_ptr(const int *off)
return (void *)((unsigned long)off + *off);
}
#endif /* __ASSEMBLY__ */
#else /* __ASSEMBLY__ */
#ifndef __optimize
# define __optimize(level)
#endif
#ifdef __KERNEL__
#ifndef LINKER_SCRIPT
#ifdef CONFIG_STACK_VALIDATION
.macro ANNOTATE_UNREACHABLE counter:req
\counter:
.pushsection .discard.unreachable
.long \counter\()b -.
.popsection
.endm
.macro ANNOTATE_REACHABLE counter:req
\counter:
.pushsection .discard.reachable
.long \counter\()b -.
.popsection
.endm
.macro ASM_UNREACHABLE
999:
.pushsection .discard.unreachable
.long 999b - .
.popsection
.endm
#else /* CONFIG_STACK_VALIDATION */
.macro ANNOTATE_UNREACHABLE counter:req
.endm
.macro ANNOTATE_REACHABLE counter:req
.endm
.macro ASM_UNREACHABLE
.endm
#endif /* CONFIG_STACK_VALIDATION */
#endif /* LINKER_SCRIPT */
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
@@ -314,29 +343,14 @@ static inline void *offset_to_ptr(const int *off)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
* Sparse complains of variable sized arrays due to the temporary variable in
* __compiletime_assert. Unfortunately we can't just expand it out to make
* sparse see a constant array size without breaking compiletime_assert on old
* versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
*/
# ifndef __CHECKER__
# define __compiletime_error_fallback(condition) \
do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif
#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix) \
do { \
int __cond = !(condition); \
extern void prefix ## suffix(void) __compiletime_error(msg); \
if (__cond) \
if (!(condition)) \
prefix ## suffix(); \
__compiletime_error_fallback(__cond); \
} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
@@ -361,4 +375,7 @@ static inline void *offset_to_ptr(const int *off)
compiletime_assert(__native_word(t), \
"Need native word sized stores/loads for atomicity.")
/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
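/*
 * Context, hedged: this is the guard ARRAY_SIZE() builds on. Passing a
 * pointer rather than an array makes __same_type() evaluate false and the
 * BUILD_BUG_ON_ZERO() break the build:
 *
 *   #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 */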
#endif /* __LINUX_COMPILER_H */

View File

@@ -0,0 +1,262 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_ATTRIBUTES_H
#define __LINUX_COMPILER_ATTRIBUTES_H
/*
* The attributes in this file are unconditionally defined and they directly
* map to compiler attribute(s), unless one of the compilers does not support
* the attribute. In that case, __has_attribute is used to check for support
* and the reason is stated in its comment ("Optional: ...").
*
* Any other "attributes" (i.e. those that depend on a configuration option,
* on a compiler, on an architecture, on plugins, on other attributes...)
* should be defined elsewhere (e.g. compiler_types.h or compiler-*.h).
* The intention is to keep this file as simple as possible, as well as
* compiler- and version-agnostic (e.g. avoiding GCC_VERSION checks).
*
* This file is meant to be sorted (by actual attribute name,
* not by #define identifier). Use the __attribute__((__name__)) syntax
* (i.e. with underscores) to avoid future collisions with other macros.
* Provide links to the documentation of each supported compiler, if it exists.
*/
/*
* __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
* In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute
* by hand.
*
* sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__
* depending on the compiler used to build it; however, these attributes have
* no semantic effects for sparse, so it does not matter. Also note that,
* in order to avoid sparse's warnings, even the unsupported ones must be
* defined to 0.
*/
#ifndef __has_attribute
# define __has_attribute(x) __GCC4_has_attribute_##x
# define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9)
# define __GCC4_has_attribute___designated_init__ 0
# define __GCC4_has_attribute___externally_visible__ 1
# define __GCC4_has_attribute___noclone__ 1
# define __GCC4_has_attribute___optimize__ 1
# define __GCC4_has_attribute___nonstring__ 0
# define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
#endif
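/*
 * Worked expansion of the fallback (assuming gcc 4.9, __GNUC_MINOR__ == 9):
 *
 *   __has_attribute(__assume_aligned__)
 *     -> __GCC4_has_attribute___assume_aligned__   (token paste)
 *     -> (__GNUC_MINOR__ >= 9)                     -> 1
 */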
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alias-function-attribute
*/
#define __alias(symbol) __attribute__((__alias__(#symbol)))
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-aligned-function-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-aligned-type-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-aligned-variable-attribute
*/
#define __aligned(x) __attribute__((__aligned__(x)))
#define __aligned_largest __attribute__((__aligned__))
/*
* Note: users of __always_inline currently do not write "inline" themselves,
* which seems to be required by gcc to apply the attribute according
* to its docs (and also "warning: always_inline function might not be
* inlinable [-Wattributes]" is emitted).
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-always_005finline-function-attribute
* clang: mentioned
*/
#define __always_inline inline __attribute__((__always_inline__))
/*
* The second argument is optional (default 0), so we use a variadic macro
* to make the shorthand.
*
* Beware: Do not apply this to functions which may return
* ERR_PTRs. Also, it is probably unwise to apply it to functions
* returning extra information in the low bits (but in that case the
* compiler should see some alignment anyway, when the return value is
* massaged by 'flags = ptr & 3; ptr &= ~3;').
*
* Optional: only supported since gcc >= 4.9
* Optional: not supported by icc
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-assume_005faligned-function-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#assume-aligned
*/
#if __has_attribute(__assume_aligned__)
# define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
#else
# define __assume_aligned(a, ...)
#endif
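/*
 * Example, not part of the header: a hypothetical allocator whose result
 * is always 64-byte aligned can advertise that to the optimizer; on
 * compilers without the attribute the declaration is unchanged:
 *
 *   void *my_alloc64(size_t n) __assume_aligned(64);
 */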
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute
*/
#define __cold __attribute__((__cold__))
/*
* Note the long name.
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-const-function-attribute
*/
#define __attribute_const__ __attribute__((__const__))
/*
* Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated'
* attribute warnings entirely and for good") for more information.
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-deprecated-function-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-deprecated-type-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-deprecated-variable-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Enumerator-Attributes.html#index-deprecated-enumerator-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#deprecated
*/
#define __deprecated
/*
* Optional: only supported since gcc >= 5.1
* Optional: not supported by clang
* Optional: not supported by icc
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-designated_005finit-type-attribute
*/
#if __has_attribute(__designated_init__)
# define __designated_init __attribute__((__designated_init__))
#else
# define __designated_init
#endif
/*
* Optional: not supported by clang
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-externally_005fvisible-function-attribute
*/
#if __has_attribute(__externally_visible__)
# define __visible __attribute__((__externally_visible__))
#else
# define __visible
#endif
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-format-function-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#format
*/
#define __printf(a, b) __attribute__((__format__(printf, a, b)))
#define __scanf(a, b) __attribute__((__format__(scanf, a, b)))
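/*
 * Example, not part of the header: __printf(1, 2) on a hypothetical logger
 * lets the compiler type-check callers' format arguments:
 *
 *   __printf(1, 2) void my_log(const char *fmt, ...);
 */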
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-gnu_005finline-function-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#gnu-inline
*/
#define __gnu_inline __attribute__((__gnu_inline__))
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-malloc-function-attribute
*/
#define __malloc __attribute__((__malloc__))
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-mode-type-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-mode-variable-attribute
*/
#define __mode(x) __attribute__((__mode__(x)))
/*
* Optional: not supported by clang
* Note: icc does not recognize gcc's no-tracer
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noclone-function-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-optimize-function-attribute
*/
#if __has_attribute(__noclone__)
# if __has_attribute(__optimize__)
# define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
# else
# define __noclone __attribute__((__noclone__))
# endif
#else
# define __noclone
#endif
/*
* Note the missing underscores.
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noinline-function-attribute
* clang: mentioned
*/
#define noinline __attribute__((__noinline__))
/*
* Optional: only supported since gcc >= 8
* Optional: not supported by clang
* Optional: not supported by icc
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-nonstring-variable-attribute
*/
#if __has_attribute(__nonstring__)
# define __nonstring __attribute__((__nonstring__))
#else
# define __nonstring
#endif
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noreturn-function-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#noreturn
* clang: https://clang.llvm.org/docs/AttributeReference.html#id1
*/
#define __noreturn __attribute__((__noreturn__))
/*
* Optional: only supported since gcc >= 4.8
* Optional: not supported by icc
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-no_005fsanitize_005faddress-function-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#no-sanitize-address-no-address-safety-analysis
*/
#if __has_attribute(__no_sanitize_address__)
# define __no_sanitize_address __attribute__((__no_sanitize_address__))
#else
# define __no_sanitize_address
#endif
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-packed-type-attribute
* clang: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-packed-variable-attribute
*/
#define __packed __attribute__((__packed__))
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-pure-function-attribute
*/
#define __pure __attribute__((__pure__))
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-section-function-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-section-variable-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#section-declspec-allocate
*/
#define __section(S) __attribute__((__section__(#S)))
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-unused-function-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-unused-type-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-unused-variable-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-unused-label-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#maybe-unused-unused
*/
#define __always_unused __attribute__((__unused__))
#define __maybe_unused __attribute__((__unused__))
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-used-function-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-used-variable-attribute
*/
#define __used __attribute__((__used__))
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute
*/
#define __weak __attribute__((__weak__))
#endif /* __LINUX_COMPILER_ATTRIBUTES_H */

View File

@@ -1,3 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_TYPES_H
#define __LINUX_COMPILER_TYPES_H
@@ -54,6 +55,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
#ifdef __KERNEL__
/* Attributes */
#include <linux/compiler_attributes.h>
/* Compiler specific macros. */
#ifdef __clang__
#include <linux/compiler-clang.h>
@@ -78,12 +82,6 @@ extern void __chk_io_ptr(const volatile void __iomem *);
#include <asm/compiler.h>
#endif
/*
* Generic compiler-independent macros required for kernel
* build go below this comment. Actual compiler/compiler version
* specific implementations come from the above header files
*/
struct ftrace_branch_data {
const char *func;
const char *file;
@@ -106,10 +104,6 @@ struct ftrace_likely_data {
unsigned long constant;
};
/* Don't. Just don't. */
#define __deprecated
#define __deprecated_for_modules
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
@@ -119,10 +113,6 @@ struct ftrace_likely_data {
* compilers. We don't consider that to be an error, so set them to nothing.
* For example, some of them are for compiler specific plugins.
*/
#ifndef __designated_init
# define __designated_init
#endif
#ifndef __latent_entropy
# define __latent_entropy
#endif
@@ -140,15 +130,8 @@ struct ftrace_likely_data {
# define randomized_struct_fields_end
#endif
#ifndef __visible
#define __visible
#endif
/*
* Assume alignment of return value.
*/
#ifndef __assume_aligned
#define __assume_aligned(a, ...)
#ifndef asm_volatile_goto
#define asm_volatile_goto(x...) asm goto(x)
#endif
/* Are two types/vars the same type (ignoring qualifiers)? */
@@ -159,14 +142,6 @@ struct ftrace_likely_data {
(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#ifndef __attribute_const__
#define __attribute_const__ __attribute__((__const__))
#endif
#ifndef __noclone
#define __noclone
#endif
/* Helpers for emitting diagnostics in pragmas. */
#ifndef __diag
#define __diag(string)
@@ -186,44 +161,16 @@ struct ftrace_likely_data {
#define __diag_error(compiler, version, option, comment) \
__diag_ ## compiler(version, error, option)
/*
* From the GCC manual:
*
* Many functions have no effects except the return value and their
* return value depends only on the parameters and/or global
* variables. Such a function can be subject to common subexpression
* elimination and loop optimization just as an arithmetic operator
* would be.
* [...]
*/
#define __pure __attribute__((pure))
#define __aligned(x) __attribute__((aligned(x)))
#define __aligned_largest __attribute__((aligned))
#define __printf(a, b) __attribute__((format(printf, a, b)))
#define __scanf(a, b) __attribute__((format(scanf, a, b)))
#define __maybe_unused __attribute__((unused))
#define __always_unused __attribute__((unused))
#define __mode(x) __attribute__((mode(x)))
#define __malloc __attribute__((__malloc__))
#define __used __attribute__((__used__))
#define __noreturn __attribute__((noreturn))
#define __packed __attribute__((packed))
#define __weak __attribute__((weak))
#define __alias(symbol) __attribute__((alias(#symbol)))
#define __cold __attribute__((cold))
#define __section(S) __attribute__((__section__(#S)))
#ifdef CONFIG_ENABLE_MUST_CHECK
#define __must_check __attribute__((warn_unused_result))
#define __must_check __attribute__((__warn_unused_result__))
#else
#define __must_check
#endif
#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
#if defined(CC_USING_HOTPATCH)
#define notrace __attribute__((hotpatch(0, 0)))
#else
#define notrace __attribute__((no_instrument_function))
#define notrace __attribute__((__no_instrument_function__))
#endif
/*
@@ -232,22 +179,10 @@ struct ftrace_likely_data {
* stack and frame pointer being set up and there is no chance to
* restore the lr register to the value before mcount was called.
*/
#define __naked __attribute__((naked)) notrace
#define __naked __attribute__((__naked__)) notrace
#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
/*
* Feature detection for gnu_inline (gnu89 extern inline semantics). Either
* __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
* and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
* defined so the gnu89 semantics are the default.
*/
#ifdef __GNUC_STDC_INLINE__
# define __gnu_inline __attribute__((gnu_inline))
#else
# define __gnu_inline
#endif
/*
* Force always-inline if the user requests it so via the .config.
* GCC does not warn about unused static inline functions for
@@ -259,22 +194,20 @@ struct ftrace_likely_data {
* semantics rather than c99. This prevents multiple symbol definition errors
* of extern inline functions at link time.
* A lot of inline functions can cause havoc with function tracing.
* Do not use __always_inline here, since currently it expands to inline again
* (which would break users of __always_inline).
*/
#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
!defined(CONFIG_OPTIMIZE_INLINING)
#define inline \
inline __attribute__((always_inline, unused)) notrace __gnu_inline
#define inline inline __attribute__((__always_inline__)) __gnu_inline \
__maybe_unused notrace
#else
#define inline inline __attribute__((unused)) notrace __gnu_inline
#define inline inline __gnu_inline \
__maybe_unused notrace
#endif
#define __inline__ inline
#define __inline inline
#define noinline __attribute__((noinline))
#ifndef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif
#define __inline inline
/*
* Rather than using noinline to prevent stack consumption, use


@@ -17,9 +17,9 @@ extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
extern int dump_align(struct coredump_params *cprm, int align);
extern void dump_truncate(struct coredump_params *cprm);
#ifdef CONFIG_COREDUMP
extern void do_coredump(const siginfo_t *siginfo);
extern void do_coredump(const kernel_siginfo_t *siginfo);
#else
static inline void do_coredump(const siginfo_t *siginfo) {}
static inline void do_coredump(const kernel_siginfo_t *siginfo) {}
#endif
#endif /* _LINUX_COREDUMP_H */


@@ -94,20 +94,15 @@ union coresight_dev_subtype {
* @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs.
* @name: name of the component as shown under sysfs.
* @nr_inport: number of input ports for this component.
* @outports: list of remote endpoint port number.
* @child_names:name of all child components connected to this device.
* @child_ports:child component port number the current component is
connected to.
* @nr_outport: number of output ports for this component.
* @conns: Array of nr_outport connections from this component
*/
struct coresight_platform_data {
int cpu;
const char *name;
int nr_inport;
int *outports;
const char **child_names;
int *child_ports;
int nr_outport;
struct coresight_connection *conns;
};
/**
@@ -190,23 +185,15 @@ struct coresight_device {
* @disable: disables the sink.
* @alloc_buffer: initialises perf's ring buffer for trace collection.
* @free_buffer: release memory allocated in @alloc_buffer.
* @set_buffer: initialises buffer mechanic before a trace session.
* @reset_buffer: finalises buffer mechanic after a trace session.
* @update_buffer: update buffer pointers after a trace session.
*/
struct coresight_ops_sink {
int (*enable)(struct coresight_device *csdev, u32 mode);
int (*enable)(struct coresight_device *csdev, u32 mode, void *data);
void (*disable)(struct coresight_device *csdev);
void *(*alloc_buffer)(struct coresight_device *csdev, int cpu,
void **pages, int nr_pages, bool overwrite);
void (*free_buffer)(void *config);
int (*set_buffer)(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config);
unsigned long (*reset_buffer)(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config);
void (*update_buffer)(struct coresight_device *csdev,
unsigned long (*update_buffer)(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config);
};
@@ -270,6 +257,13 @@ extern int coresight_enable(struct coresight_device *csdev);
extern void coresight_disable(struct coresight_device *csdev);
extern int coresight_timeout(void __iomem *addr, u32 offset,
int position, int value);
extern int coresight_claim_device(void __iomem *base);
extern int coresight_claim_device_unlocked(void __iomem *base);
extern void coresight_disclaim_device(void __iomem *base);
extern void coresight_disclaim_device_unlocked(void __iomem *base);
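/*
 * Hedged usage sketch (not from this diff): a self-hosted driver claims
 * the component before touching its registers and disclaims it when
 * done; the actual programming step is elided.
 */
static int demo_program_component(void __iomem *base)
{
	int ret = coresight_claim_device(base);

	if (ret)
		return ret;

	/* ... program the component here ... */

	coresight_disclaim_device(base);
	return 0;
}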
#else
static inline struct coresight_device *
coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -279,6 +273,19 @@ coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
static inline void coresight_disable(struct coresight_device *csdev) {}
static inline int coresight_timeout(void __iomem *addr, u32 offset,
int position, int value) { return 1; }
static inline int coresight_claim_device_unlocked(void __iomem *base)
{
return -EINVAL;
}
static inline int coresight_claim_device(void __iomem *base)
{
return -EINVAL;
}
static inline void coresight_disclaim_device(void __iomem *base) {}
static inline void coresight_disclaim_device_unlocked(void __iomem *base) {}
#endif
#ifdef CONFIG_OF


@@ -45,7 +45,7 @@
* 'asm/cpufeature.h' of your favorite architecture.
*/
#define module_cpu_feature_match(x, __initfunc) \
static struct cpu_feature const cpu_feature_match_ ## x[] = \
static struct cpu_feature const __maybe_unused cpu_feature_match_ ## x[] = \
{ { .feature = cpu_feature(x) }, { } }; \
MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
\


@@ -126,6 +126,7 @@ enum cpuhp_state {
CPUHP_AP_MIPS_GIC_TIMER_STARTING,
CPUHP_AP_ARC_TIMER_STARTING,
CPUHP_AP_RISCV_TIMER_STARTING,
CPUHP_AP_CSKY_TIMER_STARTING,
CPUHP_AP_KVM_STARTING,
CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
CPUHP_AP_KVM_ARM_VGIC_STARTING,


@@ -81,6 +81,7 @@ struct cpuidle_device {
unsigned int registered:1;
unsigned int enabled:1;
unsigned int use_deepest_state:1;
unsigned int poll_time_limit:1;
unsigned int cpu;
int last_residency;
@@ -99,16 +100,6 @@ struct cpuidle_device {
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
/**
* cpuidle_get_last_residency - retrieves the last state's residency time
* @dev: the target CPU
*/
static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
{
return dev->last_residency;
}
/****************************
* CPUIDLE DRIVER INTERFACE *
****************************/


@@ -26,6 +26,10 @@ extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
unsigned long, int);
extern ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf,
size_t csize, unsigned long offset,
int userbuf);
void vmcore_cleanup(void);
/* Architecture code defines this if there are other possible ELF


@@ -6,6 +6,7 @@
#define CRC_T10DIF_DIGEST_SIZE 2
#define CRC_T10DIF_BLOCK_SIZE 1
#define CRC_T10DIF_STRING "crct10dif"
extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
size_t len);


@@ -454,6 +454,33 @@ struct compress_alg {
* @cra_refcnt: internally used
* @cra_destroy: internally used
*
* All following statistics are for this crypto_alg
* @encrypt_cnt: number of encrypt requests
* @decrypt_cnt: number of decrypt requests
* @compress_cnt: number of compress requests
* @decompress_cnt: number of decompress requests
* @generate_cnt: number of RNG generate requests
* @seed_cnt: number of times the rng was seeded
* @hash_cnt: number of hash requests
* @sign_cnt: number of sign requests
* @setsecret_cnt: number of setsecret operations
* @generate_public_key_cnt: number of generate_public_key operations
* @verify_cnt: number of verify operations
* @compute_shared_secret_cnt: number of compute_shared_secret operations
* @encrypt_tlen: total data size handled by encrypt requests
* @decrypt_tlen: total data size handled by decrypt requests
* @compress_tlen: total data size handled by compress requests
* @decompress_tlen: total data size handled by decompress requests
* @generate_tlen: total data size of generated data by the RNG
* @hash_tlen: total data size hashed
* @akcipher_err_cnt: number of errors for akcipher requests
* @cipher_err_cnt: number of errors for cipher requests
* @compress_err_cnt: number of errors for compress requests
* @aead_err_cnt: number of errors for aead requests
* @hash_err_cnt: number of errors for hash requests
* @rng_err_cnt: number of errors for rng requests
* @kpp_err_cnt: number of errors for kpp requests
*
* The struct crypto_alg describes a generic Crypto API algorithm and is common
* for all of the transformations. Any variable not documented here shall not
* be used by a cipher implementation as it is internal to the Crypto API.
@@ -487,6 +514,45 @@ struct crypto_alg {
void (*cra_destroy)(struct crypto_alg *alg);
struct module *cra_module;
union {
atomic_t encrypt_cnt;
atomic_t compress_cnt;
atomic_t generate_cnt;
atomic_t hash_cnt;
atomic_t setsecret_cnt;
};
union {
atomic64_t encrypt_tlen;
atomic64_t compress_tlen;
atomic64_t generate_tlen;
atomic64_t hash_tlen;
};
union {
atomic_t akcipher_err_cnt;
atomic_t cipher_err_cnt;
atomic_t compress_err_cnt;
atomic_t aead_err_cnt;
atomic_t hash_err_cnt;
atomic_t rng_err_cnt;
atomic_t kpp_err_cnt;
};
union {
atomic_t decrypt_cnt;
atomic_t decompress_cnt;
atomic_t seed_cnt;
atomic_t generate_public_key_cnt;
};
union {
atomic64_t decrypt_tlen;
atomic64_t decompress_tlen;
};
union {
atomic_t verify_cnt;
atomic_t compute_shared_secret_cnt;
};
atomic_t sign_cnt;
} CRYPTO_MINALIGN_ATTR;
/*
@@ -907,6 +973,38 @@ static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
return __crypto_ablkcipher_cast(req->base.tfm);
}
static inline void crypto_stat_ablkcipher_encrypt(struct ablkcipher_request *req,
int ret)
{
#ifdef CONFIG_CRYPTO_STATS
struct ablkcipher_tfm *crt =
crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt);
} else {
atomic_inc(&crt->base->base.__crt_alg->encrypt_cnt);
atomic64_add(req->nbytes, &crt->base->base.__crt_alg->encrypt_tlen);
}
#endif
}
static inline void crypto_stat_ablkcipher_decrypt(struct ablkcipher_request *req,
int ret)
{
#ifdef CONFIG_CRYPTO_STATS
struct ablkcipher_tfm *crt =
crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt);
} else {
atomic_inc(&crt->base->base.__crt_alg->decrypt_cnt);
atomic64_add(req->nbytes, &crt->base->base.__crt_alg->decrypt_tlen);
}
#endif
}
/**
* crypto_ablkcipher_encrypt() - encrypt plaintext
* @req: reference to the ablkcipher_request handle that holds all information
@@ -922,7 +1020,11 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
struct ablkcipher_tfm *crt =
crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
return crt->encrypt(req);
int ret;
ret = crt->encrypt(req);
crypto_stat_ablkcipher_encrypt(req, ret);
return ret;
}
/**
@@ -940,7 +1042,11 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
struct ablkcipher_tfm *crt =
crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
return crt->decrypt(req);
int ret;
ret = crt->decrypt(req);
crypto_stat_ablkcipher_decrypt(req, ret);
return ret;
}
/**


@@ -8,6 +8,7 @@
#ifndef _LINUX_CUDA_H
#define _LINUX_CUDA_H
#include <linux/rtc.h>
#include <uapi/linux/cuda.h>
@@ -16,4 +17,7 @@ extern int cuda_request(struct adb_request *req,
void (*done)(struct adb_request *), int nbytes, ...);
extern void cuda_poll(void);
extern time64_t cuda_get_time(void);
extern int cuda_set_rtc_time(struct rtc_time *tm);
#endif /* _LINUX_CUDA_H */


@@ -8,8 +8,8 @@
struct task_struct;
extern int debug_locks;
extern int debug_locks_silent;
extern int debug_locks __read_mostly;
extern int debug_locks_silent __read_mostly;
static inline int __debug_locks_off(void)


@@ -57,7 +57,12 @@ struct task_delay_info {
u64 freepages_start;
u64 freepages_delay; /* wait for memory reclaim */
u64 thrashing_start;
u64 thrashing_delay; /* wait for thrashing page */
u32 freepages_count; /* total count of memory reclaim */
u32 thrashing_count; /* total count of thrash waits */
};
#endif
@@ -76,6 +81,8 @@ extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
extern __u64 __delayacct_blkio_ticks(struct task_struct *);
extern void __delayacct_freepages_start(void);
extern void __delayacct_freepages_end(void);
extern void __delayacct_thrashing_start(void);
extern void __delayacct_thrashing_end(void);
static inline int delayacct_is_task_waiting_on_io(struct task_struct *p)
{
@@ -156,6 +163,18 @@ static inline void delayacct_freepages_end(void)
__delayacct_freepages_end();
}
static inline void delayacct_thrashing_start(void)
{
if (current->delays)
__delayacct_thrashing_start();
}
static inline void delayacct_thrashing_end(void)
{
if (current->delays)
__delayacct_thrashing_end();
}
#else
static inline void delayacct_set_flag(int flag)
{}
@@ -182,6 +201,10 @@ static inline void delayacct_freepages_start(void)
{}
static inline void delayacct_freepages_end(void)
{}
static inline void delayacct_thrashing_start(void)
{}
static inline void delayacct_thrashing_end(void)
{}
#endif /* CONFIG_TASK_DELAY_ACCT */


@@ -198,6 +198,14 @@ extern void devm_devfreq_remove_device(struct device *dev,
extern int devfreq_suspend_device(struct devfreq *devfreq);
extern int devfreq_resume_device(struct devfreq *devfreq);
/**
* update_devfreq() - Reevaluate the device and configure frequency
* @devfreq: the devfreq device
*
* Note: devfreq->lock must be held
*/
extern int update_devfreq(struct devfreq *devfreq);
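/*
 * Hedged sketch (illustration only): per the note above, callers take
 * devfreq->lock around update_devfreq(), e.g. from a governor callback.
 */
static int demo_reevaluate(struct devfreq *devfreq)
{
	int ret;

	mutex_lock(&devfreq->lock);
	ret = update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);

	return ret;
}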
/* Helper functions for devfreq user device driver with OPP. */
extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
unsigned long *freq, u32 flags);


@@ -26,9 +26,8 @@ enum dm_queue_mode {
DM_TYPE_NONE = 0,
DM_TYPE_BIO_BASED = 1,
DM_TYPE_REQUEST_BASED = 2,
DM_TYPE_MQ_REQUEST_BASED = 3,
DM_TYPE_DAX_BIO_BASED = 4,
DM_TYPE_NVME_BIO_BASED = 5,
DM_TYPE_DAX_BIO_BASED = 3,
DM_TYPE_NVME_BIO_BASED = 4,
};
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
@@ -92,6 +91,11 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector,
struct blk_zone *zones,
unsigned int *nr_zones,
gfp_t gfp_mask);
/*
* These iteration functions are typically used to check (and combine)
* properties of underlying devices.
@@ -180,6 +184,9 @@ struct target_type {
dm_status_fn status;
dm_message_fn message;
dm_prepare_ioctl_fn prepare_ioctl;
#ifdef CONFIG_BLK_DEV_ZONED
dm_report_zones_fn report_zones;
#endif
dm_busy_fn busy;
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
@@ -420,8 +427,8 @@ struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
void dm_remap_zone_report(struct dm_target *ti, struct bio *bio,
sector_t start);
void dm_remap_zone_report(struct dm_target *ti, sector_t start,
struct blk_zone *zones, unsigned int *nr_zones);
union map_info *dm_get_rq_mapinfo(struct request *rq);
struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
@@ -490,6 +497,7 @@ sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);
/*
* Trigger an event.


@@ -55,6 +55,8 @@ struct bus_attribute {
struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
#define BUS_ATTR_RO(_name) \
struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
#define BUS_ATTR_WO(_name) \
struct bus_attribute bus_attr_##_name = __ATTR_WO(_name)
extern int __must_check bus_create_file(struct bus_type *,
struct bus_attribute *);
@@ -692,8 +694,10 @@ static inline void *devm_kcalloc(struct device *dev,
{
return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
}
extern void devm_kfree(struct device *dev, void *p);
extern void devm_kfree(struct device *dev, const void *p);
extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
extern const char *devm_kstrdup_const(struct device *dev,
const char *s, gfp_t gfp);
extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
gfp_t gfp);
@@ -773,6 +777,30 @@ struct device *device_connection_find(struct device *dev, const char *con_id);
void device_connection_add(struct device_connection *con);
void device_connection_remove(struct device_connection *con);
/**
* device_connections_add - Add multiple device connections at once
* @cons: Zero terminated array of device connection descriptors
*/
static inline void device_connections_add(struct device_connection *cons)
{
struct device_connection *c;
for (c = cons; c->endpoint[0]; c++)
device_connection_add(c);
}
/**
* device_connections_remove - Remove multiple device connections at once
* @cons: Zero terminated array of device connection descriptors
*/
static inline void device_connections_remove(struct device_connection *cons)
{
struct device_connection *c;
for (c = cons; c->endpoint[0]; c++)
device_connection_remove(c);
}
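/*
 * Hedged usage sketch (not part of this diff): a zero-terminated array
 * of descriptors registered in one call. The endpoint and id strings
 * are made up for illustration.
 */
static struct device_connection demo_cons[] = {
	{ .endpoint = { "usb1-port0", "typec-mux0" }, .id = "mux" },
	{ .endpoint = { "usb1-port0", "typec-sw0" }, .id = "switch" },
	{ }	/* zero-terminating sentinel */
};

static void demo_register_connections(void)
{
	device_connections_add(demo_cons);
}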
/**
* enum device_link_state - Device link states.
* @DL_STATE_NONE: The presence of the drivers is not being tracked.
@@ -927,6 +955,8 @@ struct dev_links_info {
* @offline: Set after successful invocation of bus type's .offline().
* @of_node_reused: Set if the device-tree node is shared with an ancestor
* device.
* @dma_coherent: this particular device is dma coherent, even if the
* architecture supports non-coherent devices.
*
* At the lowest level, every device in a Linux system is represented by an
* instance of struct device. The device structure contains the information
@@ -1016,6 +1046,11 @@ struct device {
bool offline_disabled:1;
bool offline:1;
bool of_node_reused:1;
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
bool dma_coherent:1;
#endif
};
static inline struct device *kobj_to_dev(struct kobject *kobj)


@@ -32,6 +32,9 @@ extern void dma_debug_add_bus(struct bus_type *bus);
extern int dma_debug_resize_entries(u32 num_entries);
extern void debug_dma_map_single(struct device *dev, const void *addr,
unsigned long len);
extern void debug_dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
int direction, dma_addr_t dma_addr,
@@ -103,6 +106,11 @@ static inline int dma_debug_resize_entries(u32 num_entries)
return 0;
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
unsigned long len)
{
}
static inline void debug_dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
int direction, dma_addr_t dma_addr,


@@ -5,6 +5,8 @@
#include <linux/dma-mapping.h>
#include <linux/mem_encrypt.h>
#define DIRECT_MAPPING_ERROR 0
#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#else
@@ -27,7 +29,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
if (!dev->dma_mask)
return false;
return addr + size - 1 <= *dev->dma_mask;
return addr + size - 1 <=
min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
@@ -55,10 +58,15 @@ static inline void dma_mark_clean(void *addr, size_t size)
}
#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */
u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
void *dma_direct_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
unsigned long attrs);


@@ -130,13 +130,10 @@ struct dma_map_ops {
enum dma_data_direction direction);
int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
int (*dma_supported)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
u64 (*get_required_mask)(struct device *dev);
#endif
};
extern const struct dma_map_ops dma_direct_ops;
extern const struct dma_map_ops dma_noncoherent_ops;
extern const struct dma_map_ops dma_virt_ops;
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
@@ -232,6 +229,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
dma_addr_t addr;
BUG_ON(!valid_dma_direction(dir));
debug_dma_map_single(dev, ptr, size);
addr = ops->map_page(dev, virt_to_page(ptr),
offset_in_page(ptr), size,
dir, attrs);
@@ -445,7 +443,8 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
}
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
void *dma_common_contiguous_remap(struct page *page, size_t size,
unsigned long vm_flags,
@@ -477,14 +476,14 @@ dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
BUG_ON(!ops);
if (ops->mmap)
return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
dma_addr_t dma_addr, size_t size, unsigned long attrs);
static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
@@ -496,7 +495,8 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
if (ops->get_sgtable)
return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
attrs);
return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
attrs);
}
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
@@ -558,9 +558,11 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
dma_addr_t *dma_handle, gfp_t gfp)
{
return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
return dma_alloc_attrs(dev, size, dma_handle, gfp,
(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
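/*
 * Hedged sketch: given the mapping above, a caller that passes
 * __GFP_NOWARN also gets DMA_ATTR_NO_WARN, so allocation failures stay
 * silent. All names here are illustrative.
 */
static void *demo_quiet_alloc(struct device *dev, size_t size,
			      dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, size, handle,
				  GFP_KERNEL | __GFP_NOWARN);
}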
static inline void dma_free_coherent(struct device *dev, size_t size,
@@ -753,18 +755,6 @@ dma_mark_declared_memory_occupied(struct device *dev,
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
#ifdef CONFIG_HAS_DMA
int dma_configure(struct device *dev);
void dma_deconfigure(struct device *dev);
#else
static inline int dma_configure(struct device *dev)
{
return 0;
}
static inline void dma_deconfigure(struct device *dev) {}
#endif
/*
* Managed DMA API
*/
@@ -806,8 +796,12 @@ static inline void dmam_release_declared_memory(struct device *dev)
static inline void *dma_alloc_wc(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t gfp)
{
return dma_alloc_attrs(dev, size, dma_addr, gfp,
DMA_ATTR_WRITE_COMBINE);
unsigned long attrs = DMA_ATTR_WRITE_COMBINE;
if (gfp & __GFP_NOWARN)
attrs |= DMA_ATTR_NO_WARN;
return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc

View File

@@ -4,18 +4,35 @@
#include <linux/dma-mapping.h>
#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
#include <asm/dma-coherence.h>
#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
static inline bool dev_is_dma_coherent(struct device *dev)
{
return dev->dma_coherent;
}
#else
static inline bool dev_is_dma_coherent(struct device *dev)
{
return true;
}
#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
dma_addr_t dma_addr);
#ifdef CONFIG_DMA_NONCOHERENT_MMAP
int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
unsigned long attrs);
#else
#define arch_dma_mmap NULL
#endif /* CONFIG_DMA_NONCOHERENT_MMAP */
# define arch_dma_mmap_pgprot(dev, prot, attrs) pgprot_noncached(prot)
#endif
#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,


@@ -58,4 +58,73 @@ enum sprd_dma_int_type {
SPRD_DMA_CFGERR_INT,
};
/*
* struct sprd_dma_linklist - DMA link-list address structure
* @virt_addr: link-list virtual address to configure link-list node
* @phy_addr: link-list physical address to link DMA transfer
*
* The Spreadtrum DMA controller supports link-list mode, which means slaves
* can supply several groups of configurations (each configuration represents
* one DMA transfer) saved in memory, and the DMA controller links these
* groups of configurations by writing the physical address of each
* configuration into the link-list register.
*
* As shown below, the link-list pointer register points to the physical
* address of 'configuration 1', the link-list pointer of 'configuration 1'
* points to 'configuration 2', and so on.
* Once the DMA transfer is triggered, the DMA controller loads
* 'configuration 1' into its registers automatically; after the
* 'configuration 1' transaction is done, it loads 'configuration 2'
* automatically, and so on until all DMA transactions are done.
*
* Note: The last link-list pointer should point back to the physical address
* of 'configuration 1', which prevents the DMA controller from loading an
* incorrect configuration when the last configuration transaction is done.
*
* DMA controller linklist memory
* ====================== -----------------------
*| | | configuration 1 |<---
*| DMA controller | ------->| | |
*| | | | | |
*| | | | | |
*| | | | | |
*| linklist pointer reg |---- ----| linklist pointer | |
* ====================== | ----------------------- |
* | |
* | ----------------------- |
* | | configuration 2 | |
* --->| | |
* | | |
* | | |
* | | |
* ----| linklist pointer | |
* | ----------------------- |
* | |
* | ----------------------- |
* | | configuration 3 | |
* --->| | |
* | | |
* | . | |
* . |
* . |
* . |
* | . |
* | ----------------------- |
* | | configuration n | |
* --->| | |
* | | |
* | | |
* | | |
* | linklist pointer |----
* -----------------------
*
* To support the link-list mode, DMA slaves should allocate one segment of
* memory from always-on IRAM or DMA coherent memory to store these groups of
* DMA configurations, and pass the virtual and physical addresses to the DMA
* controller.
*/
struct sprd_dma_linklist {
unsigned long virt_addr;
phys_addr_t phy_addr;
};
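/*
 * Hedged sketch (not part of this diff): allocate one segment of DMA
 * coherent memory for the configuration groups and pass both addresses
 * through this structure. DEMO_CFG_BYTES is an assumed size, and the
 * dma_addr_t is treated as the physical address purely for illustration.
 */
#define DEMO_CFG_BYTES	1024

static int demo_setup_linklist(struct device *dev,
			       struct sprd_dma_linklist *ll)
{
	dma_addr_t phy;
	void *virt = dma_alloc_coherent(dev, DEMO_CFG_BYTES, &phy,
					GFP_KERNEL);

	if (!virt)
		return -ENOMEM;

	ll->virt_addr = (unsigned long)virt;
	ll->phy_addr = phy;
	return 0;
}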
#endif


@@ -24,11 +24,9 @@
#ifndef _LINUX_DNS_RESOLVER_H
#define _LINUX_DNS_RESOLVER_H
#ifdef __KERNEL__
#include <uapi/linux/dns_resolver.h>
extern int dns_query(const char *type, const char *name, size_t namelen,
const char *options, char **_result, time64_t *_expiry);
#endif /* __KERNEL__ */
#endif /* _LINUX_DNS_RESOLVER_H */


@@ -17,6 +17,7 @@
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/numa.h>
#define EDAC_DEVICE_NAME_LEN 31
@@ -451,6 +452,8 @@ struct dimm_info {
u32 nr_pages; /* number of pages on this dimm */
unsigned csrow, cschannel; /* Points to the old API data */
u16 smbios_handle; /* Handle for SMBIOS type 17 */
};
/**
@@ -670,6 +673,6 @@ struct mem_ctl_info {
/*
* Maximum number of memory controllers in the coherent fabric.
*/
#define EDAC_MAX_MCS 16
#define EDAC_MAX_MCS (2 * MAX_NUMNODES)
#endif


@@ -672,6 +672,7 @@ void efi_native_runtime_setup(void);
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa)
#define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2)
typedef struct {
efi_guid_t guid;
@@ -957,6 +958,7 @@ extern struct efi {
unsigned long mem_attr_table; /* memory attributes table */
unsigned long rng_seed; /* UEFI firmware random seed */
unsigned long tpm_log; /* TPM2 Event Log table */
unsigned long mem_reserve; /* Linux EFI memreserve table */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -1041,6 +1043,7 @@ extern int __init efi_uart_console_only (void);
extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern void efi_mem_reserve(phys_addr_t addr, u64 size);
extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
extern void efi_reserve_boot_services(void);
@@ -1164,6 +1167,8 @@ static inline bool efi_enabled(int feature)
extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
extern bool efi_is_table_address(unsigned long phys_addr);
extern int efi_apply_persistent_mem_reservations(void);
#else
static inline bool efi_enabled(int feature)
{
@@ -1182,6 +1187,11 @@ static inline bool efi_is_table_address(unsigned long phys_addr)
{
return false;
}
static inline int efi_apply_persistent_mem_reservations(void)
{
return 0;
}
#endif
extern int efi_status_to_err(efi_status_t status);
@@ -1659,7 +1669,55 @@ struct linux_efi_tpm_eventlog {
extern int efi_tpm_eventlog_init(void);
/*
* efi_runtime_service() function identifiers.
* "NONE" is used by efi_recover_from_page_fault() to check if the page
* fault happened while executing an efi runtime service.
*/
enum efi_rts_ids {
NONE,
GET_TIME,
SET_TIME,
GET_WAKEUP_TIME,
SET_WAKEUP_TIME,
GET_VARIABLE,
GET_NEXT_VARIABLE,
SET_VARIABLE,
QUERY_VARIABLE_INFO,
GET_NEXT_HIGH_MONO_COUNT,
RESET_SYSTEM,
UPDATE_CAPSULE,
QUERY_CAPSULE_CAPS,
};
/*
* efi_runtime_work: Details of EFI Runtime Service work
* @arg<1-5>: EFI Runtime Service function arguments
* @status: Status of executing EFI Runtime Service
* @efi_rts_id: EFI Runtime Service function identifier
* @efi_rts_comp: Struct used for handling completions
*/
struct efi_runtime_work {
void *arg1;
void *arg2;
void *arg3;
void *arg4;
void *arg5;
efi_status_t status;
struct work_struct work;
enum efi_rts_ids efi_rts_id;
struct completion efi_rts_comp;
};
extern struct efi_runtime_work efi_rts_work;
/* Workqueue to queue EFI Runtime Services */
extern struct workqueue_struct *efi_rts_wq;
struct linux_efi_memreserve {
phys_addr_t next;
phys_addr_t base;
phys_addr_t size;
};
#endif /* _LINUX_EFI_H */


@@ -111,7 +111,7 @@ struct elevator_mq_ops {
void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
bool (*has_work)(struct blk_mq_hw_ctx *);
void (*completed_request)(struct request *);
void (*completed_request)(struct request *, u64);
void (*started_request)(struct request *);
void (*requeue_request)(struct request *);
struct request *(*former_request)(struct request_queue *, struct request *);


@@ -27,10 +27,10 @@ struct compat_elf_prstatus
compat_pid_t pr_ppid;
compat_pid_t pr_pgrp;
compat_pid_t pr_sid;
struct compat_timeval pr_utime;
struct compat_timeval pr_stime;
struct compat_timeval pr_cutime;
struct compat_timeval pr_cstime;
struct old_timeval32 pr_utime;
struct old_timeval32 pr_stime;
struct old_timeval32 pr_cutime;
struct old_timeval32 pr_cstime;
compat_elf_gregset_t pr_reg;
#ifdef CONFIG_BINFMT_ELF_FDPIC
compat_ulong_t pr_exec_fdpic_loadmap;


@@ -183,14 +183,6 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
/**
* struct ethtool_ops - optional netdev operations
* @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
* API. Get various device settings including Ethernet link
* settings. The @cmd parameter is expected to have been cleared
* before get_settings is called. Returns a negative error code
* or zero.
* @set_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
* API. Set various device settings including Ethernet link
* settings. Returns a negative error code or zero.
* @get_drvinfo: Report driver/device information. Should only set the
* @driver, @version, @fw_version and @bus_info fields. If not
* implemented, the @driver and @bus_info fields will be filled in
@@ -297,19 +289,16 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* a TX queue has this number, return -EINVAL. If only a RX queue or a TX
* queue has this number, ignore the inapplicable fields.
* Returns a negative error code or zero.
* @get_link_ksettings: When defined, takes precedence over the
* %get_settings method. Get various device settings
* including Ethernet link settings. The %cmd and
* %link_mode_masks_nwords fields should be ignored (use
* %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), any
* change to them will be overwritten by kernel. Returns a
* negative error code or zero.
* @set_link_ksettings: When defined, takes precedence over the
* %set_settings method. Set various device settings including
* Ethernet link settings. The %cmd and %link_mode_masks_nwords
* fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS
* instead of the latter), any change to them will be overwritten
* by kernel. Returns a negative error code or zero.
* @get_link_ksettings: Get various device settings including Ethernet link
* settings. The %cmd and %link_mode_masks_nwords fields should be
* ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter),
* any change to them will be overwritten by kernel. Returns a negative
* error code or zero.
* @set_link_ksettings: Set various device settings including Ethernet link
* settings. The %cmd and %link_mode_masks_nwords fields should be
* ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter),
* any change to them will be overwritten by kernel. Returns a negative
* error code or zero.
* @get_fecparam: Get the network device Forward Error Correction parameters.
* @set_fecparam: Set the network device Forward Error Correction parameters.
* @get_ethtool_phy_stats: Return extended statistics about the PHY device.
@@ -329,8 +318,6 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* of the generic netdev features interface.
*/
struct ethtool_ops {
int (*get_settings)(struct net_device *, struct ethtool_cmd *);
int (*set_settings)(struct net_device *, struct ethtool_cmd *);
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
int (*get_regs_len)(struct net_device *);
void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);


@@ -1,12 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/**
* include/linux/f2fs_fs.h
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _LINUX_F2FS_FS_H
#define _LINUX_F2FS_FS_H
@@ -112,12 +109,15 @@ struct f2fs_super_block {
struct f2fs_device devs[MAX_DEVICES]; /* device list */
__le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */
__u8 hot_ext_count; /* # of hot file extension */
__u8 reserved[314]; /* valid reserved region */
__u8 reserved[310]; /* valid reserved region */
__le32 crc; /* checksum of superblock */
} __packed;
/*
* For checkpoint
*/
#define CP_DISABLED_FLAG 0x00001000
#define CP_QUOTA_NEED_FSCK_FLAG 0x00000800
#define CP_LARGE_NAT_BITMAP_FLAG 0x00000400
#define CP_NOCRC_RECOVERY_FLAG 0x00000200
#define CP_TRIMMED_FLAG 0x00000100


@@ -4,6 +4,61 @@
#include <uapi/linux/fanotify.h>
/* not valid from userspace, only kernel internal */
#define FAN_MARK_ONDIR 0x00000100
#define FAN_GROUP_FLAG(group, flag) \
((group)->fanotify_data.flags & (flag))
/*
* Flags allowed to be passed from/to userspace.
*
* We intentionally do not add new bits to the old FAN_ALL_* constants, because
* they are uapi exposed constants. If there are programs out there using
* these constants, those programs may break if recompiled with new uapi
* headers and then run on an old kernel.
*/
#define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FAN_CLASS_CONTENT | \
FAN_CLASS_PRE_CONTENT)
#define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | \
FAN_REPORT_TID | \
FAN_CLOEXEC | FAN_NONBLOCK | \
FAN_UNLIMITED_QUEUE | FAN_UNLIMITED_MARKS)
#define FANOTIFY_MARK_TYPE_BITS (FAN_MARK_INODE | FAN_MARK_MOUNT | \
FAN_MARK_FILESYSTEM)
#define FANOTIFY_MARK_FLAGS (FANOTIFY_MARK_TYPE_BITS | \
FAN_MARK_ADD | \
FAN_MARK_REMOVE | \
FAN_MARK_DONT_FOLLOW | \
FAN_MARK_ONLYDIR | \
FAN_MARK_IGNORED_MASK | \
FAN_MARK_IGNORED_SURV_MODIFY | \
FAN_MARK_FLUSH)
/* Events that user can request to be notified on */
#define FANOTIFY_EVENTS (FAN_ACCESS | FAN_MODIFY | \
FAN_CLOSE | FAN_OPEN)
/* Events that require a permission response from user */
#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM)
/* Extra flags that may be reported with event or control handling of events */
#define FANOTIFY_EVENT_FLAGS (FAN_EVENT_ON_CHILD | FAN_ONDIR)
/* Events that may be reported to user */
#define FANOTIFY_OUTGOING_EVENTS (FANOTIFY_EVENTS | \
FANOTIFY_PERM_EVENTS | \
FAN_Q_OVERFLOW)
#define ALL_FANOTIFY_EVENT_BITS (FANOTIFY_OUTGOING_EVENTS | \
FANOTIFY_EVENT_FLAGS)
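/*
 * Hedged sketch (illustrative only): the way a syscall handler might
 * use the masks above to reject unsupported fanotify_init() flags.
 */
static inline bool demo_init_flags_valid(unsigned int flags)
{
	return (flags & ~FANOTIFY_INIT_FLAGS) == 0;
}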
/* Do not use these old uapi constants internally */
#undef FAN_ALL_CLASS_BITS
#undef FAN_ALL_INIT_FLAGS
#undef FAN_ALL_MARK_FLAGS
#undef FAN_ALL_EVENTS
#undef FAN_ALL_PERM_EVENTS
#undef FAN_ALL_OUTGOING_EVENTS
#endif /* _LINUX_FANOTIFY_H */


@@ -520,24 +520,6 @@ struct bpf_skb_data_end {
void *data_end;
};
struct sk_msg_buff {
void *data;
void *data_end;
__u32 apply_bytes;
__u32 cork_bytes;
int sg_copybreak;
int sg_start;
int sg_curr;
int sg_end;
struct scatterlist sg_data[MAX_SKB_FRAGS];
bool sg_copy[MAX_SKB_FRAGS];
__u32 flags;
struct sock *sk_redir;
struct sock *sk;
struct sk_buff *skb;
struct list_head list;
};
struct bpf_redirect_info {
u32 ifindex;
u32 flags;
@@ -566,6 +548,27 @@ static inline void bpf_compute_data_pointers(struct sk_buff *skb)
cb->data_end = skb->data + skb_headlen(skb);
}
/* Similar to bpf_compute_data_pointers(), except that it saves the original
* data in cb->data and cb->meta_data for a later restore.
*/
static inline void bpf_compute_and_save_data_end(
struct sk_buff *skb, void **saved_data_end)
{
struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
*saved_data_end = cb->data_end;
cb->data_end = skb->data + skb_headlen(skb);
}
/* Restore data saved by bpf_compute_and_save_data_end(). */
static inline void bpf_restore_data_end(
struct sk_buff *skb, void *saved_data_end)
{
struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
cb->data_end = saved_data_end;
}
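/*
 * Hedged usage sketch (not from this diff): run a program on an skb
 * without clobbering a data_end computed by an outer context.
 * BPF_PROG_RUN() is the dispatch helper assumed from this header.
 */
static unsigned int demo_run_prog(struct bpf_prog *prog, struct sk_buff *skb)
{
	void *saved_data_end;
	unsigned int ret;

	bpf_compute_and_save_data_end(skb, &saved_data_end);
	ret = BPF_PROG_RUN(prog, skb);
	bpf_restore_data_end(skb, saved_data_end);

	return ret;
}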
static inline u8 *bpf_skb_cb(struct sk_buff *skb)
{
/* eBPF programs may read/write skb->cb[] area to transfer meta
@@ -833,9 +836,6 @@ void xdp_do_flush_map(void);
void bpf_warn_invalid_xdp_action(u32 act);
struct sock *do_sk_redirect_map(struct sk_buff *skb);
struct sock *do_msg_redirect_map(struct sk_msg_buff *md);
#ifdef CONFIG_INET
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
struct bpf_prog *prog, struct sk_buff *skb,
@@ -854,6 +854,7 @@ bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
extern int bpf_jit_limit;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);


@@ -0,0 +1,59 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright 2018 NXP
*
* Header file for the IPC implementation.
*/
#ifndef _SC_IPC_H
#define _SC_IPC_H
#include <linux/device.h>
#include <linux/types.h>
#define IMX_SC_RPC_VERSION 1
#define IMX_SC_RPC_MAX_MSG 8
struct imx_sc_ipc;
enum imx_sc_rpc_svc {
IMX_SC_RPC_SVC_UNKNOWN = 0,
IMX_SC_RPC_SVC_RETURN = 1,
IMX_SC_RPC_SVC_PM = 2,
IMX_SC_RPC_SVC_RM = 3,
IMX_SC_RPC_SVC_TIMER = 5,
IMX_SC_RPC_SVC_PAD = 6,
IMX_SC_RPC_SVC_MISC = 7,
IMX_SC_RPC_SVC_IRQ = 8,
IMX_SC_RPC_SVC_ABORT = 9
};
struct imx_sc_rpc_msg {
uint8_t ver;
uint8_t size;
uint8_t svc;
uint8_t func;
};
/*
* This function sends an RPC message over an IPC channel.
* It is called by client-side SCFW API function shims.
*
* @param[in] ipc IPC handle
* @param[in,out] msg handle to a message
* @param[in] have_resp response flag
*
* If have_resp is true then this function waits for a response
* and returns the result in msg.
*/
int imx_scu_call_rpc(struct imx_sc_ipc *ipc, void *msg, bool have_resp);
/*
* This function gets the default IPC handle used by the SCU
*
* @param[out] ipc sc ipc handle
*
* @return Returns an error code (0 = success, failed if < 0)
*/
int imx_scu_get_handle(struct imx_sc_ipc **ipc);
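/*
 * Hedged usage sketch (not part of this diff): fetch the default handle
 * and send a header-only MISC request synchronously.
 * IMX_SC_MISC_FUNC_BUILD_INFO comes from the MISC service header; the
 * function name is made up.
 */
static int demo_build_info(void)
{
	struct imx_sc_ipc *ipc;
	struct imx_sc_rpc_msg msg;
	int ret;

	ret = imx_scu_get_handle(&ipc);
	if (ret)
		return ret;

	msg.ver = IMX_SC_RPC_VERSION;
	msg.size = 1;		/* one 32-bit word: the header itself */
	msg.svc = IMX_SC_RPC_SVC_MISC;
	msg.func = IMX_SC_MISC_FUNC_BUILD_INFO;

	return imx_scu_call_rpc(ipc, &msg, true);
}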
#endif /* _SC_IPC_H */


@@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017~2018 NXP
*
* Header file containing the public System Controller Interface (SCI)
* definitions.
*/
#ifndef _SC_SCI_H
#define _SC_SCI_H
#include <linux/firmware/imx/ipc.h>
#include <linux/firmware/imx/types.h>
#include <linux/firmware/imx/svc/misc.h>
#endif /* _SC_SCI_H */


@@ -0,0 +1,55 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017~2018 NXP
*
* Header file containing the public API for the System Controller (SC)
* Miscellaneous (MISC) function.
*
* MISC_SVC (SVC) Miscellaneous Service
*
* Module for the Miscellaneous (MISC) service.
*/
#ifndef _SC_MISC_API_H
#define _SC_MISC_API_H
#include <linux/firmware/imx/sci.h>
/*
* This type is used to indicate RPC MISC function calls.
*/
enum imx_misc_func {
IMX_SC_MISC_FUNC_UNKNOWN = 0,
IMX_SC_MISC_FUNC_SET_CONTROL = 1,
IMX_SC_MISC_FUNC_GET_CONTROL = 2,
IMX_SC_MISC_FUNC_SET_MAX_DMA_GROUP = 4,
IMX_SC_MISC_FUNC_SET_DMA_GROUP = 5,
IMX_SC_MISC_FUNC_SECO_IMAGE_LOAD = 8,
IMX_SC_MISC_FUNC_SECO_AUTHENTICATE = 9,
IMX_SC_MISC_FUNC_DEBUG_OUT = 10,
IMX_SC_MISC_FUNC_WAVEFORM_CAPTURE = 6,
IMX_SC_MISC_FUNC_BUILD_INFO = 15,
IMX_SC_MISC_FUNC_UNIQUE_ID = 19,
IMX_SC_MISC_FUNC_SET_ARI = 3,
IMX_SC_MISC_FUNC_BOOT_STATUS = 7,
IMX_SC_MISC_FUNC_BOOT_DONE = 14,
IMX_SC_MISC_FUNC_OTP_FUSE_READ = 11,
IMX_SC_MISC_FUNC_OTP_FUSE_WRITE = 17,
IMX_SC_MISC_FUNC_SET_TEMP = 12,
IMX_SC_MISC_FUNC_GET_TEMP = 13,
IMX_SC_MISC_FUNC_GET_BOOT_DEV = 16,
IMX_SC_MISC_FUNC_GET_BUTTON_STATUS = 18,
};
/*
* Control Functions
*/
int imx_sc_misc_set_control(struct imx_sc_ipc *ipc, u32 resource,
u8 ctrl, u32 val);
int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource,
u8 ctrl, u32 *val);
#endif /* _SC_MISC_API_H */


@@ -0,0 +1,617 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017~2018 NXP
*
* Header file containing types used across multiple service APIs.
*/
#ifndef _SC_TYPES_H
#define _SC_TYPES_H
/*
* This type is used to indicate a resource. Resources include peripherals
* and bus masters (but not memory regions). Note items from list should
* never be changed or removed (only added to at the end of the list).
*/
enum imx_sc_rsrc {
IMX_SC_R_A53 = 0,
IMX_SC_R_A53_0 = 1,
IMX_SC_R_A53_1 = 2,
IMX_SC_R_A53_2 = 3,
IMX_SC_R_A53_3 = 4,
IMX_SC_R_A72 = 5,
IMX_SC_R_A72_0 = 6,
IMX_SC_R_A72_1 = 7,
IMX_SC_R_A72_2 = 8,
IMX_SC_R_A72_3 = 9,
IMX_SC_R_CCI = 10,
IMX_SC_R_DB = 11,
IMX_SC_R_DRC_0 = 12,
IMX_SC_R_DRC_1 = 13,
IMX_SC_R_GIC_SMMU = 14,
IMX_SC_R_IRQSTR_M4_0 = 15,
IMX_SC_R_IRQSTR_M4_1 = 16,
IMX_SC_R_SMMU = 17,
IMX_SC_R_GIC = 18,
IMX_SC_R_DC_0_BLIT0 = 19,
IMX_SC_R_DC_0_BLIT1 = 20,
IMX_SC_R_DC_0_BLIT2 = 21,
IMX_SC_R_DC_0_BLIT_OUT = 22,
IMX_SC_R_DC_0_CAPTURE0 = 23,
IMX_SC_R_DC_0_CAPTURE1 = 24,
IMX_SC_R_DC_0_WARP = 25,
IMX_SC_R_DC_0_INTEGRAL0 = 26,
IMX_SC_R_DC_0_INTEGRAL1 = 27,
IMX_SC_R_DC_0_VIDEO0 = 28,
IMX_SC_R_DC_0_VIDEO1 = 29,
IMX_SC_R_DC_0_FRAC0 = 30,
IMX_SC_R_DC_0_FRAC1 = 31,
IMX_SC_R_DC_0 = 32,
IMX_SC_R_GPU_2_PID0 = 33,
IMX_SC_R_DC_0_PLL_0 = 34,
IMX_SC_R_DC_0_PLL_1 = 35,
IMX_SC_R_DC_1_BLIT0 = 36,
IMX_SC_R_DC_1_BLIT1 = 37,
IMX_SC_R_DC_1_BLIT2 = 38,
IMX_SC_R_DC_1_BLIT_OUT = 39,
IMX_SC_R_DC_1_CAPTURE0 = 40,
IMX_SC_R_DC_1_CAPTURE1 = 41,
IMX_SC_R_DC_1_WARP = 42,
IMX_SC_R_DC_1_INTEGRAL0 = 43,
IMX_SC_R_DC_1_INTEGRAL1 = 44,
IMX_SC_R_DC_1_VIDEO0 = 45,
IMX_SC_R_DC_1_VIDEO1 = 46,
IMX_SC_R_DC_1_FRAC0 = 47,
IMX_SC_R_DC_1_FRAC1 = 48,
IMX_SC_R_DC_1 = 49,
IMX_SC_R_GPU_3_PID0 = 50,
IMX_SC_R_DC_1_PLL_0 = 51,
IMX_SC_R_DC_1_PLL_1 = 52,
IMX_SC_R_SPI_0 = 53,
IMX_SC_R_SPI_1 = 54,
IMX_SC_R_SPI_2 = 55,
IMX_SC_R_SPI_3 = 56,
IMX_SC_R_UART_0 = 57,
IMX_SC_R_UART_1 = 58,
IMX_SC_R_UART_2 = 59,
IMX_SC_R_UART_3 = 60,
IMX_SC_R_UART_4 = 61,
IMX_SC_R_EMVSIM_0 = 62,
IMX_SC_R_EMVSIM_1 = 63,
IMX_SC_R_DMA_0_CH0 = 64,
IMX_SC_R_DMA_0_CH1 = 65,
IMX_SC_R_DMA_0_CH2 = 66,
IMX_SC_R_DMA_0_CH3 = 67,
IMX_SC_R_DMA_0_CH4 = 68,
IMX_SC_R_DMA_0_CH5 = 69,
IMX_SC_R_DMA_0_CH6 = 70,
IMX_SC_R_DMA_0_CH7 = 71,
IMX_SC_R_DMA_0_CH8 = 72,
IMX_SC_R_DMA_0_CH9 = 73,
IMX_SC_R_DMA_0_CH10 = 74,
IMX_SC_R_DMA_0_CH11 = 75,
IMX_SC_R_DMA_0_CH12 = 76,
IMX_SC_R_DMA_0_CH13 = 77,
IMX_SC_R_DMA_0_CH14 = 78,
IMX_SC_R_DMA_0_CH15 = 79,
IMX_SC_R_DMA_0_CH16 = 80,
IMX_SC_R_DMA_0_CH17 = 81,
IMX_SC_R_DMA_0_CH18 = 82,
IMX_SC_R_DMA_0_CH19 = 83,
IMX_SC_R_DMA_0_CH20 = 84,
IMX_SC_R_DMA_0_CH21 = 85,
IMX_SC_R_DMA_0_CH22 = 86,
IMX_SC_R_DMA_0_CH23 = 87,
IMX_SC_R_DMA_0_CH24 = 88,
IMX_SC_R_DMA_0_CH25 = 89,
IMX_SC_R_DMA_0_CH26 = 90,
IMX_SC_R_DMA_0_CH27 = 91,
IMX_SC_R_DMA_0_CH28 = 92,
IMX_SC_R_DMA_0_CH29 = 93,
IMX_SC_R_DMA_0_CH30 = 94,
IMX_SC_R_DMA_0_CH31 = 95,
IMX_SC_R_I2C_0 = 96,
IMX_SC_R_I2C_1 = 97,
IMX_SC_R_I2C_2 = 98,
IMX_SC_R_I2C_3 = 99,
IMX_SC_R_I2C_4 = 100,
IMX_SC_R_ADC_0 = 101,
IMX_SC_R_ADC_1 = 102,
IMX_SC_R_FTM_0 = 103,
IMX_SC_R_FTM_1 = 104,
IMX_SC_R_CAN_0 = 105,
IMX_SC_R_CAN_1 = 106,
IMX_SC_R_CAN_2 = 107,
IMX_SC_R_DMA_1_CH0 = 108,
IMX_SC_R_DMA_1_CH1 = 109,
IMX_SC_R_DMA_1_CH2 = 110,
IMX_SC_R_DMA_1_CH3 = 111,
IMX_SC_R_DMA_1_CH4 = 112,
IMX_SC_R_DMA_1_CH5 = 113,
IMX_SC_R_DMA_1_CH6 = 114,
IMX_SC_R_DMA_1_CH7 = 115,
IMX_SC_R_DMA_1_CH8 = 116,
IMX_SC_R_DMA_1_CH9 = 117,
IMX_SC_R_DMA_1_CH10 = 118,
IMX_SC_R_DMA_1_CH11 = 119,
IMX_SC_R_DMA_1_CH12 = 120,
IMX_SC_R_DMA_1_CH13 = 121,
IMX_SC_R_DMA_1_CH14 = 122,
IMX_SC_R_DMA_1_CH15 = 123,
IMX_SC_R_DMA_1_CH16 = 124,
IMX_SC_R_DMA_1_CH17 = 125,
IMX_SC_R_DMA_1_CH18 = 126,
IMX_SC_R_DMA_1_CH19 = 127,
IMX_SC_R_DMA_1_CH20 = 128,
IMX_SC_R_DMA_1_CH21 = 129,
IMX_SC_R_DMA_1_CH22 = 130,
IMX_SC_R_DMA_1_CH23 = 131,
IMX_SC_R_DMA_1_CH24 = 132,
IMX_SC_R_DMA_1_CH25 = 133,
IMX_SC_R_DMA_1_CH26 = 134,
IMX_SC_R_DMA_1_CH27 = 135,
IMX_SC_R_DMA_1_CH28 = 136,
IMX_SC_R_DMA_1_CH29 = 137,
IMX_SC_R_DMA_1_CH30 = 138,
IMX_SC_R_DMA_1_CH31 = 139,
IMX_SC_R_UNUSED1 = 140,
IMX_SC_R_UNUSED2 = 141,
IMX_SC_R_UNUSED3 = 142,
IMX_SC_R_UNUSED4 = 143,
IMX_SC_R_GPU_0_PID0 = 144,
IMX_SC_R_GPU_0_PID1 = 145,
IMX_SC_R_GPU_0_PID2 = 146,
IMX_SC_R_GPU_0_PID3 = 147,
IMX_SC_R_GPU_1_PID0 = 148,
IMX_SC_R_GPU_1_PID1 = 149,
IMX_SC_R_GPU_1_PID2 = 150,
IMX_SC_R_GPU_1_PID3 = 151,
IMX_SC_R_PCIE_A = 152,
IMX_SC_R_SERDES_0 = 153,
IMX_SC_R_MATCH_0 = 154,
IMX_SC_R_MATCH_1 = 155,
IMX_SC_R_MATCH_2 = 156,
IMX_SC_R_MATCH_3 = 157,
IMX_SC_R_MATCH_4 = 158,
IMX_SC_R_MATCH_5 = 159,
IMX_SC_R_MATCH_6 = 160,
IMX_SC_R_MATCH_7 = 161,
IMX_SC_R_MATCH_8 = 162,
IMX_SC_R_MATCH_9 = 163,
IMX_SC_R_MATCH_10 = 164,
IMX_SC_R_MATCH_11 = 165,
IMX_SC_R_MATCH_12 = 166,
IMX_SC_R_MATCH_13 = 167,
IMX_SC_R_MATCH_14 = 168,
IMX_SC_R_PCIE_B = 169,
IMX_SC_R_SATA_0 = 170,
IMX_SC_R_SERDES_1 = 171,
IMX_SC_R_HSIO_GPIO = 172,
IMX_SC_R_MATCH_15 = 173,
IMX_SC_R_MATCH_16 = 174,
IMX_SC_R_MATCH_17 = 175,
IMX_SC_R_MATCH_18 = 176,
IMX_SC_R_MATCH_19 = 177,
IMX_SC_R_MATCH_20 = 178,
IMX_SC_R_MATCH_21 = 179,
IMX_SC_R_MATCH_22 = 180,
IMX_SC_R_MATCH_23 = 181,
IMX_SC_R_MATCH_24 = 182,
IMX_SC_R_MATCH_25 = 183,
IMX_SC_R_MATCH_26 = 184,
IMX_SC_R_MATCH_27 = 185,
IMX_SC_R_MATCH_28 = 186,
IMX_SC_R_LCD_0 = 187,
IMX_SC_R_LCD_0_PWM_0 = 188,
IMX_SC_R_LCD_0_I2C_0 = 189,
IMX_SC_R_LCD_0_I2C_1 = 190,
IMX_SC_R_PWM_0 = 191,
IMX_SC_R_PWM_1 = 192,
IMX_SC_R_PWM_2 = 193,
IMX_SC_R_PWM_3 = 194,
IMX_SC_R_PWM_4 = 195,
IMX_SC_R_PWM_5 = 196,
IMX_SC_R_PWM_6 = 197,
IMX_SC_R_PWM_7 = 198,
IMX_SC_R_GPIO_0 = 199,
IMX_SC_R_GPIO_1 = 200,
IMX_SC_R_GPIO_2 = 201,
IMX_SC_R_GPIO_3 = 202,
IMX_SC_R_GPIO_4 = 203,
IMX_SC_R_GPIO_5 = 204,
IMX_SC_R_GPIO_6 = 205,
IMX_SC_R_GPIO_7 = 206,
IMX_SC_R_GPT_0 = 207,
IMX_SC_R_GPT_1 = 208,
IMX_SC_R_GPT_2 = 209,
IMX_SC_R_GPT_3 = 210,
IMX_SC_R_GPT_4 = 211,
IMX_SC_R_KPP = 212,
IMX_SC_R_MU_0A = 213,
IMX_SC_R_MU_1A = 214,
IMX_SC_R_MU_2A = 215,
IMX_SC_R_MU_3A = 216,
IMX_SC_R_MU_4A = 217,
IMX_SC_R_MU_5A = 218,
IMX_SC_R_MU_6A = 219,
IMX_SC_R_MU_7A = 220,
IMX_SC_R_MU_8A = 221,
IMX_SC_R_MU_9A = 222,
IMX_SC_R_MU_10A = 223,
IMX_SC_R_MU_11A = 224,
IMX_SC_R_MU_12A = 225,
IMX_SC_R_MU_13A = 226,
IMX_SC_R_MU_5B = 227,
IMX_SC_R_MU_6B = 228,
IMX_SC_R_MU_7B = 229,
IMX_SC_R_MU_8B = 230,
IMX_SC_R_MU_9B = 231,
IMX_SC_R_MU_10B = 232,
IMX_SC_R_MU_11B = 233,
IMX_SC_R_MU_12B = 234,
IMX_SC_R_MU_13B = 235,
IMX_SC_R_ROM_0 = 236,
IMX_SC_R_FSPI_0 = 237,
IMX_SC_R_FSPI_1 = 238,
IMX_SC_R_IEE = 239,
IMX_SC_R_IEE_R0 = 240,
IMX_SC_R_IEE_R1 = 241,
IMX_SC_R_IEE_R2 = 242,
IMX_SC_R_IEE_R3 = 243,
IMX_SC_R_IEE_R4 = 244,
IMX_SC_R_IEE_R5 = 245,
IMX_SC_R_IEE_R6 = 246,
IMX_SC_R_IEE_R7 = 247,
IMX_SC_R_SDHC_0 = 248,
IMX_SC_R_SDHC_1 = 249,
IMX_SC_R_SDHC_2 = 250,
IMX_SC_R_ENET_0 = 251,
IMX_SC_R_ENET_1 = 252,
IMX_SC_R_MLB_0 = 253,
IMX_SC_R_DMA_2_CH0 = 254,
IMX_SC_R_DMA_2_CH1 = 255,
IMX_SC_R_DMA_2_CH2 = 256,
IMX_SC_R_DMA_2_CH3 = 257,
IMX_SC_R_DMA_2_CH4 = 258,
IMX_SC_R_USB_0 = 259,
IMX_SC_R_USB_1 = 260,
IMX_SC_R_USB_0_PHY = 261,
IMX_SC_R_USB_2 = 262,
IMX_SC_R_USB_2_PHY = 263,
IMX_SC_R_DTCP = 264,
IMX_SC_R_NAND = 265,
IMX_SC_R_LVDS_0 = 266,
IMX_SC_R_LVDS_0_PWM_0 = 267,
IMX_SC_R_LVDS_0_I2C_0 = 268,
IMX_SC_R_LVDS_0_I2C_1 = 269,
IMX_SC_R_LVDS_1 = 270,
IMX_SC_R_LVDS_1_PWM_0 = 271,
IMX_SC_R_LVDS_1_I2C_0 = 272,
IMX_SC_R_LVDS_1_I2C_1 = 273,
IMX_SC_R_LVDS_2 = 274,
IMX_SC_R_LVDS_2_PWM_0 = 275,
IMX_SC_R_LVDS_2_I2C_0 = 276,
IMX_SC_R_LVDS_2_I2C_1 = 277,
IMX_SC_R_M4_0_PID0 = 278,
IMX_SC_R_M4_0_PID1 = 279,
IMX_SC_R_M4_0_PID2 = 280,
IMX_SC_R_M4_0_PID3 = 281,
IMX_SC_R_M4_0_PID4 = 282,
IMX_SC_R_M4_0_RGPIO = 283,
IMX_SC_R_M4_0_SEMA42 = 284,
IMX_SC_R_M4_0_TPM = 285,
IMX_SC_R_M4_0_PIT = 286,
IMX_SC_R_M4_0_UART = 287,
IMX_SC_R_M4_0_I2C = 288,
IMX_SC_R_M4_0_INTMUX = 289,
IMX_SC_R_M4_0_SIM = 290,
IMX_SC_R_M4_0_WDOG = 291,
IMX_SC_R_M4_0_MU_0B = 292,
IMX_SC_R_M4_0_MU_0A0 = 293,
IMX_SC_R_M4_0_MU_0A1 = 294,
IMX_SC_R_M4_0_MU_0A2 = 295,
IMX_SC_R_M4_0_MU_0A3 = 296,
IMX_SC_R_M4_0_MU_1A = 297,
IMX_SC_R_M4_1_PID0 = 298,
IMX_SC_R_M4_1_PID1 = 299,
IMX_SC_R_M4_1_PID2 = 300,
IMX_SC_R_M4_1_PID3 = 301,
IMX_SC_R_M4_1_PID4 = 302,
IMX_SC_R_M4_1_RGPIO = 303,
IMX_SC_R_M4_1_SEMA42 = 304,
IMX_SC_R_M4_1_TPM = 305,
IMX_SC_R_M4_1_PIT = 306,
IMX_SC_R_M4_1_UART = 307,
IMX_SC_R_M4_1_I2C = 308,
IMX_SC_R_M4_1_INTMUX = 309,
IMX_SC_R_M4_1_SIM = 310,
IMX_SC_R_M4_1_WDOG = 311,
IMX_SC_R_M4_1_MU_0B = 312,
IMX_SC_R_M4_1_MU_0A0 = 313,
IMX_SC_R_M4_1_MU_0A1 = 314,
IMX_SC_R_M4_1_MU_0A2 = 315,
IMX_SC_R_M4_1_MU_0A3 = 316,
IMX_SC_R_M4_1_MU_1A = 317,
IMX_SC_R_SAI_0 = 318,
IMX_SC_R_SAI_1 = 319,
IMX_SC_R_SAI_2 = 320,
IMX_SC_R_IRQSTR_SCU2 = 321,
IMX_SC_R_IRQSTR_DSP = 322,
IMX_SC_R_UNUSED5 = 323,
IMX_SC_R_UNUSED6 = 324,
IMX_SC_R_AUDIO_PLL_0 = 325,
IMX_SC_R_PI_0 = 326,
IMX_SC_R_PI_0_PWM_0 = 327,
IMX_SC_R_PI_0_PWM_1 = 328,
IMX_SC_R_PI_0_I2C_0 = 329,
IMX_SC_R_PI_0_PLL = 330,
IMX_SC_R_PI_1 = 331,
IMX_SC_R_PI_1_PWM_0 = 332,
IMX_SC_R_PI_1_PWM_1 = 333,
IMX_SC_R_PI_1_I2C_0 = 334,
IMX_SC_R_PI_1_PLL = 335,
IMX_SC_R_SC_PID0 = 336,
IMX_SC_R_SC_PID1 = 337,
IMX_SC_R_SC_PID2 = 338,
IMX_SC_R_SC_PID3 = 339,
IMX_SC_R_SC_PID4 = 340,
IMX_SC_R_SC_SEMA42 = 341,
IMX_SC_R_SC_TPM = 342,
IMX_SC_R_SC_PIT = 343,
IMX_SC_R_SC_UART = 344,
IMX_SC_R_SC_I2C = 345,
IMX_SC_R_SC_MU_0B = 346,
IMX_SC_R_SC_MU_0A0 = 347,
IMX_SC_R_SC_MU_0A1 = 348,
IMX_SC_R_SC_MU_0A2 = 349,
IMX_SC_R_SC_MU_0A3 = 350,
IMX_SC_R_SC_MU_1A = 351,
IMX_SC_R_SYSCNT_RD = 352,
IMX_SC_R_SYSCNT_CMP = 353,
IMX_SC_R_DEBUG = 354,
IMX_SC_R_SYSTEM = 355,
IMX_SC_R_SNVS = 356,
IMX_SC_R_OTP = 357,
IMX_SC_R_VPU_PID0 = 358,
IMX_SC_R_VPU_PID1 = 359,
IMX_SC_R_VPU_PID2 = 360,
IMX_SC_R_VPU_PID3 = 361,
IMX_SC_R_VPU_PID4 = 362,
IMX_SC_R_VPU_PID5 = 363,
IMX_SC_R_VPU_PID6 = 364,
IMX_SC_R_VPU_PID7 = 365,
IMX_SC_R_VPU_UART = 366,
IMX_SC_R_VPUCORE = 367,
IMX_SC_R_VPUCORE_0 = 368,
IMX_SC_R_VPUCORE_1 = 369,
IMX_SC_R_VPUCORE_2 = 370,
IMX_SC_R_VPUCORE_3 = 371,
IMX_SC_R_DMA_4_CH0 = 372,
IMX_SC_R_DMA_4_CH1 = 373,
IMX_SC_R_DMA_4_CH2 = 374,
IMX_SC_R_DMA_4_CH3 = 375,
IMX_SC_R_DMA_4_CH4 = 376,
IMX_SC_R_ISI_CH0 = 377,
IMX_SC_R_ISI_CH1 = 378,
IMX_SC_R_ISI_CH2 = 379,
IMX_SC_R_ISI_CH3 = 380,
IMX_SC_R_ISI_CH4 = 381,
IMX_SC_R_ISI_CH5 = 382,
IMX_SC_R_ISI_CH6 = 383,
IMX_SC_R_ISI_CH7 = 384,
IMX_SC_R_MJPEG_DEC_S0 = 385,
IMX_SC_R_MJPEG_DEC_S1 = 386,
IMX_SC_R_MJPEG_DEC_S2 = 387,
IMX_SC_R_MJPEG_DEC_S3 = 388,
IMX_SC_R_MJPEG_ENC_S0 = 389,
IMX_SC_R_MJPEG_ENC_S1 = 390,
IMX_SC_R_MJPEG_ENC_S2 = 391,
IMX_SC_R_MJPEG_ENC_S3 = 392,
IMX_SC_R_MIPI_0 = 393,
IMX_SC_R_MIPI_0_PWM_0 = 394,
IMX_SC_R_MIPI_0_I2C_0 = 395,
IMX_SC_R_MIPI_0_I2C_1 = 396,
IMX_SC_R_MIPI_1 = 397,
IMX_SC_R_MIPI_1_PWM_0 = 398,
IMX_SC_R_MIPI_1_I2C_0 = 399,
IMX_SC_R_MIPI_1_I2C_1 = 400,
IMX_SC_R_CSI_0 = 401,
IMX_SC_R_CSI_0_PWM_0 = 402,
IMX_SC_R_CSI_0_I2C_0 = 403,
IMX_SC_R_CSI_1 = 404,
IMX_SC_R_CSI_1_PWM_0 = 405,
IMX_SC_R_CSI_1_I2C_0 = 406,
IMX_SC_R_HDMI = 407,
IMX_SC_R_HDMI_I2S = 408,
IMX_SC_R_HDMI_I2C_0 = 409,
IMX_SC_R_HDMI_PLL_0 = 410,
IMX_SC_R_HDMI_RX = 411,
IMX_SC_R_HDMI_RX_BYPASS = 412,
IMX_SC_R_HDMI_RX_I2C_0 = 413,
IMX_SC_R_ASRC_0 = 414,
IMX_SC_R_ESAI_0 = 415,
IMX_SC_R_SPDIF_0 = 416,
IMX_SC_R_SPDIF_1 = 417,
IMX_SC_R_SAI_3 = 418,
IMX_SC_R_SAI_4 = 419,
IMX_SC_R_SAI_5 = 420,
IMX_SC_R_GPT_5 = 421,
IMX_SC_R_GPT_6 = 422,
IMX_SC_R_GPT_7 = 423,
IMX_SC_R_GPT_8 = 424,
IMX_SC_R_GPT_9 = 425,
IMX_SC_R_GPT_10 = 426,
IMX_SC_R_DMA_2_CH5 = 427,
IMX_SC_R_DMA_2_CH6 = 428,
IMX_SC_R_DMA_2_CH7 = 429,
IMX_SC_R_DMA_2_CH8 = 430,
IMX_SC_R_DMA_2_CH9 = 431,
IMX_SC_R_DMA_2_CH10 = 432,
IMX_SC_R_DMA_2_CH11 = 433,
IMX_SC_R_DMA_2_CH12 = 434,
IMX_SC_R_DMA_2_CH13 = 435,
IMX_SC_R_DMA_2_CH14 = 436,
IMX_SC_R_DMA_2_CH15 = 437,
IMX_SC_R_DMA_2_CH16 = 438,
IMX_SC_R_DMA_2_CH17 = 439,
IMX_SC_R_DMA_2_CH18 = 440,
IMX_SC_R_DMA_2_CH19 = 441,
IMX_SC_R_DMA_2_CH20 = 442,
IMX_SC_R_DMA_2_CH21 = 443,
IMX_SC_R_DMA_2_CH22 = 444,
IMX_SC_R_DMA_2_CH23 = 445,
IMX_SC_R_DMA_2_CH24 = 446,
IMX_SC_R_DMA_2_CH25 = 447,
IMX_SC_R_DMA_2_CH26 = 448,
IMX_SC_R_DMA_2_CH27 = 449,
IMX_SC_R_DMA_2_CH28 = 450,
IMX_SC_R_DMA_2_CH29 = 451,
IMX_SC_R_DMA_2_CH30 = 452,
IMX_SC_R_DMA_2_CH31 = 453,
IMX_SC_R_ASRC_1 = 454,
IMX_SC_R_ESAI_1 = 455,
IMX_SC_R_SAI_6 = 456,
IMX_SC_R_SAI_7 = 457,
IMX_SC_R_AMIX = 458,
IMX_SC_R_MQS_0 = 459,
IMX_SC_R_DMA_3_CH0 = 460,
IMX_SC_R_DMA_3_CH1 = 461,
IMX_SC_R_DMA_3_CH2 = 462,
IMX_SC_R_DMA_3_CH3 = 463,
IMX_SC_R_DMA_3_CH4 = 464,
IMX_SC_R_DMA_3_CH5 = 465,
IMX_SC_R_DMA_3_CH6 = 466,
IMX_SC_R_DMA_3_CH7 = 467,
IMX_SC_R_DMA_3_CH8 = 468,
IMX_SC_R_DMA_3_CH9 = 469,
IMX_SC_R_DMA_3_CH10 = 470,
IMX_SC_R_DMA_3_CH11 = 471,
IMX_SC_R_DMA_3_CH12 = 472,
IMX_SC_R_DMA_3_CH13 = 473,
IMX_SC_R_DMA_3_CH14 = 474,
IMX_SC_R_DMA_3_CH15 = 475,
IMX_SC_R_DMA_3_CH16 = 476,
IMX_SC_R_DMA_3_CH17 = 477,
IMX_SC_R_DMA_3_CH18 = 478,
IMX_SC_R_DMA_3_CH19 = 479,
IMX_SC_R_DMA_3_CH20 = 480,
IMX_SC_R_DMA_3_CH21 = 481,
IMX_SC_R_DMA_3_CH22 = 482,
IMX_SC_R_DMA_3_CH23 = 483,
IMX_SC_R_DMA_3_CH24 = 484,
IMX_SC_R_DMA_3_CH25 = 485,
IMX_SC_R_DMA_3_CH26 = 486,
IMX_SC_R_DMA_3_CH27 = 487,
IMX_SC_R_DMA_3_CH28 = 488,
IMX_SC_R_DMA_3_CH29 = 489,
IMX_SC_R_DMA_3_CH30 = 490,
IMX_SC_R_DMA_3_CH31 = 491,
IMX_SC_R_AUDIO_PLL_1 = 492,
IMX_SC_R_AUDIO_CLK_0 = 493,
IMX_SC_R_AUDIO_CLK_1 = 494,
IMX_SC_R_MCLK_OUT_0 = 495,
IMX_SC_R_MCLK_OUT_1 = 496,
IMX_SC_R_PMIC_0 = 497,
IMX_SC_R_PMIC_1 = 498,
IMX_SC_R_SECO = 499,
IMX_SC_R_CAAM_JR1 = 500,
IMX_SC_R_CAAM_JR2 = 501,
IMX_SC_R_CAAM_JR3 = 502,
IMX_SC_R_SECO_MU_2 = 503,
IMX_SC_R_SECO_MU_3 = 504,
IMX_SC_R_SECO_MU_4 = 505,
IMX_SC_R_HDMI_RX_PWM_0 = 506,
IMX_SC_R_A35 = 507,
IMX_SC_R_A35_0 = 508,
IMX_SC_R_A35_1 = 509,
IMX_SC_R_A35_2 = 510,
IMX_SC_R_A35_3 = 511,
IMX_SC_R_DSP = 512,
IMX_SC_R_DSP_RAM = 513,
IMX_SC_R_CAAM_JR1_OUT = 514,
IMX_SC_R_CAAM_JR2_OUT = 515,
IMX_SC_R_CAAM_JR3_OUT = 516,
IMX_SC_R_VPU_DEC_0 = 517,
IMX_SC_R_VPU_ENC_0 = 518,
IMX_SC_R_CAAM_JR0 = 519,
IMX_SC_R_CAAM_JR0_OUT = 520,
IMX_SC_R_PMIC_2 = 521,
IMX_SC_R_DBLOGIC = 522,
IMX_SC_R_HDMI_PLL_1 = 523,
IMX_SC_R_BOARD_R0 = 524,
IMX_SC_R_BOARD_R1 = 525,
IMX_SC_R_BOARD_R2 = 526,
IMX_SC_R_BOARD_R3 = 527,
IMX_SC_R_BOARD_R4 = 528,
IMX_SC_R_BOARD_R5 = 529,
IMX_SC_R_BOARD_R6 = 530,
IMX_SC_R_BOARD_R7 = 531,
IMX_SC_R_MJPEG_DEC_MP = 532,
IMX_SC_R_MJPEG_ENC_MP = 533,
IMX_SC_R_VPU_TS_0 = 534,
IMX_SC_R_VPU_MU_0 = 535,
IMX_SC_R_VPU_MU_1 = 536,
IMX_SC_R_VPU_MU_2 = 537,
IMX_SC_R_VPU_MU_3 = 538,
IMX_SC_R_VPU_ENC_1 = 539,
IMX_SC_R_VPU = 540,
IMX_SC_R_LAST
};
/* NOTE - please add new entries by replacing some of the UNUSED entries above! */
/*
* This type is used to indicate a control.
*/
enum imx_sc_ctrl {
IMX_SC_C_TEMP = 0,
IMX_SC_C_TEMP_HI = 1,
IMX_SC_C_TEMP_LOW = 2,
IMX_SC_C_PXL_LINK_MST1_ADDR = 3,
IMX_SC_C_PXL_LINK_MST2_ADDR = 4,
IMX_SC_C_PXL_LINK_MST_ENB = 5,
IMX_SC_C_PXL_LINK_MST1_ENB = 6,
IMX_SC_C_PXL_LINK_MST2_ENB = 7,
IMX_SC_C_PXL_LINK_SLV1_ADDR = 8,
IMX_SC_C_PXL_LINK_SLV2_ADDR = 9,
IMX_SC_C_PXL_LINK_MST_VLD = 10,
IMX_SC_C_PXL_LINK_MST1_VLD = 11,
IMX_SC_C_PXL_LINK_MST2_VLD = 12,
IMX_SC_C_SINGLE_MODE = 13,
IMX_SC_C_ID = 14,
IMX_SC_C_PXL_CLK_POLARITY = 15,
IMX_SC_C_LINESTATE = 16,
IMX_SC_C_PCIE_G_RST = 17,
IMX_SC_C_PCIE_BUTTON_RST = 18,
IMX_SC_C_PCIE_PERST = 19,
IMX_SC_C_PHY_RESET = 20,
IMX_SC_C_PXL_LINK_RATE_CORRECTION = 21,
IMX_SC_C_PANIC = 22,
IMX_SC_C_PRIORITY_GROUP = 23,
IMX_SC_C_TXCLK = 24,
IMX_SC_C_CLKDIV = 25,
IMX_SC_C_DISABLE_50 = 26,
IMX_SC_C_DISABLE_125 = 27,
IMX_SC_C_SEL_125 = 28,
IMX_SC_C_MODE = 29,
IMX_SC_C_SYNC_CTRL0 = 30,
IMX_SC_C_KACHUNK_CNT = 31,
IMX_SC_C_KACHUNK_SEL = 32,
IMX_SC_C_SYNC_CTRL1 = 33,
IMX_SC_C_DPI_RESET = 34,
IMX_SC_C_MIPI_RESET = 35,
IMX_SC_C_DUAL_MODE = 36,
IMX_SC_C_VOLTAGE = 37,
IMX_SC_C_PXL_LINK_SEL = 38,
IMX_SC_C_OFS_SEL = 39,
IMX_SC_C_OFS_AUDIO = 40,
IMX_SC_C_OFS_PERIPH = 41,
IMX_SC_C_OFS_IRQ = 42,
IMX_SC_C_RST0 = 43,
IMX_SC_C_RST1 = 44,
IMX_SC_C_SEL0 = 45,
IMX_SC_C_LAST
};
#endif /* _SC_TYPES_H */

View File

@@ -17,6 +17,7 @@ enum {
SM_EFUSE_READ,
SM_EFUSE_WRITE,
SM_EFUSE_USER_MAX,
SM_GET_CHIP_ID,
};
struct meson_sm_firmware;

View File

@@ -0,0 +1,116 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Zynq MPSoC Firmware layer
*
* Copyright (C) 2014-2018 Xilinx
*
* Michal Simek <michal.simek@xilinx.com>
* Davorin Mista <davorin.mista@aggios.com>
* Jolly Shah <jollys@xilinx.com>
* Rajan Vaja <rajanv@xilinx.com>
*/
#ifndef __FIRMWARE_ZYNQMP_H__
#define __FIRMWARE_ZYNQMP_H__
#define ZYNQMP_PM_VERSION_MAJOR 1
#define ZYNQMP_PM_VERSION_MINOR 0
#define ZYNQMP_PM_VERSION ((ZYNQMP_PM_VERSION_MAJOR << 16) | \
ZYNQMP_PM_VERSION_MINOR)
#define ZYNQMP_TZ_VERSION_MAJOR 1
#define ZYNQMP_TZ_VERSION_MINOR 0
#define ZYNQMP_TZ_VERSION ((ZYNQMP_TZ_VERSION_MAJOR << 16) | \
ZYNQMP_TZ_VERSION_MINOR)
/* SMC SIP service Call Function Identifier Prefix */
#define PM_SIP_SVC 0xC2000000
#define PM_GET_TRUSTZONE_VERSION 0xa03
/* Number of 32-bit values in payload */
#define PAYLOAD_ARG_CNT 4U
enum pm_api_id {
PM_GET_API_VERSION = 1,
PM_IOCTL = 34,
PM_QUERY_DATA,
PM_CLOCK_ENABLE,
PM_CLOCK_DISABLE,
PM_CLOCK_GETSTATE,
PM_CLOCK_SETDIVIDER,
PM_CLOCK_GETDIVIDER,
PM_CLOCK_SETRATE,
PM_CLOCK_GETRATE,
PM_CLOCK_SETPARENT,
PM_CLOCK_GETPARENT,
};
/* PMU-FW return status codes */
enum pm_ret_status {
XST_PM_SUCCESS = 0,
XST_PM_INTERNAL = 2000,
XST_PM_CONFLICT,
XST_PM_NO_ACCESS,
XST_PM_INVALID_NODE,
XST_PM_DOUBLE_REQ,
XST_PM_ABORT_SUSPEND,
};
enum pm_ioctl_id {
IOCTL_SET_PLL_FRAC_MODE = 8,
IOCTL_GET_PLL_FRAC_MODE,
IOCTL_SET_PLL_FRAC_DATA,
IOCTL_GET_PLL_FRAC_DATA,
};
enum pm_query_id {
PM_QID_INVALID,
PM_QID_CLOCK_GET_NAME,
PM_QID_CLOCK_GET_TOPOLOGY,
PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS,
PM_QID_CLOCK_GET_PARENTS,
PM_QID_CLOCK_GET_ATTRIBUTES,
PM_QID_CLOCK_GET_NUM_CLOCKS = 12,
};
/**
* struct zynqmp_pm_query_data - PM query data
* @qid: query ID
* @arg1: Argument 1 of query data
* @arg2: Argument 2 of query data
* @arg3: Argument 3 of query data
*/
struct zynqmp_pm_query_data {
u32 qid;
u32 arg1;
u32 arg2;
u32 arg3;
};
struct zynqmp_eemi_ops {
int (*get_api_version)(u32 *version);
int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out);
int (*clock_enable)(u32 clock_id);
int (*clock_disable)(u32 clock_id);
int (*clock_getstate)(u32 clock_id, u32 *state);
int (*clock_setdivider)(u32 clock_id, u32 divider);
int (*clock_getdivider)(u32 clock_id, u32 *divider);
int (*clock_setrate)(u32 clock_id, u64 rate);
int (*clock_getrate)(u32 clock_id, u64 *rate);
int (*clock_setparent)(u32 clock_id, u32 parent_id);
int (*clock_getparent)(u32 clock_id, u32 *parent_id);
int (*ioctl)(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2, u32 *out);
};
#if IS_REACHABLE(CONFIG_ARCH_ZYNQMP)
const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void);
#else
static inline const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
{
return NULL;
}
#endif
#endif /* __FIRMWARE_ZYNQMP_H__ */
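A caller-side sketch against the ops table above (hypothetical consumer code, not part of this merge; the clock_id value is illustrative):

#include <linux/errno.h>
#include <linux/firmware/xlnx-zynqmp.h>

static int example_gate_clock(u32 clock_id)
{
	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();

	/* NULL when !CONFIG_ARCH_ZYNQMP; individual ops may also be absent. */
	if (!eemi_ops || !eemi_ops->clock_enable)
		return -ENODEV;

	return eemi_ops->clock_enable(clock_id);
}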

View File

@@ -69,4 +69,8 @@ void fpga_bridge_free(struct fpga_bridge *br);
int fpga_bridge_register(struct fpga_bridge *br);
void fpga_bridge_unregister(struct fpga_bridge *br);
struct fpga_bridge
*devm_fpga_bridge_create(struct device *dev, const char *name,
const struct fpga_bridge_ops *br_ops, void *priv);
#endif /* _LINUX_FPGA_BRIDGE_H */

View File

@@ -53,12 +53,20 @@ enum fpga_mgr_states {
FPGA_MGR_STATE_OPERATING,
};
/*
* FPGA Manager flags
* FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
* FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
* FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
* FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
/**
* DOC: FPGA Manager flags
*
* Flags used in the &fpga_image_info->flags field
*
* %FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
*
* %FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
*
* %FPGA_MGR_ENCRYPTED_BITSTREAM: indicates bitstream is encrypted
*
* %FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
*
* %FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
*/
#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
#define FPGA_MGR_EXTERNAL_CONFIG BIT(1)
@@ -190,4 +198,8 @@ void fpga_mgr_free(struct fpga_manager *mgr);
int fpga_mgr_register(struct fpga_manager *mgr);
void fpga_mgr_unregister(struct fpga_manager *mgr);
struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name,
const struct fpga_manager_ops *mops,
void *priv);
#endif /*_LINUX_FPGA_MGR_H */
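A sketch of how the new devm_fpga_mgr_create() pairs with the existing register/unregister calls; example_fpga_ops and the platform driver are hypothetical:

static int example_fpga_probe(struct platform_device *pdev)
{
	struct fpga_manager *mgr;

	/* Managed allocation: the manager is freed on driver detach. */
	mgr = devm_fpga_mgr_create(&pdev->dev, "example FPGA manager",
				   &example_fpga_ops, NULL);
	if (!mgr)
		return -ENOMEM;

	platform_set_drvdata(pdev, mgr);
	return fpga_mgr_register(mgr);
}

static int example_fpga_remove(struct platform_device *pdev)
{
	fpga_mgr_unregister(platform_get_drvdata(pdev));
	return 0;
}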

View File

@@ -44,4 +44,8 @@ void fpga_region_free(struct fpga_region *region);
int fpga_region_register(struct fpga_region *region);
void fpga_region_unregister(struct fpga_region *region);
struct fpga_region
*devm_fpga_region_create(struct device *dev, struct fpga_manager *mgr,
int (*get_bridges)(struct fpga_region *));
#endif /* _FPGA_REGION_H */

View File

@@ -403,24 +403,40 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
/**
* struct address_space - Contents of a cacheable, mappable object.
* @host: Owner, either the inode or the block_device.
* @i_pages: Cached pages.
* @gfp_mask: Memory allocation flags to use for allocating pages.
* @i_mmap_writable: Number of VM_SHARED mappings.
* @i_mmap: Tree of private and shared mappings.
* @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
* @nrpages: Number of page entries, protected by the i_pages lock.
* @nrexceptional: Shadow or DAX entries, protected by the i_pages lock.
* @writeback_index: Writeback starts here.
* @a_ops: Methods.
* @flags: Error bits and flags (AS_*).
* @wb_err: The most recent error which has occurred.
* @private_lock: For use by the owner of the address_space.
* @private_list: For use by the owner of the address_space.
* @private_data: For use by the owner of the address_space.
*/
struct address_space {
struct inode *host; /* owner: inode, block_device */
struct radix_tree_root i_pages; /* cached pages */
atomic_t i_mmap_writable;/* count VM_SHARED mappings */
struct rb_root_cached i_mmap; /* tree of private and shared mappings */
struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */
/* Protected by the i_pages lock */
unsigned long nrpages; /* number of total pages */
/* number of shadow or DAX exceptional entries */
struct inode *host;
struct xarray i_pages;
gfp_t gfp_mask;
atomic_t i_mmap_writable;
struct rb_root_cached i_mmap;
struct rw_semaphore i_mmap_rwsem;
unsigned long nrpages;
unsigned long nrexceptional;
pgoff_t writeback_index;/* writeback starts here */
const struct address_space_operations *a_ops; /* methods */
unsigned long flags; /* error bits */
spinlock_t private_lock; /* for use by the address_space */
gfp_t gfp_mask; /* implicit gfp mask for allocations */
struct list_head private_list; /* for use by the address_space */
void *private_data; /* ditto */
pgoff_t writeback_index;
const struct address_space_operations *a_ops;
unsigned long flags;
errseq_t wb_err;
spinlock_t private_lock;
struct list_head private_list;
void *private_data;
} __attribute__((aligned(sizeof(long)))) __randomize_layout;
/*
* On most architectures that alignment is already the case; but
@@ -467,15 +483,18 @@ struct block_device {
struct mutex bd_fsfreeze_mutex;
} __randomize_layout;
/*
* Radix-tree tags, for tagging dirty and writeback pages within the pagecache
* radix trees
*/
#define PAGECACHE_TAG_DIRTY 0
#define PAGECACHE_TAG_WRITEBACK 1
#define PAGECACHE_TAG_TOWRITE 2
/* XArray tags, for tagging dirty and writeback pages in the pagecache. */
#define PAGECACHE_TAG_DIRTY XA_MARK_0
#define PAGECACHE_TAG_WRITEBACK XA_MARK_1
#define PAGECACHE_TAG_TOWRITE XA_MARK_2
int mapping_tagged(struct address_space *mapping, int tag);
/*
* Returns true if any of the pages in the mapping are marked with the tag.
*/
static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag)
{
return xa_marked(&mapping->i_pages, tag);
}
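With the pagecache tags now XArray marks, the inline above reads a mark bit directly; a trivial caller-side sketch (hypothetical helper, not from this diff):

/* Skip syncing a mapping that has no dirty or writeback pages. */
static bool example_mapping_needs_sync(struct address_space *mapping)
{
	return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
	       mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
}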
static inline void i_mmap_lock_write(struct address_space *mapping)
{
@@ -1393,17 +1412,26 @@ struct super_block {
struct sb_writers s_writers;
/*
* Keep s_fs_info, s_time_gran, s_fsnotify_mask, and
* s_fsnotify_marks together for cache efficiency. They are frequently
* accessed and rarely modified.
*/
void *s_fs_info; /* Filesystem private info */
/* Granularity of c/m/atime in ns (cannot be worse than a second) */
u32 s_time_gran;
#ifdef CONFIG_FSNOTIFY
__u32 s_fsnotify_mask;
struct fsnotify_mark_connector __rcu *s_fsnotify_marks;
#endif
char s_id[32]; /* Informational name */
uuid_t s_uuid; /* UUID */
void *s_fs_info; /* Filesystem private info */
unsigned int s_max_links;
fmode_t s_mode;
/* Granularity of c/m/atime in ns.
Cannot be worse than a second */
u32 s_time_gran;
/*
* The next field is for VFS *only*. No filesystems have any business
* even looking at it. You had been warned.
@@ -1428,6 +1456,9 @@ struct super_block {
/* Number of inodes with nlink == 0 but still referenced */
atomic_long_t s_remove_count;
/* Pending fsnotify inode refs */
atomic_long_t s_fsnotify_inode_refs;
/* Being remounted read-only */
int s_readonly_remount;
@@ -1721,6 +1752,25 @@ struct block_device_operations;
#define NOMMU_VMFLAGS \
(NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC)
/*
* These flags control the behavior of the remap_file_range function pointer.
* If it is called with len == 0 that means "remap to end of source file".
* See Documentation/filesystems/vfs.txt for more details about this call.
*
* REMAP_FILE_DEDUP: only remap if contents identical (i.e. deduplicate)
* REMAP_FILE_CAN_SHORTEN: caller can handle a shortened request
*/
#define REMAP_FILE_DEDUP (1 << 0)
#define REMAP_FILE_CAN_SHORTEN (1 << 1)
/*
* These flags signal that the caller is ok with altering various aspects of
* the behavior of the remap operation. The changes must be made by the
* implementation; the vfs remap helper functions can take advantage of them.
* Flags in this category exist to preserve the quirky behavior of the hoisted
* btrfs clone/dedupe ioctls.
*/
#define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN)
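A sketch of how a filesystem's ->remap_file_range() is expected to consume these flags together with generic_remap_file_range_prep() (declared in a later hunk of this file); the filesystem name and extent-sharing step are hypothetical:

static loff_t example_remap_file_range(struct file *file_in, loff_t pos_in,
				       struct file *file_out, loff_t pos_out,
				       loff_t len, unsigned int remap_flags)
{
	int ret;

	/* Refuse flags this hypothetical filesystem does not implement. */
	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	/* May shorten len when REMAP_FILE_CAN_SHORTEN is set. */
	ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
					    &len, remap_flags);
	if (ret < 0 || len == 0)
		return ret;

	/* Filesystem-specific sharing of [pos_in, pos_in + len) into
	 * [pos_out, pos_out + len) would go here. */
	return len;
}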
struct iov_iter;
@@ -1759,10 +1809,9 @@ struct file_operations {
#endif
ssize_t (*copy_file_range)(struct file *, loff_t, struct file *,
loff_t, size_t, unsigned int);
int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t,
u64);
int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t,
u64);
loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
int (*fadvise)(struct file *, loff_t, loff_t, int);
} __randomize_layout;
@@ -1825,19 +1874,21 @@ extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
unsigned long, loff_t *, rwf_t);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
struct inode *inode_out, loff_t pos_out,
u64 *len, bool is_dedupe);
extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out, u64 len);
extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
struct inode *dest, loff_t destoff,
loff_t len, bool *is_same);
extern int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t *count,
unsigned int remap_flags);
extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
extern loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
extern int vfs_dedupe_file_range(struct file *file,
struct file_dedupe_range *same);
extern int vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
struct file *dst_file, loff_t dst_pos,
u64 len);
extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
struct file *dst_file, loff_t dst_pos,
loff_t len, unsigned int remap_flags);
struct super_operations {
@@ -2773,19 +2824,6 @@ static inline void file_end_write(struct file *file)
__sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
}
static inline int do_clone_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
u64 len)
{
int ret;
file_start_write(file_out);
ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len);
file_end_write(file_out);
return ret;
}
/*
* get_write_access() gets write permission for a file.
* put_write_access() releases this write permission.
@@ -2978,6 +3016,9 @@ extern int sb_min_blocksize(struct super_block *, int);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
extern int generic_remap_checks(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t *count, unsigned int remap_flags);
extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);

View File

@@ -351,6 +351,14 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd);
#define dev_is_fsl_mc(_dev) (0)
#endif
/* Macro to check if a device is a container device */
#define fsl_mc_is_cont_dev(_dev) (to_fsl_mc_device(_dev)->flags & \
FSL_MC_IS_DPRC)
/* Macro to get the container device of a MC device */
#define fsl_mc_cont_dev(_dev) (fsl_mc_is_cont_dev(_dev) ? \
(_dev) : (_dev)->parent)
/*
* module_fsl_mc_driver() - Helper macro for drivers that don't do
* anything special in module init/exit. This eliminates a lot of
@@ -405,6 +413,7 @@ extern struct device_type fsl_mc_bus_dpcon_type;
extern struct device_type fsl_mc_bus_dpmcp_type;
extern struct device_type fsl_mc_bus_dpmac_type;
extern struct device_type fsl_mc_bus_dprtc_type;
extern struct device_type fsl_mc_bus_dpseci_type;
static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev)
{
@@ -451,6 +460,11 @@ static inline bool is_fsl_mc_bus_dprtc(const struct fsl_mc_device *mc_dev)
return mc_dev->dev.type == &fsl_mc_bus_dprtc_type;
}
static inline bool is_fsl_mc_bus_dpseci(const struct fsl_mc_device *mc_dev)
{
return mc_dev->dev.type == &fsl_mc_bus_dpseci_type;
}
/*
* Data Path Buffer Pool (DPBP) API
* Contains initialization APIs and runtime control APIs for DPBP

View File

@@ -274,6 +274,8 @@
*/
/* Auto Boot Mode */
#define IFC_NAND_NCFGR_BOOT 0x80000000
/* SRAM Initialization */
#define IFC_NAND_NCFGR_SRAM_INIT_EN 0x20000000
/* Addressing Mode-ROW0+n/COL0 */
#define IFC_NAND_NCFGR_ADDR_MODE_RC0 0x00000000
/* Addressing Mode-ROW0+n/COL0+n */

View File

@@ -68,15 +68,20 @@
#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM)
/* Events that can be reported to backends */
#define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \
FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN | \
FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \
FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \
FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \
FS_OPEN_PERM | FS_ACCESS_PERM | FS_EXCL_UNLINK | \
FS_ISDIR | FS_IN_ONESHOT | FS_DN_RENAME | \
FS_OPEN_PERM | FS_ACCESS_PERM | FS_DN_RENAME)
/* Extra flags that may be reported with event or control handling of events */
#define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \
FS_DN_MULTISHOT | FS_EVENT_ON_CHILD)
#define ALL_FSNOTIFY_BITS (ALL_FSNOTIFY_EVENTS | ALL_FSNOTIFY_FLAGS)
struct fsnotify_group;
struct fsnotify_event;
struct fsnotify_mark;
@@ -189,10 +194,10 @@ struct fsnotify_group {
/* allows a group to block waiting for a userspace response */
struct list_head access_list;
wait_queue_head_t access_waitq;
int f_flags;
int flags; /* flags from fanotify_init() */
int f_flags; /* event_f_flags from fanotify_init() */
unsigned int max_marks;
struct user_struct *user;
bool audit;
} fanotify_data;
#endif /* CONFIG_FANOTIFY */
};
@@ -206,12 +211,14 @@ struct fsnotify_group {
enum fsnotify_obj_type {
FSNOTIFY_OBJ_TYPE_INODE,
FSNOTIFY_OBJ_TYPE_VFSMOUNT,
FSNOTIFY_OBJ_TYPE_SB,
FSNOTIFY_OBJ_TYPE_COUNT,
FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT
};
#define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE)
#define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT)
#define FSNOTIFY_OBJ_TYPE_SB_FL (1U << FSNOTIFY_OBJ_TYPE_SB)
#define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1)
static inline bool fsnotify_valid_obj_type(unsigned int type)
@@ -255,6 +262,7 @@ static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \
FSNOTIFY_ITER_FUNCS(inode, INODE)
FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT)
FSNOTIFY_ITER_FUNCS(sb, SB)
#define fsnotify_foreach_obj_type(type) \
for (type = 0; type < FSNOTIFY_OBJ_TYPE_COUNT; type++)
@@ -267,8 +275,8 @@ struct fsnotify_mark_connector;
typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t;
/*
* Inode / vfsmount point to this structure which tracks all marks attached to
* the inode / vfsmount. The reference to inode / vfsmount is held by this
* Inode/vfsmount/sb point to this structure which tracks all marks attached to
* the inode/vfsmount/sb. The reference to inode/vfsmount/sb is held by this
* structure. We destroy this structure when there are no more marks attached
* to it. The structure is protected by fsnotify_mark_srcu.
*/
@@ -335,6 +343,7 @@ extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int dat
extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask);
extern void __fsnotify_inode_delete(struct inode *inode);
extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
extern void fsnotify_sb_delete(struct super_block *sb);
extern u32 fsnotify_get_cookie(void);
static inline int fsnotify_inode_watches_children(struct inode *inode)
@@ -455,9 +464,13 @@ static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *gr
{
fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE_FL);
}
/* run all the marks in a group, and clear all of the sb marks */
static inline void fsnotify_clear_sb_marks_by_group(struct fsnotify_group *group)
{
fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB_FL);
}
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_unmount_inodes(struct super_block *sb);
extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);
@@ -484,6 +497,9 @@ static inline void __fsnotify_inode_delete(struct inode *inode)
static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
{}
static inline void fsnotify_sb_delete(struct super_block *sb)
{}
static inline void fsnotify_update_flags(struct dentry *dentry)
{}

View File

@@ -402,10 +402,11 @@ static inline void free_part_info(struct hd_struct *part)
extern void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part);
/* block/genhd.c */
extern void device_add_disk(struct device *parent, struct gendisk *disk);
extern void device_add_disk(struct device *parent, struct gendisk *disk,
const struct attribute_group **groups);
static inline void add_disk(struct gendisk *disk)
{
device_add_disk(NULL, disk);
device_add_disk(NULL, disk, NULL);
}
extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
static inline void add_disk_no_queue_reg(struct gendisk *disk)

View File

@@ -510,22 +510,18 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
struct vm_area_struct *vma, unsigned long addr,
int node, bool hugepage);
#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
int node);
#else
#define alloc_pages(gfp_mask, order) \
alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
alloc_pages(gfp_mask, order)
#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\
alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr) \
alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
alloc_pages_vma(gfp_mask, 0, vma, addr, node)
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

View File

@@ -17,11 +17,20 @@ struct device;
*/
struct gpio_desc;
/**
 * Opaque descriptor for a structure of GPIO array attributes. This structure
 * is attached to struct gpio_descs obtained from gpiod_get_array() and can be
 * passed back to get/set array functions in order to activate the fast
 * processing path if applicable.
*/
struct gpio_array;
/**
* Struct containing an array of descriptors that can be obtained using
* gpiod_get_array().
*/
struct gpio_descs {
struct gpio_array *info;
unsigned int ndescs;
struct gpio_desc *desc[];
};
@@ -30,6 +39,7 @@ struct gpio_descs {
#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
#define GPIOD_FLAGS_BIT_OPEN_DRAIN BIT(3)
#define GPIOD_FLAGS_BIT_NONEXCLUSIVE BIT(4)
/**
* Optional flags that can be passed to one of gpiod_* to configure direction
@@ -104,36 +114,46 @@ int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
/* Value get/set from non-sleeping context */
int gpiod_get_value(const struct gpio_desc *desc);
int gpiod_get_array_value(unsigned int array_size,
struct gpio_desc **desc_array, int *value_array);
struct gpio_desc **desc_array,
struct gpio_array *array_info,
unsigned long *value_bitmap);
void gpiod_set_value(struct gpio_desc *desc, int value);
void gpiod_set_array_value(unsigned int array_size,
struct gpio_desc **desc_array, int *value_array);
int gpiod_set_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
struct gpio_array *array_info,
unsigned long *value_bitmap);
int gpiod_get_raw_value(const struct gpio_desc *desc);
int gpiod_get_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
struct gpio_array *array_info,
unsigned long *value_bitmap);
void gpiod_set_raw_value(struct gpio_desc *desc, int value);
int gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
struct gpio_desc **desc_array,
struct gpio_array *array_info,
unsigned long *value_bitmap);
/* Value get/set from sleeping context */
int gpiod_get_value_cansleep(const struct gpio_desc *desc);
int gpiod_get_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
struct gpio_array *array_info,
unsigned long *value_bitmap);
void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
void gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
int gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
struct gpio_array *array_info,
unsigned long *value_bitmap);
int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
struct gpio_array *array_info,
unsigned long *value_bitmap);
void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
struct gpio_desc **desc_array,
struct gpio_array *array_info,
unsigned long *value_bitmap);
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
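A consumer-side sketch of the new bitmap-based array calls; the "led" con_id is hypothetical, and desc->info carries the array attributes that enable the fast path when all lines sit on one chip:

#include <linux/gpio/consumer.h>

static int example_show_pattern(struct device *dev, unsigned long pattern)
{
	struct gpio_descs *leds;

	leds = devm_gpiod_get_array(dev, "led", GPIOD_OUT_LOW);
	if (IS_ERR(leds))
		return PTR_ERR(leds);

	/* One value bit per descriptor, bit 0 driving leds->desc[0]. */
	return gpiod_set_array_value(leds->ndescs, leds->desc, leds->info,
				     &pattern);
}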
@@ -330,7 +350,8 @@ static inline int gpiod_get_value(const struct gpio_desc *desc)
}
static inline int gpiod_get_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -341,12 +362,14 @@ static inline void gpiod_set_value(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
static inline void gpiod_set_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
static inline int gpiod_set_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
return 0;
}
static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
{
@@ -356,7 +379,8 @@ static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
}
static inline int gpiod_get_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -368,8 +392,9 @@ static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
WARN_ON(1);
}
static inline int gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
struct gpio_desc **desc_array,
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -384,7 +409,8 @@ static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
}
static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -395,12 +421,14 @@ static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
static inline void gpiod_set_array_value_cansleep(unsigned int array_size,
static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
return 0;
}
static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
{
@@ -410,7 +438,8 @@ static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
}
static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -424,7 +453,8 @@ static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
}
static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);

View File

@@ -66,9 +66,15 @@ struct gpio_irq_chip {
/**
* @lock_key:
*
* Per GPIO IRQ chip lockdep classes.
* Per GPIO IRQ chip lockdep class for IRQ lock.
*/
struct lock_class_key *lock_key;
/**
* @request_key:
*
* Per GPIO IRQ chip lockdep class for IRQ request.
*/
struct lock_class_key *request_key;
/**
@@ -94,6 +100,13 @@ struct gpio_irq_chip {
*/
unsigned int num_parents;
/**
* @parent_irq:
*
* For use by gpiochip_set_cascaded_irqchip()
*/
unsigned int parent_irq;
/**
* @parents:
*
@@ -138,6 +151,20 @@ struct gpio_irq_chip {
* will allocate and map all IRQs during initialization.
*/
unsigned int first;
/**
* @irq_enable:
*
* Store old irq_chip irq_enable callback
*/
void (*irq_enable)(struct irq_data *data);
/**
* @irq_disable:
*
* Store old irq_chip irq_disable callback
*/
void (*irq_disable)(struct irq_data *data);
};
static inline struct gpio_irq_chip *to_gpio_irq_chip(struct irq_chip *chip)
@@ -158,9 +185,13 @@ static inline struct gpio_irq_chip *to_gpio_irq_chip(struct irq_chip *chip)
* @free: optional hook for chip-specific deactivation, such as
* disabling module power and clock; may sleep
* @get_direction: returns direction for signal "offset", 0=out, 1=in,
* (same as GPIOF_DIR_XXX), or negative error
* (same as GPIOF_DIR_XXX), or negative error.
* It is recommended to always implement this function, even on
* input-only or output-only gpio chips.
* @direction_input: configures signal "offset" as input, or returns error
* This can be omitted on input-only or output-only gpio chips.
* @direction_output: configures signal "offset" as output, or returns error
* This can be omitted on input-only or output-only gpio chips.
* @get: returns value for signal "offset", 0=low, 1=high, or negative error
* @get_multiple: reads values for multiple signals defined by "mask" and
* stores them in "bits", returns 0 on success or negative error
@@ -256,6 +287,9 @@ struct gpio_chip {
void (*dbg_show)(struct seq_file *s,
struct gpio_chip *chip);
int (*init_valid_mask)(struct gpio_chip *chip);
int base;
u16 ngpio;
const char *const *names;
@@ -294,7 +328,9 @@ struct gpio_chip {
/**
* @need_valid_mask:
*
* If set core allocates @valid_mask with all bits set to one.
 * If set, the core allocates @valid_mask, with its bits initialized by
 * init_valid_mask(), or all set to one if init_valid_mask() is not
 * defined.
*/
bool need_valid_mask;
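A sketch of the init_valid_mask() hook named above, for a hypothetical chip whose lines 4-7 are unusable; it assumes the core has already allocated @valid_mask because @need_valid_mask is set:

static int example_init_valid_mask(struct gpio_chip *chip)
{
	/* All lines usable except offsets 4..7. */
	bitmap_fill(chip->valid_mask, chip->ngpio);
	bitmap_clear(chip->valid_mask, 4, 4);
	return 0;
}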
@@ -395,6 +431,10 @@ extern struct gpio_chip *gpiochip_find(void *data,
int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset);
void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset);
bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset);
int gpiochip_reqres_irq(struct gpio_chip *chip, unsigned int offset);
void gpiochip_relres_irq(struct gpio_chip *chip, unsigned int offset);
void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset);
void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset);
/* Line status inquiry for drivers */
bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset);

View File

@@ -101,8 +101,8 @@ enum hdmi_extended_colorimetry {
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601,
HDMI_EXTENDED_COLORIMETRY_XV_YCC_709,
HDMI_EXTENDED_COLORIMETRY_S_YCC_601,
HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601,
HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB,
HDMI_EXTENDED_COLORIMETRY_OPYCC_601,
HDMI_EXTENDED_COLORIMETRY_OPRGB,
/* The following EC values are only defined in CEA-861-F. */
HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM,
@@ -163,6 +163,9 @@ struct hdmi_avi_infoframe {
int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
size_t size);
ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame,
void *buffer, size_t size);
int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame);
enum hdmi_spd_sdi {
HDMI_SPD_SDI_UNKNOWN,
@@ -194,6 +197,9 @@ int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
const char *vendor, const char *product);
ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
size_t size);
ssize_t hdmi_spd_infoframe_pack_only(const struct hdmi_spd_infoframe *frame,
void *buffer, size_t size);
int hdmi_spd_infoframe_check(struct hdmi_spd_infoframe *frame);
enum hdmi_audio_coding_type {
HDMI_AUDIO_CODING_TYPE_STREAM,
@@ -272,6 +278,9 @@ struct hdmi_audio_infoframe {
int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame);
ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
void *buffer, size_t size);
ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
void *buffer, size_t size);
int hdmi_audio_infoframe_check(struct hdmi_audio_infoframe *frame);
enum hdmi_3d_structure {
HDMI_3D_STRUCTURE_INVALID = -1,
@@ -299,6 +308,9 @@ struct hdmi_vendor_infoframe {
int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame);
ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
void *buffer, size_t size);
ssize_t hdmi_vendor_infoframe_pack_only(const struct hdmi_vendor_infoframe *frame,
void *buffer, size_t size);
int hdmi_vendor_infoframe_check(struct hdmi_vendor_infoframe *frame);
union hdmi_vendor_any_infoframe {
struct {
@@ -330,10 +342,14 @@ union hdmi_infoframe {
struct hdmi_audio_infoframe audio;
};
ssize_t
hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer);
ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer,
size_t size);
ssize_t hdmi_infoframe_pack_only(const union hdmi_infoframe *frame,
void *buffer, size_t size);
int hdmi_infoframe_check(union hdmi_infoframe *frame);
int hdmi_infoframe_unpack(union hdmi_infoframe *frame,
const void *buffer, size_t size);
void hdmi_infoframe_log(const char *level, struct device *dev,
union hdmi_infoframe *frame);
const union hdmi_infoframe *frame);
#endif /* _DRM_HDMI_H */
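A sketch tying the renamed opRGB colorimetry to the pack helpers above; the helper itself and its buffer handling are illustrative:

static ssize_t example_pack_avi(void *buf, size_t size)
{
	struct hdmi_avi_infoframe frame;
	int ret;

	ret = hdmi_avi_infoframe_init(&frame);
	if (ret < 0)
		return ret;

	frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
	frame.extended_colorimetry = HDMI_EXTENDED_COLORIMETRY_OPRGB;

	/* _pack() validates and checksums; _pack_only() assumes the frame
	 * already passed hdmi_avi_infoframe_check(). */
	return hdmi_avi_infoframe_pack(&frame, buf, size);
}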

View File

@@ -722,8 +722,8 @@ struct hid_usage_id {
* input will not be passed to raw_event unless hid_device_io_start is
* called.
*
* raw_event and event should return 0 on no action performed, 1 when no
* further processing should be done and negative on error
 * raw_event and event should return a negative value on error; any other
 * value passes the event on to .event(). Typically, return 0 for success.
*
* input_mapping shall return a negative value to completely ignore this usage
* (e.g. doubled or invalid usage), zero to continue with parsing of this
@@ -1139,6 +1139,34 @@ static inline u32 hid_report_len(struct hid_report *report)
int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
int interrupt);
/**
* struct hid_scroll_counter - Utility class for processing high-resolution
* scroll events.
* @dev: the input device for which events should be reported.
* @microns_per_hi_res_unit: the amount moved by the user's finger for each
* high-resolution unit reported by the mouse, in
* microns.
* @resolution_multiplier: the wheel's resolution in high-resolution mode as a
* multiple of its lower resolution. For example, if
* moving the wheel by one "notch" would result in a
* value of 1 in low-resolution mode but 8 in
* high-resolution, the multiplier is 8.
* @remainder: counts the number of high-resolution units moved since the last
* low-resolution event (REL_WHEEL or REL_HWHEEL) was sent. Should
* only be used by class methods.
*/
struct hid_scroll_counter {
struct input_dev *dev;
int microns_per_hi_res_unit;
int resolution_multiplier;
int remainder;
};
void hid_scroll_counter_handle_scroll(struct hid_scroll_counter *counter,
int hi_res_value);
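An illustrative wiring of the counter; the multiplier and micron values are hypothetical, not taken from any real device:

static struct hid_scroll_counter example_counter = {
	.resolution_multiplier = 8,	 /* one notch = 8 hi-res units */
	.microns_per_hi_res_unit = 1250, /* finger travel per hi-res unit */
};

static void example_report_wheel(struct input_dev *input, int hi_res_value)
{
	example_counter.dev = input;
	/* Accumulates hi-res units in @remainder and emits low-resolution
	 * REL_WHEEL events at notch boundaries. */
	hid_scroll_counter_handle_scroll(&example_counter, hi_res_value);
}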
/* HID quirks API */
unsigned long hid_lookup_quirk(const struct hid_device *hdev);
int hid_quirks_init(char **quirks_param, __u16 bus, int count);

View File

@@ -11,7 +11,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Authors: Jérôme Glisse <jglisse@redhat.com>
* Authors: Jérôme Glisse <jglisse@redhat.com>
*/
/*
* Heterogeneous Memory Management (HMM)
@@ -107,7 +107,7 @@ enum hmm_pfn_flag_e {
* HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
* HMM_PFN_NONE: corresponding CPU page table entry is pte_none()
* HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
* result of vm_insert_pfn() or vm_insert_page(). Therefore, it should not
* result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should not
* be mirrored by a device, because the entry will never have HMM_PFN_VALID
* set and the pfn value is undefined.
*
@@ -274,13 +274,28 @@ static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
struct hmm_mirror;
/*
* enum hmm_update_type - type of update
* enum hmm_update_event - type of update
* @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
*/
enum hmm_update_type {
enum hmm_update_event {
HMM_UPDATE_INVALIDATE,
};
/*
 * struct hmm_update - HMM update information for callback
*
* @start: virtual start address of the range to update
* @end: virtual end address of the range to update
* @event: event triggering the update (what is happening)
 * @blockable: can the callback block/sleep?
*/
struct hmm_update {
unsigned long start;
unsigned long end;
enum hmm_update_event event;
bool blockable;
};
/*
* struct hmm_mirror_ops - HMM mirror device operations callback
*
@@ -300,9 +315,9 @@ struct hmm_mirror_ops {
/* sync_cpu_device_pagetables() - synchronize page tables
*
* @mirror: pointer to struct hmm_mirror
* @update_type: type of update that occurred to the CPU page table
* @start: virtual start address of the range to update
* @end: virtual end address of the range to update
 * @update: update information (see struct hmm_update)
 * Returns: -EAGAIN if update.blockable is false and the callback needs
 * to block, 0 otherwise.
*
* This callback ultimately originates from mmu_notifiers when the CPU
* page table is updated. The device driver must update its page table
@@ -313,10 +328,8 @@ struct hmm_mirror_ops {
* page tables are completely updated (TLBs flushed, etc); this is a
* synchronous call.
*/
void (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
enum hmm_update_type update_type,
unsigned long start,
unsigned long end);
int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
const struct hmm_update *update);
};
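A sketch of a mirror honouring the new non-blocking contract; example_device and its page-table lock are hypothetical:

struct example_device {
	struct hmm_mirror mirror;
	struct mutex pt_lock;	/* protects the device page table */
};

static int
example_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
				   const struct hmm_update *update)
{
	struct example_device *edev =
		container_of(mirror, struct example_device, mirror);

	if (update->blockable)
		mutex_lock(&edev->pt_lock);
	else if (!mutex_trylock(&edev->pt_lock))
		return -EAGAIN;	/* caller may not sleep: ask it to retry */

	/* Invalidate device mappings for [update->start, update->end). */

	mutex_unlock(&edev->pt_lock);
	return 0;
}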
/*

View File

@@ -43,7 +43,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, unsigned long old_end,
pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot,
int prot_numa);
@@ -213,9 +213,9 @@ static inline int hpage_nr_pages(struct page *page)
}
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, int flags);
pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
pud_t *pud, int flags);
pud_t *pud, int flags, struct dev_pagemap **pgmap);
extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
@@ -344,13 +344,13 @@ static inline void mm_put_huge_zero_page(struct mm_struct *mm)
}
static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmd, int flags)
unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
return NULL;
}
static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
unsigned long addr, pud_t *pud, int flags)
unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
return NULL;
}

View File

@@ -140,6 +140,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
@@ -170,6 +172,18 @@ static inline unsigned long hugetlb_total_pages(void)
return 0;
}
static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
pte_t *ptep)
{
return 0;
}
static inline void adjust_range_if_pmd_sharing_possible(
struct vm_area_struct *vma,
unsigned long *start, unsigned long *end)
{
}
#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })

View File

@@ -33,7 +33,8 @@
* and max is a multiple of 4 and >= 32 bytes.
* @priv: Private data, for use by the RNG driver.
* @quality: Estimation of true entropy in RNG's bitstream
* (per mill).
* (in bits of entropy per 1024 bits of input;
* valid values: 1 to 1024, or 0 for unknown).
*/
struct hwrng {
const char *name;

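A sketch of the clarified @quality units in use; the name, read callback, and the 700/1024 entropy claim are all illustrative:

#include <linux/hw_random.h>
#include <linux/string.h>

static int example_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	/* A real driver reads hardware here; zero-fill keeps the sketch
	 * compilable (and is, of course, not random). */
	memset(buf, 0, max);
	return max;
}

static struct hwrng example_rng = {
	.name = "example-rng",
	.read = example_rng_read,
	.quality = 700,	/* claims ~700 bits of entropy per 1024 bits of input */
};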
View File

@@ -118,6 +118,7 @@ enum hwmon_in_attributes {
hwmon_in_max_alarm,
hwmon_in_lcrit_alarm,
hwmon_in_crit_alarm,
hwmon_in_enable,
};
#define HWMON_I_INPUT BIT(hwmon_in_input)
@@ -135,6 +136,7 @@ enum hwmon_in_attributes {
#define HWMON_I_MAX_ALARM BIT(hwmon_in_max_alarm)
#define HWMON_I_LCRIT_ALARM BIT(hwmon_in_lcrit_alarm)
#define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm)
#define HWMON_I_ENABLE BIT(hwmon_in_enable)
enum hwmon_curr_attributes {
hwmon_curr_input,

View File

@@ -739,8 +739,9 @@ struct vmbus_channel {
u32 ringbuffer_gpadlhandle;
/* Allocated memory for ring buffer */
void *ringbuffer_pages;
struct page *ringbuffer_page;
u32 ringbuffer_pagecount;
u32 ringbuffer_send_offset;
struct hv_ring_buffer_info outbound; /* send to parent */
struct hv_ring_buffer_info inbound; /* receive from parent */
@@ -1021,6 +1022,14 @@ struct vmbus_packet_mpb_array {
struct hv_mpb_array range;
} __packed;
int vmbus_alloc_ring(struct vmbus_channel *channel,
u32 send_size, u32 recv_size);
void vmbus_free_ring(struct vmbus_channel *channel);
int vmbus_connect_ring(struct vmbus_channel *channel,
void (*onchannel_callback)(void *context),
void *context);
int vmbus_disconnect_ring(struct vmbus_channel *channel);
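A sketch of the split open path these helpers enable (ring sizes and callback are hypothetical): rings are allocated first, the callback is attached separately, and the error path releases the rings:

static int example_connect(struct vmbus_channel *chan,
			   void (*onchannel_cb)(void *context), void *ctx)
{
	int ret;

	ret = vmbus_alloc_ring(chan, 16 * PAGE_SIZE, 16 * PAGE_SIZE);
	if (ret)
		return ret;

	ret = vmbus_connect_ring(chan, onchannel_cb, ctx);
	if (ret)
		vmbus_free_ring(chan);
	return ret;
}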
extern int vmbus_open(struct vmbus_channel *channel,
u32 send_ringbuffersize,
@@ -1125,6 +1134,7 @@ struct hv_device {
u16 device_id;
struct device device;
char *driver_override; /* Driver name to force a match */
struct vmbus_channel *channel;
struct kset *channels_kset;
@@ -1442,7 +1452,7 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
const int *srv_version, int srv_vercnt,
int *nego_fw_version, int *nego_srv_version);
void hv_process_channel_removal(u32 relid);
void hv_process_channel_removal(struct vmbus_channel *channel);
void vmbus_setevent(struct vmbus_channel *channel);
/*

View File

@@ -21,6 +21,7 @@
#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ)
extern raw_spinlock_t i8253_lock;
extern bool i8253_clear_counter_on_shutdown;
extern struct clock_event_device i8253_clockevent;
extern void clockevent_i8253_init(bool oneshot);

View File

@@ -214,8 +214,7 @@ static inline void idr_preload_end(void)
++id, (entry) = idr_get_next((idr), &(id)))
/*
* IDA - IDR based id allocator, use when translation from id to
* pointer isn't necessary.
* IDA - ID Allocator, use when translation from id to pointer isn't necessary.
*/
#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */
#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long))
@@ -225,14 +224,14 @@ struct ida_bitmap {
unsigned long bitmap[IDA_BITMAP_LONGS];
};
DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap);
struct ida {
struct radix_tree_root ida_rt;
struct xarray xa;
};
#define IDA_INIT_FLAGS (XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC)
#define IDA_INIT(name) { \
.ida_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER | GFP_NOWAIT), \
.xa = XARRAY_INIT(name, IDA_INIT_FLAGS) \
}
#define DEFINE_IDA(name) struct ida name = IDA_INIT(name)
@@ -292,7 +291,7 @@ static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
static inline void ida_init(struct ida *ida)
{
INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT);
xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
}
#define ida_simple_get(ida, start, end, gfp) \
@@ -301,9 +300,6 @@ static inline void ida_init(struct ida *ida)
static inline bool ida_is_empty(const struct ida *ida)
{
return radix_tree_empty(&ida->ida_rt);
return xa_empty(&ida->xa);
}
/* in lib/radix-tree.c */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
#endif /* __IDR_H__ */
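The external IDA API is unchanged by the XArray conversion; a minimal sketch (hypothetical helpers):

static DEFINE_IDA(example_ida);

static int example_get_id(void)
{
	/* Returns a free ID >= 0, or a negative errno. */
	return ida_alloc(&example_ida, GFP_KERNEL);
}

static void example_put_id(int id)
{
	ida_free(&example_ida, id);
}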

View File

@@ -1460,13 +1460,16 @@ struct ieee80211_ht_operation {
* STA can receive. Rate expressed in units of 1 Mbps.
* If this field is 0 this value should not be used to
* consider the highest RX data rate supported.
* The top 3 bits of this field are reserved.
 * The top 3 bits of this field indicate the Maximum NSTS, total
 * (a beamformee capability).
* @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams
* @tx_highest: Indicates highest long GI VHT PPDU data rate
* STA can transmit. Rate expressed in units of 1 Mbps.
* If this field is 0 this value should not be used to
* consider the highest TX data rate supported.
* The top 3 bits of this field are reserved.
* The top 2 bits of this field are reserved, the
 * 3rd bit from the top indicates VHT Extended NSS BW
* Capability.
*/
struct ieee80211_vht_mcs_info {
__le16 rx_mcs_map;
@@ -1475,6 +1478,13 @@ struct ieee80211_vht_mcs_info {
__le16 tx_highest;
} __packed;
/* for rx_highest */
#define IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT 13
#define IEEE80211_VHT_MAX_NSTS_TOTAL_MASK (7 << IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT)
/* for tx_highest */
#define IEEE80211_VHT_EXT_NSS_BW_CAPABLE (1 << 13)
/**
* enum ieee80211_vht_mcs_support - VHT MCS support definitions
* @IEEE80211_VHT_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
@@ -1545,11 +1555,11 @@ struct ieee80211_vht_operation {
* struct ieee80211_he_cap_elem - HE capabilities element
*
* This structure is the "HE capabilities element" fixed fields as
* described in P802.11ax_D2.0 section 9.4.2.237.2 and 9.4.2.237.3
* described in P802.11ax_D3.0 section 9.4.2.237.2 and 9.4.2.237.3
*/
struct ieee80211_he_cap_elem {
u8 mac_cap_info[5];
u8 phy_cap_info[9];
u8 mac_cap_info[6];
u8 phy_cap_info[11];
} __packed;
#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5
@@ -1650,6 +1660,7 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_SHIFT 2
#define IEEE80211_VHT_CAP_RXLDPC 0x00000010
#define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020
#define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040
@@ -1659,6 +1670,7 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300
#define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400
#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700
#define IEEE80211_VHT_CAP_RXSTBC_SHIFT 8
#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800
#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000
#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT 13
@@ -1678,6 +1690,26 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000
#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000
#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000
#define IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT 30
#define IEEE80211_VHT_CAP_EXT_NSS_BW_MASK 0xc0000000
/**
* ieee80211_get_vht_max_nss - return max NSS for a given bandwidth/MCS
* @cap: VHT capabilities of the peer
* @bw: bandwidth to use
* @mcs: MCS index to use
* @ext_nss_bw_capable: indicates whether or not the local transmitter
* (rate scaling algorithm) can deal with the new logic
* (dot11VHTExtendedNSSBWCapable)
*
* Due to the VHT Extended NSS Bandwidth Support, the maximum NSS can
* vary for a given BW/MCS. This function parses the data.
*
* Note: This function is exported by cfg80211.
*/
int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
enum ieee80211_vht_chanwidth bw,
int mcs, bool ext_nss_bw_capable);
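A caller-side sketch; the bandwidth and MCS index are illustrative, and the final argument reflects the local transmitter capability described above:

static int example_peer_max_nss(struct ieee80211_vht_cap *vht_cap)
{
	/* Max NSS the peer supports at MCS 9 on a 160 MHz channel. */
	return ieee80211_get_vht_max_nss(vht_cap,
					 IEEE80211_VHT_CHANWIDTH_160MHZ,
					 9, true);
}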
/* 802.11ax HE MAC capabilities */
#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01
@@ -1707,15 +1739,15 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04
#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08
#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_1 0x00
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_2 0x10
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_3 0x20
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_4 0x30
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_5 0x40
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_6 0x50
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_7 0x60
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8 0x70
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_MASK 0x70
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_1 0x00
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_2 0x10
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_3 0x20
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_4 0x30
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_5 0x40
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_6 0x50
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_7 0x60
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8 0x70
#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_MASK 0x70
/* Link adaptation is split between byte HE_MAC_CAP1 and
* HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE
@@ -1729,14 +1761,13 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01
#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02
#define IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED 0x04
#define IEEE80211_HE_MAC_CAP2_TRS 0x04
#define IEEE80211_HE_MAC_CAP2_BSR 0x08
#define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10
#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20
#define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40
#define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80
#define IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU 0x01
#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02
#define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04
@@ -1744,25 +1775,34 @@ struct ieee80211_mu_edca_param_set {
* A-MDPU Length Exponent field in the HT capabilities, VHT capabilities and the
* same field in the HE capabilities.
*/
#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_USE_VHT 0x00
#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_1 0x08
#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2 0x10
#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_RESERVED 0x18
#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_MASK 0x18
#define IEEE80211_HE_MAC_CAP3_A_AMSDU_FRAG 0x20
#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_USE_VHT 0x00
#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_1 0x08
#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2 0x10
#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED 0x18
#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK 0x18
#define IEEE80211_HE_MAC_CAP3_AMSDU_FRAG 0x20
#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40
#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80
#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01
#define IEEE80211_HE_MAC_CAP4_QTP 0x02
#define IEEE80211_HE_MAC_CAP4_BQR 0x04
#define IEEE80211_HE_MAC_CAP4_SR_RESP 0x08
#define IEEE80211_HE_MAC_CAP4_SRP_RESP 0x08
#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10
#define IEEE80211_HE_MAC_CAP4_OPS 0x20
#define IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU 0x40
/* Multi TID agg TX is split between byte #4 and #5
* The value is a combination of B39,B40,B41
*/
#define IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39 0x80
#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 0x01
#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 0x02
#define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECVITE_TRANSMISSION 0x04
#define IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU 0x08
#define IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX 0x10
/* 802.11ax HE PHY capabilities */
#define IEEE80211_HE_PHY_CAP0_DUAL_BAND 0x01
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08
@@ -1779,10 +1819,10 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10
#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20
#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40
/* Midamble RX Max NSTS is split between byte #2 and byte #3 */
#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS 0x80
/* Midamble RX/TX Max NSTS is split between byte #2 and byte #3 */
#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS 0x80
#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS 0x01
#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS 0x01
#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02
#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04
#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08
@@ -1883,7 +1923,19 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04
#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08
#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10
#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_2X_AND_1XLTF 0x20
#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_TX_2X_AND_1XLTF 0x20
#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_20MHZ 0x00
#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_40MHZ 0x40
#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_80MHZ 0x80
#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ 0xc0
#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_MASK 0xc0
#define IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM 0x01
#define IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK 0x02
#define IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU 0x04
#define IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU 0x08
#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10
#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20
/* 802.11ax HE TX/RX MCS NSS Support */
#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3)
@@ -1963,8 +2015,8 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000200
#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x000ffc00
#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 10
#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x000100000
#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x000200000
#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x00100000
#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00200000
#define IEEE80211_HE_OPERATION_MULTI_BSSID_AP 0x10000000
#define IEEE80211_HE_OPERATION_TX_BSSID_INDICATOR 0x20000000
#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x40000000

Some files were not shown because too many files have changed in this diff.