Merge tag 'kvmarm-fixes-for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm fixes for 5.3

- A bunch of switch/case fall-through annotation, fixing one actual bug
- Fix PMU reset bug
- Add missing exception class debug strings
Author: Paolo Bonzini
Date:   2019-08-09 16:53:39 +02:00

5794 changed files with 623707 additions and 111796 deletions


@@ -324,7 +324,10 @@ struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags,
#ifdef CONFIG_X86_IO_APIC
extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
#else
#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
{
return -1;
}
#endif
/*
* This function undoes the effect of one call to acpi_register_gsi().
@@ -374,6 +377,7 @@ extern acpi_status wmi_install_notify_handler(const char *guid,
extern acpi_status wmi_remove_notify_handler(const char *guid);
extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out);
extern bool wmi_has_guid(const char *guid);
extern char *wmi_get_acpi_device_uid(const char *guid);
#endif /* CONFIG_ACPI_WMI */


@@ -48,6 +48,7 @@ extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
extern struct workqueue_struct *bdi_wq;
extern struct workqueue_struct *bdi_async_bio_wq;
static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{


@@ -1,13 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BITS_H
#define __LINUX_BITS_H
#include <linux/const.h>
#include <asm/bitsperlong.h>
#define BIT(nr) (1UL << (nr))
#define BIT_ULL(nr) (1ULL << (nr))
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BIT(nr) (UL(1) << (nr))
#define BIT_ULL(nr) (ULL(1) << (nr))
#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_MASK(nr) (ULL(1) << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE 8
@@ -17,10 +19,11 @@
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
*/
#define GENMASK(h, l) \
(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
(((~UL(0)) - (UL(1) << (l)) + 1) & \
(~UL(0) >> (BITS_PER_LONG - 1 - (h))))
#define GENMASK_ULL(h, l) \
(((~0ULL) - (1ULL << (l)) + 1) & \
(~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
(((~ULL(0)) - (ULL(1) << (l)) + 1) & \
(~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h))))
#endif /* __LINUX_BITS_H */
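As a quick illustration of the (unchanged) semantics, a minimal sketch of a register-field helper built on these macros; the CTRL_* names are made up:

	#include <linux/bits.h>

	#define CTRL_EN		BIT(0)		/* bit 0: block enable */
	#define CTRL_MODE	GENMASK(7, 4)	/* bits 7..4: mode field */

	static u32 ctrl_set_mode(u32 reg, u32 mode)
	{
		reg &= ~CTRL_MODE;		/* clear the old mode field */
		reg |= (mode << 4) & CTRL_MODE;	/* insert the new mode */
		return reg | CTRL_EN;		/* keep the block enabled */
	}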


@@ -132,13 +132,17 @@ struct blkcg_gq {
struct blkg_policy_data *pd[BLKCG_MAX_POLS];
struct rcu_head rcu_head;
spinlock_t async_bio_lock;
struct bio_list async_bios;
struct work_struct async_bio_work;
atomic_t use_delay;
atomic64_t delay_nsec;
atomic64_t delay_start;
u64 last_delay;
int last_use;
struct rcu_head rcu_head;
};
typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
@@ -701,6 +705,15 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg
struct bio *bio) { return false; }
#endif
bool __blkcg_punt_bio_submit(struct bio *bio);
static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
if (bio->bi_opf & REQ_CGROUP_PUNT)
return __blkcg_punt_bio_submit(bio);
else
return false;
}
static inline void blkcg_bio_issue_init(struct bio *bio)
{
@@ -848,6 +861,7 @@ static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }
static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline bool blkcg_bio_issue_check(struct request_queue *q,
struct bio *bio) { return true; }


@@ -311,6 +311,14 @@ enum req_flag_bits {
__REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_BACKGROUND, /* background IO */
__REQ_NOWAIT, /* Don't wait if request will block */
/*
* When a shared kthread needs to issue a bio for a cgroup, doing
* so synchronously can lead to priority inversions as the kthread
* can be trapped waiting for that cgroup. CGROUP_PUNT flag makes
* submit_bio() punt the actual issuing to a dedicated per-blkcg
* work item to avoid such priority inversions.
*/
__REQ_CGROUP_PUNT,
/* command specific flags for REQ_OP_WRITE_ZEROES: */
__REQ_NOUNMAP, /* do not free blocks when zeroing */
@@ -337,6 +345,8 @@ enum req_flag_bits {
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT)
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
#define REQ_HIPRI (1ULL << __REQ_HIPRI)


@@ -344,10 +344,15 @@ struct queue_limits {
#ifdef CONFIG_BLK_DEV_ZONED
/*
* Maximum number of zones to report with a single report zones command.
*/
#define BLK_ZONED_REPORT_MAX_ZONES 8192U
extern unsigned int blkdev_nr_zones(struct block_device *bdev);
extern int blkdev_report_zones(struct block_device *bdev,
sector_t sector, struct blk_zone *zones,
unsigned int *nr_zones, gfp_t gfp_mask);
unsigned int *nr_zones);
extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
sector_t nr_sectors, gfp_t gfp_mask);
extern int blk_revalidate_disk_zones(struct gendisk *disk);
@@ -681,7 +686,7 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
}
}
static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
{
return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
@@ -1418,7 +1423,7 @@ static inline bool bdev_is_zoned(struct block_device *bdev)
return false;
}
static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
@@ -1673,8 +1678,7 @@ struct block_device_operations {
/* this callback is with swap_lock and sometimes page table lock held */
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
int (*report_zones)(struct gendisk *, sector_t sector,
struct blk_zone *zones, unsigned int *nr_zones,
gfp_t gfp_mask);
struct blk_zone *zones, unsigned int *nr_zones);
struct module *owner;
const struct pr_ops *pr_ops;
};
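A sketch of a caller adapted to the new signature; the gfp_mask argument is gone because report_zones no longer allocates on the caller's behalf (helper name hypothetical):

	static int foo_first_zones(struct block_device *bdev)
	{
		struct blk_zone zones[16];
		unsigned int nr_zones = ARRAY_SIZE(zones);
		int ret;

		ret = blkdev_report_zones(bdev, 0, zones, &nr_zones);
		if (ret)
			return ret;
		return nr_zones;	/* zones actually reported */
	}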


@@ -211,6 +211,7 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
CEPH_FEATURE_MON_STATEFUL_SUB | \
CEPH_FEATURE_CRUSH_TUNABLES5 | \
CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \
CEPH_FEATURE_MSG_ADDR2 | \
CEPH_FEATURE_CEPHX_V2)
#define CEPH_FEATURES_REQUIRED_DEFAULT 0


@@ -682,7 +682,7 @@ extern const char *ceph_cap_op_name(int op);
/* flags field in client cap messages (version >= 10) */
#define CEPH_CLIENT_CAPS_SYNC (1<<0)
#define CEPH_CLIENT_CAPS_NO_CAPSNAP (1<<1)
#define CEPH_CLIENT_CAPS_PENDING_CAPSNAP (1<<2);
#define CEPH_CLIENT_CAPS_PENDING_CAPSNAP (1<<2)
/*
* caps message, used for capability callbacks, acks, requests, etc.


@@ -52,4 +52,7 @@ int ceph_cls_lock_info(struct ceph_osd_client *osdc,
char *lock_name, u8 *type, char **tag,
struct ceph_locker **lockers, u32 *num_lockers);
int ceph_cls_assert_locked(struct ceph_osd_request *req, int which,
char *lock_name, u8 type, char *cookie, char *tag);
#endif


@@ -218,18 +218,27 @@ static inline void ceph_encode_timespec64(struct ceph_timespec *tv,
/*
* sockaddr_storage <-> ceph_sockaddr
*/
static inline void ceph_encode_addr(struct ceph_entity_addr *a)
#define CEPH_ENTITY_ADDR_TYPE_NONE 0
#define CEPH_ENTITY_ADDR_TYPE_LEGACY __cpu_to_le32(1)
static inline void ceph_encode_banner_addr(struct ceph_entity_addr *a)
{
__be16 ss_family = htons(a->in_addr.ss_family);
a->in_addr.ss_family = *(__u16 *)&ss_family;
/* Banner addresses require TYPE_NONE */
a->type = CEPH_ENTITY_ADDR_TYPE_NONE;
}
static inline void ceph_decode_addr(struct ceph_entity_addr *a)
static inline void ceph_decode_banner_addr(struct ceph_entity_addr *a)
{
__be16 ss_family = *(__be16 *)&a->in_addr.ss_family;
a->in_addr.ss_family = ntohs(ss_family);
WARN_ON(a->in_addr.ss_family == 512);
a->type = CEPH_ENTITY_ADDR_TYPE_LEGACY;
}
extern int ceph_decode_entity_addr(void **p, void *end,
struct ceph_entity_addr *addr);
/*
* encoders
*/


@@ -84,11 +84,13 @@ struct ceph_options {
#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
/*
* Handle the largest possible rbd object in one message.
* The largest possible rbd data object is 32M.
* The largest possible rbd object map object is 64M.
*
* There is no limit on the size of cephfs objects, but it has to obey
* rsize and wsize mount options anyway.
*/
#define CEPH_MSG_MAX_DATA_LEN (32*1024*1024)
#define CEPH_MSG_MAX_DATA_LEN (64*1024*1024)
#define CEPH_AUTH_NAME_DEFAULT "guest"
@@ -299,10 +301,6 @@ int ceph_wait_for_latest_osdmap(struct ceph_client *client,
/* pagevec.c */
extern void ceph_release_page_vector(struct page **pages, int num_pages);
extern struct page **ceph_get_direct_page_vector(const void __user *data,
int num_pages,
bool write_page);
extern void ceph_put_page_vector(struct page **pages, int num_pages,
bool dirty);
extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);


@@ -104,7 +104,6 @@ struct ceph_mon_client {
#endif
};
extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end);
extern int ceph_monmap_contains(struct ceph_monmap *m,
struct ceph_entity_addr *addr);


@@ -198,9 +198,9 @@ struct ceph_osd_request {
bool r_mempool;
struct completion r_completion; /* private to osd_client.c */
ceph_osdc_callback_t r_callback;
struct list_head r_unsafe_item;
struct inode *r_inode; /* for use by callbacks */
struct list_head r_private_item; /* ditto */
void *r_priv; /* ditto */
/* set by submitter */
@@ -389,6 +389,14 @@ extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb);
void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err);
#define osd_req_op_data(oreq, whch, typ, fld) \
({ \
struct ceph_osd_request *__oreq = (oreq); \
unsigned int __whch = (whch); \
BUG_ON(__whch >= __oreq->r_num_ops); \
&__oreq->r_ops[__whch].typ.fld; \
})
extern void osd_req_op_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode, u32 flags);
@@ -497,7 +505,7 @@ int ceph_osdc_call(struct ceph_osd_client *osdc,
const char *class, const char *method,
unsigned int flags,
struct page *req_page, size_t req_len,
struct page *resp_page, size_t *resp_len);
struct page **resp_pages, size_t *resp_len);
extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
struct ceph_vino vino,


@@ -66,4 +66,6 @@ int ceph_extent_to_file(struct ceph_file_layout *l,
struct ceph_file_extent **file_extents,
u32 *num_file_extents);
u64 ceph_get_num_objects(struct ceph_file_layout *l, u64 size);
#endif


@@ -624,7 +624,7 @@ struct cftype {
/*
* Control Group subsystem type.
* See Documentation/cgroup-v1/cgroups.rst for details
* See Documentation/admin-guide/cgroup-v1/cgroups.rst for details
*/
struct cgroup_subsys {
struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);


@@ -699,6 +699,7 @@ void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
struct cgroup_subsys_state;
struct cgroup;
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
struct task_struct *t) { return 0; }


@@ -9,8 +9,6 @@
#include <linux/of.h>
#include <linux/of_clk.h>
#ifdef CONFIG_COMMON_CLK
/*
* flags used across common struct clk. these flags should only affect the
* top-level framework. custom flags for dealing with hardware specifics
@@ -807,7 +805,14 @@ void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw);
/* helper functions */
const char *__clk_get_name(const struct clk *clk);
const char *clk_hw_get_name(const struct clk_hw *hw);
#ifdef CONFIG_COMMON_CLK
struct clk_hw *__clk_get_hw(struct clk *clk);
#else
static inline struct clk_hw *__clk_get_hw(struct clk *clk)
{
return (struct clk_hw *)clk;
}
#endif
unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw,
@@ -867,8 +872,6 @@ static inline long divider_ro_round_rate(struct clk_hw *hw, unsigned long rate,
*/
unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate);
struct of_device_id;
struct clk_onecell_data {
struct clk **clks;
unsigned int clk_num;
@@ -879,8 +882,6 @@ struct clk_hw_onecell_data {
struct clk_hw *hws[];
};
extern struct of_device_id __clk_of_table;
#define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn)
/*
@@ -904,6 +905,40 @@ extern struct of_device_id __clk_of_table;
.ops = _ops, \
})
#define CLK_HW_INIT_HW(_name, _parent, _ops, _flags) \
(&(struct clk_init_data) { \
.flags = _flags, \
.name = _name, \
.parent_hws = (const struct clk_hw*[]) { _parent }, \
.num_parents = 1, \
.ops = _ops, \
})
/*
* This macro is intended for drivers to be able to share the otherwise
* individual struct clk_hw[] compound literals created by the compiler
* when using CLK_HW_INIT_HW. It does NOT support multiple parents.
*/
#define CLK_HW_INIT_HWS(_name, _parent, _ops, _flags) \
(&(struct clk_init_data) { \
.flags = _flags, \
.name = _name, \
.parent_hws = _parent, \
.num_parents = 1, \
.ops = _ops, \
})
#define CLK_HW_INIT_FW_NAME(_name, _parent, _ops, _flags) \
(&(struct clk_init_data) { \
.flags = _flags, \
.name = _name, \
.parent_data = (const struct clk_parent_data[]) { \
{ .fw_name = _parent }, \
}, \
.num_parents = 1, \
.ops = _ops, \
})
#define CLK_HW_INIT_PARENTS(_name, _parents, _ops, _flags) \
(&(struct clk_init_data) { \
.flags = _flags, \
@@ -913,6 +948,24 @@ extern struct of_device_id __clk_of_table;
.ops = _ops, \
})
#define CLK_HW_INIT_PARENTS_HW(_name, _parents, _ops, _flags) \
(&(struct clk_init_data) { \
.flags = _flags, \
.name = _name, \
.parent_hws = _parents, \
.num_parents = ARRAY_SIZE(_parents), \
.ops = _ops, \
})
#define CLK_HW_INIT_PARENTS_DATA(_name, _parents, _ops, _flags) \
(&(struct clk_init_data) { \
.flags = _flags, \
.name = _name, \
.parent_data = _parents, \
.num_parents = ARRAY_SIZE(_parents), \
.ops = _ops, \
})
#define CLK_HW_INIT_NO_PARENT(_name, _ops, _flags) \
(&(struct clk_init_data) { \
.flags = _flags, \
@@ -933,6 +986,43 @@ extern struct of_device_id __clk_of_table;
_flags), \
}
#define CLK_FIXED_FACTOR_HW(_struct, _name, _parent, \
_div, _mult, _flags) \
struct clk_fixed_factor _struct = { \
.div = _div, \
.mult = _mult, \
.hw.init = CLK_HW_INIT_HW(_name, \
_parent, \
&clk_fixed_factor_ops, \
_flags), \
}
/*
* This macro allows the driver to reuse the _parent array for multiple
* fixed factor clk declarations.
*/
#define CLK_FIXED_FACTOR_HWS(_struct, _name, _parent, \
_div, _mult, _flags) \
struct clk_fixed_factor _struct = { \
.div = _div, \
.mult = _mult, \
.hw.init = CLK_HW_INIT_HWS(_name, \
_parent, \
&clk_fixed_factor_ops, \
_flags), \
}
#define CLK_FIXED_FACTOR_FW_NAME(_struct, _name, _parent, \
_div, _mult, _flags) \
struct clk_fixed_factor _struct = { \
.div = _div, \
.mult = _mult, \
.hw.init = CLK_HW_INIT_FW_NAME(_name, \
_parent, \
&clk_fixed_factor_ops, \
_flags), \
}
#ifdef CONFIG_OF
int of_clk_add_provider(struct device_node *np,
struct clk *(*clk_src_get)(struct of_phandle_args *args,
@@ -1019,5 +1109,4 @@ static inline int of_clk_detect_critical(struct device_node *np, int index,
void clk_gate_restore_context(struct clk_hw *hw);
#endif /* CONFIG_COMMON_CLK */
#endif /* CLK_PROVIDER_H */
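A minimal sketch of the new hw-pointer style in a clock driver, assuming an already-registered parent clk_hw (all names hypothetical):

	static struct clk_hw foo_osc_hw;	/* registered elsewhere */

	/* /2 divider fed directly by the parent's clk_hw pointer */
	static CLK_FIXED_FACTOR_HW(foo_osc_div2, "osc_div2", &foo_osc_hw,
				   2, 1, 0);

	/* in probe: devm_clk_hw_register(dev, &foo_osc_div2.hw); */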


@@ -329,6 +329,19 @@ int __must_check clk_bulk_get(struct device *dev, int num_clks,
*/
int __must_check clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks);
/**
* clk_bulk_get_optional - lookup and obtain a number of references to clock producer
* @dev: device for clock "consumer"
* @num_clks: the number of clk_bulk_data
* @clks: the clk_bulk_data table of consumer
*
* Behaves the same as clk_bulk_get() except where there is no clock producer.
* In this case, instead of returning -ENOENT, the function returns 0 and
* NULL for a clk for which a clock producer could not be determined.
*/
int __must_check clk_bulk_get_optional(struct device *dev, int num_clks,
struct clk_bulk_data *clks);
/**
* devm_clk_bulk_get - managed get multiple clk consumers
* @dev: device for clock "consumer"
@@ -343,6 +356,28 @@ int __must_check clk_bulk_get_all(struct device *dev,
*/
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks);
/**
* devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
* @dev: device for clock "consumer"
* @clks: pointer to the clk_bulk_data table of consumer
*
* Behaves the same as devm_clk_bulk_get() except where there is no clock
* producer. In this case, instead of returning -ENOENT, the function returns
* NULL for given clk. It is assumed all clocks in clk_bulk_data are optional.
*
* Returns 0 if all clocks specified in clk_bulk_data table are obtained
* successfully or for any clk there was no clk provider available, otherwise
* returns valid IS_ERR() condition containing errno.
* The implementation uses @dev and @clk_bulk_data.id to determine the
* clock consumer, and thereby the clock producer.
* The clock returned is stored in each @clk_bulk_data.clk field.
*
* Drivers must assume that the clock source is not enabled.
*
* clk_bulk_get should not be called from within interrupt context.
*/
int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
struct clk_bulk_data *clks);
/**
* devm_clk_bulk_get_all - managed get multiple clk consumers
* @dev: device for clock "consumer"
@@ -715,6 +750,12 @@ static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
return 0;
}
static inline int __must_check clk_bulk_get_optional(struct device *dev,
int num_clks, struct clk_bulk_data *clks)
{
return 0;
}
static inline int __must_check clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks)
{
@@ -738,6 +779,12 @@ static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clk
return 0;
}
static inline int __must_check devm_clk_bulk_get_optional(struct device *dev,
int num_clks, struct clk_bulk_data *clks)
{
return 0;
}
static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks)
{
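A sketch of the optional-bulk pattern in a probe path, under the assumption that a missing clock is acceptable and comes back as a NULL clk (which the clk API treats as a no-op; names hypothetical):

	static struct clk_bulk_data foo_clks[] = {
		{ .id = "bus" },
		{ .id = "pix" },	/* absent on some SoC variants */
	};

	static int foo_probe(struct device *dev)
	{
		int ret;

		ret = devm_clk_bulk_get_optional(dev, ARRAY_SIZE(foo_clks),
						 foo_clks);
		if (ret)
			return ret;

		/* NULL entries from missing producers are no-ops here */
		return clk_bulk_prepare_enable(ARRAY_SIZE(foo_clks),
					       foo_clks);
	}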


@@ -58,8 +58,7 @@ Mellon the rights to redistribute these changes without encumbrance.
#ifndef _CODA_HEADER_
#define _CODA_HEADER_
#if defined(__linux__)
typedef unsigned long long u_quad_t;
#endif
#include <uapi/linux/coda.h>
#endif


@@ -1,72 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CODA_PSDEV_H
#define __CODA_PSDEV_H
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <uapi/linux/coda_psdev.h>
struct kstatfs;
/* communication pending/processing queues */
struct venus_comm {
u_long vc_seq;
wait_queue_head_t vc_waitq; /* Venus wait queue */
struct list_head vc_pending;
struct list_head vc_processing;
int vc_inuse;
struct super_block *vc_sb;
struct mutex vc_mutex;
};
static inline struct venus_comm *coda_vcp(struct super_block *sb)
{
return (struct venus_comm *)((sb)->s_fs_info);
}
/* upcalls */
int venus_rootfid(struct super_block *sb, struct CodaFid *fidp);
int venus_getattr(struct super_block *sb, struct CodaFid *fid,
struct coda_vattr *attr);
int venus_setattr(struct super_block *, struct CodaFid *, struct coda_vattr *);
int venus_lookup(struct super_block *sb, struct CodaFid *fid,
const char *name, int length, int *type,
struct CodaFid *resfid);
int venus_close(struct super_block *sb, struct CodaFid *fid, int flags,
kuid_t uid);
int venus_open(struct super_block *sb, struct CodaFid *fid, int flags,
struct file **f);
int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid,
const char *name, int length,
struct CodaFid *newfid, struct coda_vattr *attrs);
int venus_create(struct super_block *sb, struct CodaFid *dirfid,
const char *name, int length, int excl, int mode,
struct CodaFid *newfid, struct coda_vattr *attrs) ;
int venus_rmdir(struct super_block *sb, struct CodaFid *dirfid,
const char *name, int length);
int venus_remove(struct super_block *sb, struct CodaFid *dirfid,
const char *name, int length);
int venus_readlink(struct super_block *sb, struct CodaFid *fid,
char *buffer, int *length);
int venus_rename(struct super_block *, struct CodaFid *new_fid,
struct CodaFid *old_fid, size_t old_length,
size_t new_length, const char *old_name,
const char *new_name);
int venus_link(struct super_block *sb, struct CodaFid *fid,
struct CodaFid *dirfid, const char *name, int len );
int venus_symlink(struct super_block *sb, struct CodaFid *fid,
const char *name, int len, const char *symname, int symlen);
int venus_access(struct super_block *sb, struct CodaFid *fid, int mask);
int venus_pioctl(struct super_block *sb, struct CodaFid *fid,
unsigned int cmd, struct PioctlData *data);
int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out);
int venus_fsync(struct super_block *sb, struct CodaFid *fid);
int venus_statfs(struct dentry *dentry, struct kstatfs *sfs);
/*
* Statistics
*/
extern struct venus_comm coda_comms[];
#endif


@@ -138,8 +138,7 @@ typedef struct {
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
} compat_sigset_t;
int set_compat_user_sigmask(const compat_sigset_t __user *usigmask,
sigset_t *set, sigset_t *oldset,
int set_compat_user_sigmask(const compat_sigset_t __user *umask,
size_t sigsetsize);
struct compat_sigaction {


@@ -170,3 +170,5 @@
#else
#define __diag_GCC_8(s)
#endif
#define __no_fgcse __attribute__((optimize("-fno-gcse")))


@@ -116,9 +116,14 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
".pushsection .discard.unreachable\n\t" \
".long 999b - .\n\t" \
".popsection\n\t"
/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(".rodata..c_jump_table")
#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif
#ifndef ASM_UNREACHABLE


@@ -189,6 +189,10 @@ struct ftrace_likely_data {
#define asm_volatile_goto(x...) asm goto(x)
#endif
#ifndef __no_fgcse
# define __no_fgcse
#endif
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))


@@ -55,10 +55,71 @@ struct cn_dev {
struct cn_queue_dev *cbdev;
};
/**
* cn_add_callback() - Registers a new callback with the connector core.
*
* @id: unique connector's user identifier.
* It must be registered in connector.h for legal
* in-kernel users.
* @name: connector's callback symbolic name.
* @callback: connector's callback.
* parameters are %cn_msg and the sender's credentials
*/
int cn_add_callback(struct cb_id *id, const char *name,
void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
void cn_del_callback(struct cb_id *);
/**
* cn_del_callback() - Unregisters a callback with the connector core.
*
* @id: unique connector's user identifier.
*/
void cn_del_callback(struct cb_id *id);
/**
* cn_netlink_send_mult - Sends message to the specified groups.
*
* @msg: message header (with attached data).
* @len: Number of @msg to be sent.
* @portid: destination port.
* If non-zero the message will be sent to the given port,
* which should be set to the original sender.
* @group: destination group.
* If @portid and @group is zero, then appropriate group will
* be searched through all registered connector users, and
* message will be delivered to the group which was created
* for user with the same ID as in @msg.
* If @group is not zero, then message will be delivered
* to the specified group.
* @gfp_mask: GFP mask.
*
* It can be safely called from softirq context, but may silently
* fail under strong memory pressure.
*
* If there are no listeners for given group %-ESRCH can be returned.
*/
int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask);
/**
* cn_netlink_send - Sends message to the specified groups.
*
* @msg: message header (with attached data).
* @portid: destination port.
* If non-zero the message will be sent to the given port,
* which should be set to the original sender.
* @group: destination group.
* If @portid and @group is zero, then appropriate group will
* be searched through all registered connector users, and
* message will be delivered to the group which was created
* for user with the same ID as in @msg.
* If @group is not zero, then message will be delivered
* to the specified group.
* @gfp_mask: GFP mask.
*
* It can be safely called from softirq context, but may silently
* fail under strong memory pressure.
*
* If there are no listeners for given group %-ESRCH can be returned.
*/
int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask);
int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
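A minimal registration sketch following these docs; the ID values and names are hypothetical (real in-kernel users must register their IDs in connector.h):

	static struct cb_id foo_cn_id = {
		.idx = CN_NETLINK_USERS + 3,	/* placeholder index */
		.val = 1,
	};

	static void foo_cn_callback(struct cn_msg *msg,
				    struct netlink_skb_parms *nsp)
	{
		pr_info("foo_cn: %u bytes from port %u\n",
			msg->len, nsp->portid);
	}

	static int __init foo_cn_init(void)
	{
		return cn_add_callback(&foo_cn_id, "foo_cn",
				       foo_cn_callback);
	}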


@@ -47,11 +47,6 @@ struct cpufreq_cpuinfo {
unsigned int transition_latency;
};
struct cpufreq_user_policy {
unsigned int min; /* in kHz */
unsigned int max; /* in kHz */
};
struct cpufreq_policy {
/* CPUs sharing clock, require sw coordination */
cpumask_var_t cpus; /* Online CPUs only */
@@ -81,7 +76,8 @@ struct cpufreq_policy {
struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */
struct cpufreq_user_policy user_policy;
struct dev_pm_qos_request *min_freq_req;
struct dev_pm_qos_request *max_freq_req;
struct cpufreq_frequency_table *freq_table;
enum cpufreq_table_sorting freq_table_sorted;
@@ -144,6 +140,9 @@ struct cpufreq_policy {
/* Pointer to the cooling device if used for thermal mitigation */
struct thermal_cooling_device *cdev;
struct notifier_block nb_min;
struct notifier_block nb_max;
};
struct cpufreq_freqs {
@@ -201,6 +200,7 @@ void cpufreq_cpu_release(struct cpufreq_policy *policy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_policy *new_policy);
void refresh_frequency_limits(struct cpufreq_policy *policy);
void cpufreq_update_policy(unsigned int cpu);
void cpufreq_update_limits(unsigned int cpu);
bool have_governor_per_policy(void);
@@ -992,7 +992,7 @@ extern struct freq_attr *cpufreq_generic_attr[];
int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);
unsigned int cpufreq_generic_get(unsigned int cpu);
int cpufreq_generic_init(struct cpufreq_policy *policy,
void cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int transition_latency);
#endif /* _LINUX_CPUFREQ_H */


@@ -7,6 +7,9 @@
#include <linux/radix-tree.h>
#include <asm/pgtable.h>
/* Flag for synchronous flush */
#define DAXDEV_F_SYNC (1UL << 0)
typedef unsigned long dax_entry_t;
struct iomap_ops;
@@ -38,18 +41,40 @@ extern struct attribute_group dax_attribute_group;
#if IS_ENABLED(CONFIG_DAX)
struct dax_device *dax_get_by_host(const char *host);
struct dax_device *alloc_dax(void *private, const char *host,
const struct dax_operations *ops);
const struct dax_operations *ops, unsigned long flags);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
bool __dax_synchronous(struct dax_device *dax_dev);
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
return __dax_synchronous(dax_dev);
}
void __set_dax_synchronous(struct dax_device *dax_dev);
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
__set_dax_synchronous(dax_dev);
}
/*
* Check if given mapping is supported by the file / underlying device.
*/
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
struct dax_device *dax_dev)
{
if (!(vma->vm_flags & VM_SYNC))
return true;
if (!IS_DAX(file_inode(vma->vm_file)))
return false;
return dax_synchronous(dax_dev);
}
#else
static inline struct dax_device *dax_get_by_host(const char *host)
{
return NULL;
}
static inline struct dax_device *alloc_dax(void *private, const char *host,
const struct dax_operations *ops)
const struct dax_operations *ops, unsigned long flags)
{
/*
* Callers should check IS_ENABLED(CONFIG_DAX) to know if this
@@ -70,6 +95,18 @@ static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
return false;
}
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
return true;
}
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
struct dax_device *dax_dev)
{
return !(vma->vm_flags & VM_SYNC);
}
#endif
struct writeback_control;
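A sketch of how a filesystem's mmap handler might use daxdev_mapping_supported() to reject MAP_SYNC on a device that still needs flushing (names hypothetical, vm_ops setup elided):

	static struct dax_device *foo_dax_dev;	/* looked up at mount time */

	static int foo_file_mmap(struct file *file,
				 struct vm_area_struct *vma)
	{
		/* VM_SYNC needs a synchronous (flush-free) DAX device */
		if (!daxdev_mapping_supported(vma, foo_dax_dev))
			return -EOPNOTSUPP;

		file_accessed(file);
		return 0;
	}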


@@ -291,7 +291,6 @@ static inline unsigned d_count(const struct dentry *dentry)
*/
extern __printf(4, 5)
char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
extern char *simple_dname(struct dentry *, char *, int);
extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);


@@ -95,8 +95,7 @@ typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **
typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector,
struct blk_zone *zones,
unsigned int *nr_zones,
gfp_t gfp_mask);
unsigned int *nr_zones);
/*
* These iteration functions are typically used to check (and combine)
@@ -530,29 +529,20 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
*---------------------------------------------------------------*/
#define DM_NAME "device-mapper"
#define DM_RATELIMIT(pr_func, fmt, ...) \
do { \
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
\
if (__ratelimit(&rs)) \
pr_func(DM_FMT(fmt), ##__VA_ARGS__); \
} while (0)
#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"
#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) DM_RATELIMIT(pr_err, fmt, ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) DM_RATELIMIT(pr_warn, fmt, ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) DM_RATELIMIT(pr_info, fmt, ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#ifdef CONFIG_DM_DEBUG
#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) DM_RATELIMIT(pr_debug, fmt, ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#else
#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)


@@ -6,7 +6,7 @@
* Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2008-2009 Novell Inc.
*
* See Documentation/driver-model/ for more information.
* See Documentation/driver-api/driver-model/ for more information.
*/
#ifndef _DEVICE_H_


@@ -82,6 +82,7 @@ struct dim_stats {
* @prev_stats: Measured rates from previous iteration (for comparison)
* @start_sample: Sampled data at start of current iteration
* @work: Work to perform on action required
* @priv: A pointer to the struct that points to dim
* @profile_ix: Current moderation profile
* @mode: CQ period count mode
* @tune_state: Algorithm tuning state (see below)
@@ -95,6 +96,7 @@ struct dim {
struct dim_sample start_sample;
struct dim_sample measuring_sample;
struct work_struct work;
void *priv;
u8 profile_ix;
u8 mode;
u8 tune_state;
@@ -363,4 +365,25 @@ struct dim_cq_moder net_dim_get_def_tx_moderation(u8 cq_period_mode);
*/
void net_dim(struct dim *dim, struct dim_sample end_sample);
/* RDMA DIM */
/*
* RDMA DIM profile:
* profile size must be of RDMA_DIM_PARAMS_NUM_PROFILES.
*/
#define RDMA_DIM_PARAMS_NUM_PROFILES 9
#define RDMA_DIM_START_PROFILE 0
/**
* rdma_dim - Runs the adaptive moderation.
* @dim: The moderation struct.
* @completions: The number of completions collected in this round.
*
* Each call to rdma_dim takes the latest amount of completions that
* have been collected and counts them as a new event.
* Once enough events have been collected the algorithm decides a new
* moderation level.
*/
void rdma_dim(struct dim *dim, u64 completions);
#endif /* DIM_H */
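A sketch of the call site described above, with a hypothetical CQ wrapper; the algorithm schedules dim.work itself once it decides to re-tune:

	struct foo_cq {
		struct dim dim;		/* dim.work/dim.priv set at init */
		bool dim_enabled;
	};

	static void foo_cq_completed(struct foo_cq *cq, int completions)
	{
		/* feed each poll round's completion count to the engine */
		if (cq->dim_enabled)
			rdma_dim(&cq->dim, completions);
	}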


@@ -28,18 +28,20 @@ struct dma_buf_attachment;
/**
* struct dma_buf_ops - operations possible on struct dma_buf
* @map_atomic: [optional] maps a page from the buffer into kernel address
* space, users may not block until the subsequent unmap call.
* This callback must not sleep.
* @unmap_atomic: [optional] unmaps a atomically mapped page from the buffer.
* This Callback must not sleep.
* @map: [optional] maps a page from the buffer into kernel address space.
* @unmap: [optional] unmaps a page from the buffer.
* @vmap: [optional] creates a virtual mapping for the buffer into kernel
* address space. Same restrictions as for vmap and friends apply.
* @vunmap: [optional] unmaps a vmap from the buffer
*/
struct dma_buf_ops {
/**
* @cache_sgt_mapping:
*
* If true the framework will cache the first mapping made for each
* attachment. This avoids creating mappings for attachments multiple
* times.
*/
bool cache_sgt_mapping;
/**
* @attach:
*
@@ -194,8 +196,6 @@ struct dma_buf_ops {
* to be restarted.
*/
int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
void *(*map)(struct dma_buf *, unsigned long);
void (*unmap)(struct dma_buf *, unsigned long, void *);
/**
* @mmap:
@@ -234,6 +234,31 @@ struct dma_buf_ops {
*/
int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
/**
* @map:
*
* Maps a page from the buffer into kernel address space. The page is
* specified by offset into the buffer in PAGE_SIZE units.
*
* This callback is optional.
*
* Returns:
*
* Virtual address pointer where requested page can be accessed. NULL
* on error or when this function is unimplemented by the exporter.
*/
void *(*map)(struct dma_buf *, unsigned long);
/**
* @unmap:
*
* Unmaps a page from the buffer. Page offset and address pointer should
* be the same as the one passed to and returned by matching call to map.
*
* This callback is optional.
*/
void (*unmap)(struct dma_buf *, unsigned long, void *);
void *(*vmap)(struct dma_buf *);
void (*vunmap)(struct dma_buf *, void *vaddr);
};
@@ -244,10 +269,12 @@ struct dma_buf_ops {
* @file: file pointer used for sharing buffers across, and for refcounting.
* @attachments: list of dma_buf_attachment that denotes all devices attached.
* @ops: dma_buf_ops associated with this buffer object.
* @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap
* @lock: used internally to serialize list manipulation, attach/detach and
* vmap/unmap, and accesses to name
* @vmapping_counter: used internally to refcnt the vmaps
* @vmap_ptr: the current vmap ptr if vmapping_counter > 0
* @exp_name: name of the exporter; useful for debugging.
* @name: userspace-provided name; useful for accounting and debugging.
* @owner: pointer to exporter module; used for refcounting when exporter is a
* kernel module.
* @list_node: node for dma_buf accounting and debugging.
@@ -275,6 +302,7 @@ struct dma_buf {
unsigned vmapping_counter;
void *vmap_ptr;
const char *exp_name;
const char *name;
struct module *owner;
struct list_head list_node;
void *priv;
@@ -296,6 +324,8 @@ struct dma_buf {
* @dmabuf: buffer for this attachment.
* @dev: device attached to the buffer.
* @node: list of dma_buf_attachment.
* @sgt: cached mapping.
* @dir: direction of cached mapping.
* @priv: exporter specific attachment data.
*
* This structure holds the attachment information between the dma_buf buffer
@@ -311,6 +341,8 @@ struct dma_buf_attachment {
struct dma_buf *dmabuf;
struct device *dev;
struct list_head node;
struct sg_table *sgt;
enum dma_data_direction dir;
void *priv;
};


@@ -32,6 +32,15 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool force_dma_unencrypted(struct device *dev);
#else
static inline bool force_dma_unencrypted(struct device *dev)
{
return false;
}
#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */
/*
* If memory encryption is supported, phys_to_dma will set the memory encryption
* bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma


@@ -679,6 +679,20 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
return dma_set_mask_and_coherent(dev, mask);
}
/**
* dma_addressing_limited - return if the device is addressing limited
* @dev: device to check
*
* Return %true if the device's DMA mask is too small to address all memory in
* the system, else %false. Lack of addressing bits is the prime reason for
* bounce buffering, but might not be the only one.
*/
static inline bool dma_addressing_limited(struct device *dev)
{
return min_not_zero(*dev->dma_mask, dev->bus_dma_mask) <
dma_get_required_mask(dev);
}
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent);
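A sketch of a probe-time check building on the helper above (driver names hypothetical):

	static int foo_dma_setup(struct device *dev)
	{
		int ret;

		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
		if (ret)
			return ret;

		/* the granted mask may still not cover all of memory */
		if (dma_addressing_limited(dev))
			dev_warn(dev, "DMA addressing limited, bouncing likely\n");

		return 0;
	}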

include/linux/dma/edma.h (new file)

@@ -0,0 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
* Synopsys DesignWare eDMA core driver
*
* Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
*/
#ifndef _DW_EDMA_H
#define _DW_EDMA_H
#include <linux/device.h>
#include <linux/dmaengine.h>
struct dw_edma;
/**
* struct dw_edma_chip - representation of DesignWare eDMA controller hardware
* @dev: struct device of the eDMA controller
* @id: instance ID
* @irq: irq line
* @dw: struct dw_edma that is filled by dw_edma_probe()
*/
struct dw_edma_chip {
struct device *dev;
int id;
int irq;
struct dw_edma *dw;
};
/* Export to the platform drivers */
#if IS_ENABLED(CONFIG_DW_EDMA)
int dw_edma_probe(struct dw_edma_chip *chip);
int dw_edma_remove(struct dw_edma_chip *chip);
#else
static inline int dw_edma_probe(struct dw_edma_chip *chip)
{
return -ENODEV;
}
static inline int dw_edma_remove(struct dw_edma_chip *chip)
{
return 0;
}
#endif /* CONFIG_DW_EDMA */
#endif /* _DW_EDMA_H */
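A sketch of how glue code (e.g. a PCIe endpoint driver) might hand the controller to the core, per the struct doc above; error handling trimmed and names hypothetical:

	static int foo_edma_attach(struct device *dev, int irq)
	{
		struct dw_edma_chip *chip;

		chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->dev = dev;
		chip->id = 0;
		chip->irq = irq;
		/* chip->dw is filled in by dw_edma_probe() on success */
		return dw_edma_probe(chip);
	}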


@@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MXS_DMA_H_
#define _MXS_DMA_H_
#include <linux/dmaengine.h>
#define MXS_DMA_CTRL_WAIT4END BIT(31)
#define MXS_DMA_CTRL_WAIT4RDY BIT(30)
/*
* The mxs dmaengine can do PIO transfers. We pass a pointer to the PIO words
* in the second argument to dmaengine_prep_slave_sg when the direction is
set to DMA_TRANS_NONE. To make this clear and to prevent users from doing
the error-prone casting, we have this wrapper function.
*/
static inline struct dma_async_tx_descriptor *mxs_dmaengine_prep_pio(
struct dma_chan *chan, u32 *pio, unsigned int npio,
enum dma_transfer_direction dir, unsigned long flags)
{
return dmaengine_prep_slave_sg(chan, (struct scatterlist *)pio, npio,
dir, flags);
}
#endif /* _MXS_DMA_H_ */
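A sketch of a PIO transfer using the wrapper, modeled loosely on the mxs NAND/MMC users; the register word is a placeholder and the flag usage is an assumption about this API:

	static int foo_send_pio(struct dma_chan *chan)
	{
		struct dma_async_tx_descriptor *desc;
		u32 pio[1] = { 0x12345678 };	/* placeholder register word */

		desc = mxs_dmaengine_prep_pio(chan, pio, ARRAY_SIZE(pio),
					      DMA_TRANS_NONE,
					      MXS_DMA_CTRL_WAIT4END);
		if (!desc)
			return -EINVAL;

		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
		return 0;
	}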


@@ -1302,7 +1302,8 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
dma_filter_fn fn, void *fn_param);
dma_filter_fn fn, void *fn_param,
struct device_node *np);
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
struct dma_chan *dma_request_chan(struct device *dev, const char *name);
@@ -1327,7 +1328,9 @@ static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
dma_filter_fn fn, void *fn_param)
dma_filter_fn fn,
void *fn_param,
struct device_node *np)
{
return NULL;
}
@@ -1399,7 +1402,8 @@ void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
#define dma_request_channel(mask, x, y) \
__dma_request_channel(&(mask), x, y, NULL)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
__dma_request_slave_channel_compat(&(mask), x, y, dev, name)
@@ -1417,6 +1421,6 @@ static inline struct dma_chan
if (!fn || !fn_param)
return NULL;
return __dma_request_channel(mask, fn, fn_param);
return __dma_request_channel(mask, fn, fn_param, NULL);
}
#endif /* DMAENGINE_H */


@@ -75,7 +75,7 @@ struct elevator_type
size_t icq_size; /* see iocontext.h */
size_t icq_align; /* ditto */
struct elv_fs_entry *elevator_attrs;
char elevator_name[ELV_NAME_MAX];
const char *elevator_name;
const char *elevator_alias;
struct module *elevator_owner;
#ifdef CONFIG_BLK_DEBUG_FS
@@ -160,15 +160,6 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t);
#define ELEVATOR_INSERT_FLUSH 5
#define ELEVATOR_INSERT_SORT_MERGE 6
/*
* return values from elevator_may_queue_fn
*/
enum {
ELV_MQUEUE_MAY,
ELV_MQUEUE_NO,
ELV_MQUEUE_MUST,
};
#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)


@@ -747,7 +747,7 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
return size <= size_default && (size & (size - 1)) == 0;
}
#define bpf_ctx_wide_store_ok(off, size, type, field) \
#define bpf_ctx_wide_access_ok(off, size, type, field) \
(size == sizeof(__u64) && \
off >= offsetof(type, field) && \
off + sizeof(__u64) <= offsetofend(type, field) && \


@@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Analog Devices AXI common registers & definitions
*
* Copyright 2019 Analog Devices Inc.
*
* https://wiki.analog.com/resources/fpga/docs/axi_ip
* https://wiki.analog.com/resources/fpga/docs/hdl/regmap
*/
#ifndef ADI_AXI_COMMON_H_
#define ADI_AXI_COMMON_H_
#define ADI_AXI_REG_VERSION 0x0000
#define ADI_AXI_PCORE_VER(major, minor, patch) \
(((major) << 16) | ((minor) << 8) | (patch))
#endif /* ADI_AXI_COMMON_H_ */
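A sketch of the version check these macros enable, assuming the ADI HDL convention of encoding the patch level as a letter (the MMIO handling is illustrative):

	static int foo_check_core(void __iomem *base)
	{
		u32 version = readl(base + ADI_AXI_REG_VERSION);

		/* require at least core version 1.00.a */
		if (version < ADI_AXI_PCORE_VER(1, 0, 'a'))
			return -ENODEV;

		return 0;
	}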


@@ -2210,9 +2210,6 @@ struct file_system_type {
#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
extern struct dentry *mount_ns(struct file_system_type *fs_type,
int flags, void *data, void *ns, struct user_namespace *user_ns,
int (*fill_super)(struct super_block *, void *, int));
#ifdef CONFIG_BLOCK
extern struct dentry *mount_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
@@ -2252,28 +2249,10 @@ void free_anon_bdev(dev_t);
struct super_block *sget_fc(struct fs_context *fc,
int (*test)(struct super_block *, struct fs_context *),
int (*set)(struct super_block *, struct fs_context *));
struct super_block *sget_userns(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
int flags, struct user_namespace *user_ns,
void *data);
struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
int flags, void *data);
extern struct dentry *mount_pseudo_xattr(struct file_system_type *, char *,
const struct super_operations *ops,
const struct xattr_handler **xattr,
const struct dentry_operations *dops,
unsigned long);
static inline struct dentry *
mount_pseudo(struct file_system_type *fs_type, char *name,
const struct super_operations *ops,
const struct dentry_operations *dops, unsigned long magic)
{
return mount_pseudo_xattr(fs_type, name, ops, NULL, dops, magic);
}
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
#define fops_get(fops) \


@@ -99,6 +99,7 @@ struct fs_context {
void *s_fs_info; /* Proposed s_fs_info */
unsigned int sb_flags; /* Proposed superblock flags (SB_*) */
unsigned int sb_flags_mask; /* Superblock flags that were changed */
unsigned int s_iflags; /* OR'd with sb->s_iflags */
unsigned int lsm_flags; /* Information flags from the fs to the LSM */
enum fs_context_purpose purpose:8;
enum fs_context_phase phase:8; /* The phase the context is in */
@@ -146,6 +147,12 @@ extern int vfs_get_super(struct fs_context *fc,
enum vfs_get_super_keying keying,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc));
extern int get_tree_nodev(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc));
extern int get_tree_single(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc));
extern const struct file_operations fscontext_fops;
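A sketch of the intended call pattern for the new helpers, with a hypothetical filesystem:

	static int foofs_fill_super(struct super_block *sb,
				    struct fs_context *fc);

	static int foofs_get_tree(struct fs_context *fc)
	{
		/* a new superblock per mount, no backing block device */
		return get_tree_nodev(fc, foofs_fill_super);
	}

	static const struct fs_context_operations foofs_context_ops = {
		.get_tree = foofs_get_tree,
	};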


@@ -20,6 +20,5 @@ static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *))
}
void pin_remove(struct fs_pin *);
void pin_insert_group(struct fs_pin *, struct vfsmount *, struct hlist_head *);
void pin_insert(struct fs_pin *, struct vfsmount *);
void pin_kill(struct fs_pin *);


@@ -427,8 +427,8 @@ struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
iter = ftrace_rec_iter_next(iter))
int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);


@@ -106,6 +106,7 @@ void devm_gpio_free(struct device *dev, unsigned int gpio);
struct device;
struct gpio_chip;
struct pinctrl_dev;
static inline bool gpio_is_valid(int number)
{


@@ -586,6 +586,8 @@ void gpiochip_remove_pin_ranges(struct gpio_chip *chip);
#else /* ! CONFIG_PINCTRL */
struct pinctrl_dev;
static inline int
gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,


@@ -47,6 +47,7 @@ enum hdmi_infoframe_type {
HDMI_INFOFRAME_TYPE_AVI = 0x82,
HDMI_INFOFRAME_TYPE_SPD = 0x83,
HDMI_INFOFRAME_TYPE_AUDIO = 0x84,
HDMI_INFOFRAME_TYPE_DRM = 0x87,
};
#define HDMI_IEEE_OUI 0x000c03
@@ -55,6 +56,7 @@ enum hdmi_infoframe_type {
#define HDMI_AVI_INFOFRAME_SIZE 13
#define HDMI_SPD_INFOFRAME_SIZE 25
#define HDMI_AUDIO_INFOFRAME_SIZE 10
#define HDMI_DRM_INFOFRAME_SIZE 26
#define HDMI_INFOFRAME_SIZE(type) \
(HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE)
@@ -152,6 +154,17 @@ enum hdmi_content_type {
HDMI_CONTENT_TYPE_GAME,
};
enum hdmi_metadata_type {
HDMI_STATIC_METADATA_TYPE1 = 1,
};
enum hdmi_eotf {
HDMI_EOTF_TRADITIONAL_GAMMA_SDR,
HDMI_EOTF_TRADITIONAL_GAMMA_HDR,
HDMI_EOTF_SMPTE_ST2084,
HDMI_EOTF_BT_2100_HLG,
};
struct hdmi_avi_infoframe {
enum hdmi_infoframe_type type;
unsigned char version;
@@ -175,12 +188,37 @@ struct hdmi_avi_infoframe {
unsigned short right_bar;
};
/* DRM Infoframe as per CTA 861.G spec */
struct hdmi_drm_infoframe {
enum hdmi_infoframe_type type;
unsigned char version;
unsigned char length;
enum hdmi_eotf eotf;
enum hdmi_metadata_type metadata_type;
struct {
u16 x, y;
} display_primaries[3];
struct {
u16 x, y;
} white_point;
u16 max_display_mastering_luminance;
u16 min_display_mastering_luminance;
u16 max_cll;
u16 max_fall;
};
int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
size_t size);
ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame,
void *buffer, size_t size);
int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame);
int hdmi_drm_infoframe_init(struct hdmi_drm_infoframe *frame);
ssize_t hdmi_drm_infoframe_pack(struct hdmi_drm_infoframe *frame, void *buffer,
size_t size);
ssize_t hdmi_drm_infoframe_pack_only(const struct hdmi_drm_infoframe *frame,
void *buffer, size_t size);
int hdmi_drm_infoframe_check(struct hdmi_drm_infoframe *frame);
enum hdmi_spd_sdi {
HDMI_SPD_SDI_UNKNOWN,
@@ -320,6 +358,33 @@ struct hdmi_vendor_infoframe {
unsigned int s3d_ext_data;
};
/* HDR Metadata as per 861.G spec */
struct hdr_static_metadata {
__u8 eotf;
__u8 metadata_type;
__u16 max_cll;
__u16 max_fall;
__u16 min_cll;
};
/**
* struct hdr_sink_metadata - HDR sink metadata
*
* Metadata Information read from Sink's EDID
*/
struct hdr_sink_metadata {
/**
* @metadata_type: Static_Metadata_Descriptor_ID.
*/
__u32 metadata_type;
/**
* @hdmi_type1: HDR Metadata Infoframe.
*/
union {
struct hdr_static_metadata hdmi_type1;
};
};
int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame);
ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
void *buffer, size_t size);
@@ -344,6 +409,7 @@ union hdmi_vendor_any_infoframe {
* @spd: spd infoframe
* @vendor: union of all vendor infoframes
* @audio: audio infoframe
* @drm: Dynamic Range and Mastering infoframe
*
* This is used by the generic pack function. This works since all infoframes
* have the same header which also indicates which type of infoframe should be
@@ -355,6 +421,7 @@ union hdmi_infoframe {
struct hdmi_spd_infoframe spd;
union hdmi_vendor_any_infoframe vendor;
struct hdmi_audio_infoframe audio;
struct hdmi_drm_infoframe drm;
};
ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer,
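A sketch of building and packing the new infoframe; the luminance numbers are placeholders:

	static ssize_t foo_pack_hdr_frame(void *buf, size_t size)
	{
		struct hdmi_drm_infoframe frame;
		int ret;

		ret = hdmi_drm_infoframe_init(&frame);
		if (ret)
			return ret;

		frame.eotf = HDMI_EOTF_SMPTE_ST2084;
		frame.metadata_type = HDMI_STATIC_METADATA_TYPE1;
		frame.max_cll = 1000;	/* cd/m^2, placeholder */
		frame.max_fall = 400;	/* cd/m^2, placeholder */

		return hdmi_drm_infoframe_pack(&frame, buf, size);
	}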


@@ -21,8 +21,8 @@
*
* HMM address space mirroring API:
*
* Use HMM address space mirroring if you want to mirror range of the CPU page
* table of a process into a device page table. Here, "mirror" means "keep
* Use HMM address space mirroring if you want to mirror a range of the CPU
* page tables of a process into a device page table. Here, "mirror" means "keep
* synchronized". Prerequisites: the device must provide the ability to write-
* protect its page tables (at PAGE_SIZE granularity), and must be able to
* recover from the resulting potential page faults.
@@ -62,7 +62,7 @@
#include <linux/kconfig.h>
#include <asm/pgtable.h>
#if IS_ENABLED(CONFIG_HMM)
#ifdef CONFIG_HMM_MIRROR
#include <linux/device.h>
#include <linux/migrate.h>
@@ -82,19 +82,18 @@
* @mirrors_sem: read/write semaphore protecting the mirrors list
* @wq: wait queue for user waiting on a range invalidation
* @notifiers: count of active mmu notifiers
* @dead: is the mm dead ?
*/
struct hmm {
struct mm_struct *mm;
struct kref kref;
struct mutex lock;
spinlock_t ranges_lock;
struct list_head ranges;
struct list_head mirrors;
struct mmu_notifier mmu_notifier;
struct rw_semaphore mirrors_sem;
wait_queue_head_t wq;
struct rcu_head rcu;
long notifiers;
bool dead;
};
/*
@@ -105,10 +104,11 @@ struct hmm {
* HMM_PFN_WRITE: CPU page table has write permission set
* HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE)
*
* The driver provide a flags array, if driver valid bit for an entry is bit
* 3 ie (entry & (1 << 3)) is true if entry is valid then driver must provide
* The driver provides a flags array for mapping page protections to device
* PTE bits. If the driver valid bit for an entry is bit 3,
* i.e., (entry & (1 << 3)), then the driver must provide
* an array in hmm_range.flags with hmm_range.flags[HMM_PFN_VALID] == 1 << 3.
* Same logic apply to all flags. This is same idea as vm_page_prot in vma
* The same logic applies to all flags. This is the same idea as vm_page_prot in vma
* except that this is per device driver rather than per architecture.
*/
enum hmm_pfn_flag_e {
@@ -129,13 +129,13 @@ enum hmm_pfn_flag_e {
* be mirrored by a device, because the entry will never have HMM_PFN_VALID
* set and the pfn value is undefined.
*
* Driver provide entry value for none entry, error entry and special entry,
* driver can alias (ie use same value for error and special for instance). It
* should not alias none and error or special.
* Driver provides values for none entry, error entry, and special entry.
* Driver can alias (i.e., use same value) error and special, but
* it should not alias none with error or special.
*
* HMM pfn value returned by hmm_vma_get_pfns() or hmm_vma_fault() will be:
* hmm_range.values[HMM_PFN_ERROR] if CPU page table entry is poisonous,
* hmm_range.values[HMM_PFN_NONE] if there is no CPU page table
* hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry,
* hmm_range.values[HMM_PFN_SPECIAL] if CPU page table entry is a special one
*/
enum hmm_pfn_value_e {
@@ -158,6 +158,7 @@ enum hmm_pfn_value_e {
* @values: pfn value for some special case (none, special, error, ...)
* @default_flags: default flags for the range (write, read, ... see hmm doc)
* @pfn_flags_mask: allows to mask pfn flags so that only default_flags matter
* @page_shift: device virtual address shift value (should be >= PAGE_SHIFT)
* @pfn_shift: pfn shift value (should be <= PAGE_SHIFT)
* @valid: pfns array did not change since it has been filled by an HMM function
*/
@@ -180,7 +181,7 @@ struct hmm_range {
/*
* hmm_range_page_shift() - return the page shift for the range
* @range: range being queried
* Returns: page shift (page size = 1 << page shift) for the range
* Return: page shift (page size = 1 << page shift) for the range
*/
static inline unsigned hmm_range_page_shift(const struct hmm_range *range)
{
@@ -190,7 +191,7 @@ static inline unsigned hmm_range_page_shift(const struct hmm_range *range)
/*
* hmm_range_page_size() - return the page size for the range
* @range: range being queried
* Returns: page size for the range in bytes
* Return: page size for the range in bytes
*/
static inline unsigned long hmm_range_page_size(const struct hmm_range *range)
{
@@ -201,28 +202,19 @@ static inline unsigned long hmm_range_page_size(const struct hmm_range *range)
* hmm_range_wait_until_valid() - wait for range to be valid
* @range: range affected by invalidation to wait on
* @timeout: time out for wait in ms (ie abort wait after that period of time)
* Returns: true if the range is valid, false otherwise.
* Return: true if the range is valid, false otherwise.
*/
static inline bool hmm_range_wait_until_valid(struct hmm_range *range,
unsigned long timeout)
{
/* Check if mm is dead ? */
if (range->hmm == NULL || range->hmm->dead || range->hmm->mm == NULL) {
range->valid = false;
return false;
}
if (range->valid)
return true;
wait_event_timeout(range->hmm->wq, range->valid || range->hmm->dead,
msecs_to_jiffies(timeout));
/* Return current valid status just in case we get lucky */
return range->valid;
return wait_event_timeout(range->hmm->wq, range->valid,
msecs_to_jiffies(timeout)) != 0;
}
/*
* hmm_range_valid() - test if a range is valid or not
* @range: range
* Returns: true if the range is valid, false otherwise.
* Return: true if the range is valid, false otherwise.
*/
static inline bool hmm_range_valid(struct hmm_range *range)
{
@@ -233,7 +225,7 @@ static inline bool hmm_range_valid(struct hmm_range *range)
* hmm_device_entry_to_page() - return struct page pointed to by a device entry
* @range: range use to decode device entry value
* @entry: device entry value to get corresponding struct page from
* Returns: struct page pointer if entry is a valid, NULL otherwise
* Return: struct page pointer if entry is a valid, NULL otherwise
*
* If the device entry is valid (ie valid flag set) then return the struct page
* matching the entry value. Otherwise return NULL.
@@ -256,7 +248,7 @@ static inline struct page *hmm_device_entry_to_page(const struct hmm_range *rang
* hmm_device_entry_to_pfn() - return pfn value store in a device entry
* @range: range use to decode device entry value
* @entry: device entry to extract pfn from
* Returns: pfn value if device entry is valid, -1UL otherwise
* Return: pfn value if device entry is valid, -1UL otherwise
*/
static inline unsigned long
hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t pfn)
@@ -276,7 +268,7 @@ hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t pfn)
* hmm_device_entry_from_page() - create a valid device entry for a page
* @range: range use to encode HMM pfn value
* @page: page for which to create the device entry
* Returns: valid device entry for the page
* Return: valid device entry for the page
*/
static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range,
struct page *page)
@@ -289,7 +281,7 @@ static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range,
* hmm_device_entry_from_pfn() - create a valid device entry value from pfn
* @range: range use to encode HMM pfn value
* @pfn: pfn value for which to create the device entry
* Returns: valid device entry for the pfn
* Return: valid device entry for the pfn
*/
static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
unsigned long pfn)
@@ -332,9 +324,6 @@ static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
return hmm_device_entry_from_pfn(range, pfn);
}
#if IS_ENABLED(CONFIG_HMM_MIRROR)
/*
* Mirroring: how to synchronize device page table with CPU page table.
*
@@ -394,7 +383,7 @@ enum hmm_update_event {
};
/*
* struct hmm_update - HMM update informations for callback
* struct hmm_update - HMM update information for callback
*
* @start: virtual start address of the range to update
* @end: virtual end address of the range to update
@@ -418,17 +407,18 @@ struct hmm_mirror_ops {
*
* @mirror: pointer to struct hmm_mirror
*
* This is called when the mm_struct is being released.
* The callback should make sure no references to the mirror occur
* after the callback returns.
* This is called when the mm_struct is being released. The callback
* must ensure that all access to any pages obtained from this mirror
* is halted before the callback returns. All future access should
* fault.
*/
void (*release)(struct hmm_mirror *mirror);
/* sync_cpu_device_pagetables() - synchronize page tables
*
* @mirror: pointer to struct hmm_mirror
* @update: update informations (see struct hmm_update)
* Returns: -EAGAIN if update.blockable false and callback need to
* @update: update information (see struct hmm_update)
* Return: -EAGAIN if update.blockable false and callback need to
* block, 0 otherwise.
*
* This callback ultimately originates from mmu_notifiers when the CPU
@@ -464,36 +454,11 @@ struct hmm_mirror {
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
void hmm_mirror_unregister(struct hmm_mirror *mirror);
/*
* hmm_mirror_mm_is_alive() - test if mm is still alive
* @mirror: the HMM mm mirror for which we want to lock the mmap_sem
* Returns: false if the mm is dead, true otherwise
*
* This is an optimization; it will not always be accurate, ie there can be
* false negatives (the process is being killed but HMM has not yet been
* informed of that). It is only intended to be used to optimize out cases
* where the driver is about to do something time consuming and it would be
* better to skip it if the mm is dead.
*/
static inline bool hmm_mirror_mm_is_alive(struct hmm_mirror *mirror)
{
struct mm_struct *mm;
if (!mirror || !mirror->hmm)
return false;
mm = READ_ONCE(mirror->hmm->mm);
if (mirror->hmm->dead || !mm)
return false;
return true;
}
/*
* Please see Documentation/vm/hmm.rst for how to use the range API.
*/
int hmm_range_register(struct hmm_range *range,
struct mm_struct *mm,
struct hmm_mirror *mirror,
unsigned long start,
unsigned long end,
unsigned page_shift);
@@ -529,7 +494,8 @@ static inline bool hmm_vma_range_done(struct hmm_range *range)
}
/* This is a temporary helper to avoid merge conflict between trees. */
static inline int hmm_vma_fault(struct hmm_range *range, bool block)
static inline int hmm_vma_fault(struct hmm_mirror *mirror,
struct hmm_range *range, bool block)
{
long ret;
@@ -542,7 +508,7 @@ static inline int hmm_vma_fault(struct hmm_range *range, bool block)
range->default_flags = 0;
range->pfn_flags_mask = -1UL;
ret = hmm_range_register(range, range->vma->vm_mm,
ret = hmm_range_register(range, mirror,
range->start, range->end,
PAGE_SHIFT);
if (ret)
@@ -561,7 +527,7 @@ static inline int hmm_vma_fault(struct hmm_range *range, bool block)
ret = hmm_range_fault(range, block);
if (ret <= 0) {
if (ret == -EBUSY || !ret) {
/* Same as above drop mmap_sem to match old API. */
/* Same as above, drop mmap_sem to match old API. */
up_read(&range->vma->vm_mm->mmap_sem);
ret = -EBUSY;
} else if (ret == -EAGAIN)
@@ -573,208 +539,12 @@ static inline int hmm_vma_fault(struct hmm_range *range, bool block)
}
/* Below are for HMM internal use only! Not to be used by device driver! */
void hmm_mm_destroy(struct mm_struct *mm);
static inline void hmm_mm_init(struct mm_struct *mm)
{
mm->hmm = NULL;
}
#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
static inline void hmm_mm_destroy(struct mm_struct *mm) {}
static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct hmm_devmem;
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
unsigned long addr);
/*
* struct hmm_devmem_ops - callback for ZONE_DEVICE memory events
*
* @free: called when the refcount on a page reaches 1 and it is thus no
* longer used
* @fault: called when there is a page fault to unaddressable memory
*
* Both callbacks happen from the page_free() and page_fault() callbacks of
* struct dev_pagemap respectively. See include/linux/memremap.h for more
* details on those.
*
* The hmm_devmem_ops callbacks are just here to provide a coherent and
* unique API to device drivers; device drivers should not register their
* own page_free() or page_fault() but rely on the hmm_devmem_ops
* callbacks.
*/
struct hmm_devmem_ops {
/*
* free() - free a device page
* @devmem: device memory structure (see struct hmm_devmem)
* @page: pointer to struct page being freed
*
* The callback occurs whenever a device page's refcount reaches 1, which
* means that no one is holding any reference on the page anymore
* (ZONE_DEVICE pages have an elevated refcount of 1 by default so
* that they are not released to the general page allocator).
*
* Note that the callback has exclusive ownership of the page (as no
* one is holding any reference).
*/
void (*free)(struct hmm_devmem *devmem, struct page *page);
/*
* fault() - CPU page fault or get user page (GUP)
* @devmem: device memory structure (see struct hmm_devmem)
* @vma: virtual memory area containing the virtual address
* @addr: virtual address that faulted or for which there is a GUP
* @page: pointer to struct page backing virtual address (unreliable)
* @flags: FAULT_FLAG_* (see include/linux/mm.h)
* @pmdp: page middle directory
* Returns: VM_FAULT_MINOR/MAJOR on success or one of VM_FAULT_ERROR
* on error
*
* The callback occurs whenever there is a CPU page fault or GUP on a
* virtual address. This means that the device driver must migrate the
* page back to regular memory (CPU accessible).
*
* The device driver is free to migrate more than one page from the
* fault() callback as an optimization. However, if the device decides to
* migrate more than one page, it must always prioritize the faulting
* address over the others.
*
* The struct page pointer is only given as a hint to allow quick
* lookup of internal device driver data. A concurrent migration
* might have already freed that page and the virtual address might
* no longer be backed by it, so it should not be modified by the
* callback.
*
* Note that mmap semaphore is held in read mode at least when this
* callback occurs, hence the vma is valid upon callback entry.
*/
vm_fault_t (*fault)(struct hmm_devmem *devmem,
struct vm_area_struct *vma,
unsigned long addr,
const struct page *page,
unsigned int flags,
pmd_t *pmdp);
};
/*
* struct hmm_devmem - track device memory
*
* @completion: completion object for device memory
* @pfn_first: first pfn for this resource (set by hmm_devmem_add())
* @pfn_last: last pfn for this resource (set by hmm_devmem_add())
* @resource: IO resource reserved for this chunk of memory
* @pagemap: device page map for that chunk
* @device: device to bind resource to
* @ops: memory operations callback
* @ref: per CPU refcount
* @page_fault: callback when CPU fault on an unaddressable device page
*
* This is a helper structure for device drivers that do not wish to
* implement the gory details related to hotplugging new memory and
* allocating struct pages.
*
* Device drivers can directly use ZONE_DEVICE memory on their own if they
* wish to do so.
*
* The page_fault() callback must migrate the page back, from device memory
* to system memory, so that the CPU can access it. This might fail for
* various reasons (device issues, device has been unplugged, ...). When such error
* conditions happen, the page_fault() callback must return VM_FAULT_SIGBUS and
* set the CPU page table entry to "poisoned".
*
* Note that because memory cgroup charges are transferred to the device memory,
* this should never fail due to memory restrictions. However, allocation
* of a regular system page might still fail because we are out of memory. If
* that happens, the page_fault() callback must return VM_FAULT_OOM.
*
* The page_fault() callback can also try to migrate back multiple pages in one
* chunk, as an optimization. It must, however, prioritize the faulting address
* over all the others.
*/
typedef vm_fault_t (*dev_page_fault_t)(struct vm_area_struct *vma,
unsigned long addr,
const struct page *page,
unsigned int flags,
pmd_t *pmdp);
struct hmm_devmem {
struct completion completion;
unsigned long pfn_first;
unsigned long pfn_last;
struct resource *resource;
struct device *device;
struct dev_pagemap pagemap;
const struct hmm_devmem_ops *ops;
struct percpu_ref ref;
dev_page_fault_t page_fault;
};
/*
* To add (hotplug) device memory, HMM assumes that there is no real resource
* that reserves a range in the physical address space (this is intended to be
* used by unaddressable device memory). It will reserve a physical range big
* enough and allocate struct pages for it.
*
* The device driver can wrap the hmm_devmem struct inside a private device
* driver struct.
*/
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
struct device *device,
unsigned long size);
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
struct device *device,
struct resource *res);
/*
* hmm_devmem_page_set_drvdata - set per-page driver data field
*
* @page: pointer to struct page
* @data: driver data value to set
*
* Because the page cannot be on an LRU list, we have an unsigned long that
* the driver can use to store a per-page field. This is just a simple
* helper to do that.
*/
static inline void hmm_devmem_page_set_drvdata(struct page *page,
unsigned long data)
{
page->hmm_data = data;
}
/*
* hmm_devmem_page_get_drvdata - get per page driver data field
*
* @page: pointer to struct page
* Return: driver data value
*/
static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page)
{
return page->hmm_data;
}
/*
* struct hmm_device - fake device to hang device memory onto
*
* @device: device struct
* @minor: device minor number
*/
struct hmm_device {
struct device device;
unsigned int minor;
};
/*
* A device driver that wants to handle multiple devices' memory through a
* single fake device can use hmm_device to do so. This is purely a helper;
* it is not strictly needed in order to make use of any HMM functionality.
*/
struct hmm_device *hmm_device_new(void *drvdata);
void hmm_device_put(struct hmm_device *hmm_device);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
#else /* IS_ENABLED(CONFIG_HMM) */
static inline void hmm_mm_destroy(struct mm_struct *mm) {}
static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM) */
#endif /* LINUX_HMM_H */

View File

@@ -297,6 +297,8 @@ struct host1x_device {
struct list_head clients;
bool registered;
struct device_dma_parameters dma_parms;
};
static inline struct host1x_device *to_host1x_device(struct device *dev)

View File

@@ -121,6 +121,23 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
bool transparent_hugepage_enabled(struct vm_area_struct *vma);
#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
unsigned long haddr)
{
/* Don't have to check pgoff for anonymous vma */
if (!vma_is_anonymous(vma)) {
if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
(vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
return false;
}
if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
return false;
return true;
}
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
@@ -271,6 +288,12 @@ static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
return false;
}
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
unsigned long haddr)
{
return false;
}
static inline void prep_transhuge_page(struct page *page) {}
#define transparent_hugepage_flags 0UL

View File

@@ -1,7 +1,7 @@
/*
Hardware Random Number Generator
Please read Documentation/hw_random.txt for details on use.
Please read Documentation/admin-guide/hw_random.rst for details on use.
----------------------------------------------------------
This software may be used and distributed according to the terms

View File

@@ -14,9 +14,10 @@
#include <linux/sched.h>
/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
#define HWLOCK_RAW 0x03
#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
#define HWLOCK_RAW 0x03
#define HWLOCK_IN_ATOMIC 0x04 /* Called while in atomic context */
struct device;
struct device_node;
@@ -222,6 +223,23 @@ static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
}
/**
* hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock
* @hwlock: an hwspinlock which we want to trylock
*
* This function attempts to lock an hwspinlock, and will immediately fail
* if the hwspinlock is already taken.
*
* This function shall be called only from an atomic context.
*
* Returns 0 if we successfully locked the hwspinlock, -EBUSY if
* the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
*/
static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock)
{
return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL);
}
/**
* hwspin_trylock() - attempt to lock a specific hwspinlock
* @hwlock: an hwspinlock which we want to trylock
@@ -312,6 +330,28 @@ int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
}
/**
* hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit
* @hwlock: the hwspinlock to be locked
* @to: timeout value in msecs
*
* This function locks the underlying @hwlock. If the @hwlock
* is already taken, the function will busy loop waiting for it to
* be released, but give up when @to msecs have elapsed.
*
* This function shall be called only from an atomic context and the timeout
* value shall not exceed a few msecs.
*
* Returns 0 when the @hwlock was successfully taken, and an appropriate
* error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
* busy after @to msecs). The function will never sleep.
*/
static inline
int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to)
{
return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL);
}
/**
* hwspin_lock_timeout() - lock an hwspinlock with timeout limit
* @hwlock: the hwspinlock to be locked
@@ -386,6 +426,21 @@ static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
__hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
}
/**
* hwspin_unlock_in_atomic() - unlock hwspinlock
* @hwlock: a previously-acquired hwspinlock which we want to unlock
*
* This function will unlock a specific hwspinlock.
*
* @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
* this function: it is a bug to call unlock on a @hwlock that is already
* unlocked.
*/
static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock)
{
__hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL);
}
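Together, the three _in_atomic variants above form a complete lock/unlock sequence for contexts that are already atomic. A minimal sketch, assuming a hypothetical driver whose hwlock was obtained earlier (e.g. via hwspin_lock_request_specific()); the register offset and structure are illustrative:
#include <linux/hwspinlock.h>
#include <linux/io.h>
/* Hypothetical driver state; the 0x10 register offset is illustrative. */
struct mybank {
	struct hwspinlock *hwlock;
	void __iomem *regs;
};
static int mybank_update_shared_reg(struct mybank *bank, u32 val)
{
	int ret;
	/* Already atomic, so no IRQ-state juggling; keep the timeout short. */
	ret = hwspin_lock_timeout_in_atomic(bank->hwlock, 5);
	if (ret)
		return ret;
	writel(val, bank->regs + 0x10);
	hwspin_unlock_in_atomic(bank->hwlock);
	return 0;
}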
/**
* hwspin_unlock() - unlock hwspinlock
* @hwlock: a previously-acquired hwspinlock which we want to unlock

View File

@@ -1,16 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* ------------------------------------------------------------------------- */
/* */
/* i2c.h - definitions for the i2c-bus interface */
/* */
/* ------------------------------------------------------------------------- */
/* Copyright (C) 1995-2000 Simon G. Vogl
/*
* i2c.h - definitions for the Linux i2c bus interface
* Copyright (C) 1995-2000 Simon G. Vogl
* Copyright (C) 2013-2019 Wolfram Sang <wsa@the-dreams.de>
*
* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
* Frodo Looijaard <frodol@dds.nl>
*/
/* ------------------------------------------------------------------------- */
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
Frodo Looijaard <frodol@dds.nl> */
#ifndef _LINUX_I2C_H
#define _LINUX_I2C_H
@@ -40,7 +36,8 @@ struct i2c_device_identity;
union i2c_smbus_data;
struct i2c_board_info;
enum i2c_slave_event;
typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *);
typedef int (*i2c_slave_cb_t)(struct i2c_client *client,
enum i2c_slave_event event, u8 *val);
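With the callback parameters now named in the typedef, a slave event handler can be read off the prototype alone. A minimal sketch of such a handler; the one-byte register model and the myslave structure are illustrative, not part of this patch:
#include <linux/i2c.h>
struct myslave {
	u8 reg;		/* hypothetical one-byte scratch register */
};
static int myslave_cb(struct i2c_client *client,
		      enum i2c_slave_event event, u8 *val)
{
	struct myslave *priv = i2c_get_clientdata(client);
	switch (event) {
	case I2C_SLAVE_WRITE_RECEIVED:
		priv->reg = *val;	/* master wrote a byte to us */
		break;
	case I2C_SLAVE_READ_REQUESTED:
	case I2C_SLAVE_READ_PROCESSED:
		*val = priv->reg;	/* master reads the byte back */
		break;
	default:
		break;
	}
	return 0;
}
Such a handler would be registered from probe via i2c_slave_register(client, myslave_cb).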
struct module;
struct property_entry;
@@ -257,16 +254,16 @@ struct i2c_driver {
unsigned int class;
/* Standard driver model interfaces */
int (*probe)(struct i2c_client *, const struct i2c_device_id *);
int (*remove)(struct i2c_client *);
int (*probe)(struct i2c_client *client, const struct i2c_device_id *id);
int (*remove)(struct i2c_client *client);
/* New driver model interface to aid the seamless removal of the
* current probe()'s, more commonly unused than used second parameter.
*/
int (*probe_new)(struct i2c_client *);
int (*probe_new)(struct i2c_client *client);
/* driver model interfaces that don't relate to enumeration */
void (*shutdown)(struct i2c_client *);
void (*shutdown)(struct i2c_client *client);
/* Alert callback, for example for the SMBus alert protocol.
* The format and meaning of the data value depends on the protocol.
@@ -275,7 +272,7 @@ struct i2c_driver {
* For the SMBus Host Notify protocol, the data corresponds to the
* 16-bit payload data reported by the slave device acting as master.
*/
void (*alert)(struct i2c_client *, enum i2c_alert_protocol protocol,
void (*alert)(struct i2c_client *client, enum i2c_alert_protocol protocol,
unsigned int data);
/* an ioctl-like command that can be used to perform specific functions
@@ -287,7 +284,7 @@ struct i2c_driver {
const struct i2c_device_id *id_table;
/* Device detection callback for automatic device creation */
int (*detect)(struct i2c_client *, struct i2c_board_info *);
int (*detect)(struct i2c_client *client, struct i2c_board_info *info);
const unsigned short *address_list;
struct list_head clients;
@@ -297,8 +294,7 @@ struct i2c_driver {
/**
* struct i2c_client - represent an I2C slave device
* @flags: I2C_CLIENT_TEN indicates the device uses a ten bit chip address;
* I2C_CLIENT_PEC indicates it uses SMBus Packet Error Checking
* @flags: see I2C_CLIENT_* for possible flags
* @addr: Address used on the I2C bus connected to the parent adapter.
* @name: Indicates the type of the device, usually a chip name that's
* generic enough to hide second-sourcing and compatible revisions.
@@ -316,6 +312,15 @@ struct i2c_driver {
*/
struct i2c_client {
unsigned short flags; /* div., see below */
#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
/* Must equal I2C_M_TEN below */
#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */
#define I2C_CLIENT_HOST_NOTIFY 0x40 /* We want to use I2C host notify */
#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
/* Must match I2C_M_STOP|IGNORE_NAK */
unsigned short addr; /* chip address - NOTE: 7bit */
/* addresses are stored in the */
/* _LOWER_ 7 bits */
@@ -437,6 +442,9 @@ struct i2c_board_info {
extern struct i2c_client *
i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
extern struct i2c_client *
i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
/* If you don't know the exact address of an I2C device, use this variant
* instead, which can probe for device presence in a list of possible
* addresses. The "probe" callback function is optional. If it is provided,
@@ -447,10 +455,10 @@ extern struct i2c_client *
i2c_new_probed_device(struct i2c_adapter *adap,
struct i2c_board_info *info,
unsigned short const *addr_list,
int (*probe)(struct i2c_adapter *, unsigned short addr));
int (*probe)(struct i2c_adapter *adap, unsigned short addr));
/* Common custom probe functions */
extern int i2c_probe_func_quick_read(struct i2c_adapter *, unsigned short addr);
extern int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr);
/* For devices that use several addresses, use i2c_new_dummy() to make
* client handles for the extra addresses.
@@ -458,6 +466,9 @@ extern int i2c_probe_func_quick_read(struct i2c_adapter *, unsigned short addr);
extern struct i2c_client *
i2c_new_dummy(struct i2c_adapter *adap, u16 address);
extern struct i2c_client *
i2c_new_dummy_device(struct i2c_adapter *adapter, u16 address);
extern struct i2c_client *
devm_i2c_new_dummy_device(struct device *dev, struct i2c_adapter *adap, u16 address);
@@ -466,7 +477,7 @@ i2c_new_secondary_device(struct i2c_client *client,
const char *name,
u16 default_addr);
extern void i2c_unregister_device(struct i2c_client *);
extern void i2c_unregister_device(struct i2c_client *client);
#endif /* I2C */
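The new *_device variants declared above are ERR_PTR()-returning counterparts of the older NULL-returning calls, so probe paths can forward a precise error code. A hedged usage sketch; the 0x51 secondary address and the probe function are made up for illustration:
#include <linux/err.h>
#include <linux/i2c.h>
static int mychip_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct i2c_client *dummy;
	/* devm_ variant: the dummy client is unregistered automatically. */
	dummy = devm_i2c_new_dummy_device(&client->dev,
					  client->adapter, 0x51);
	if (IS_ERR(dummy))
		return PTR_ERR(dummy);
	/* talk to the chip's second address through 'dummy' from here on */
	return 0;
}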
/* Mainboard arch_initcall() code should register all its I2C devices.
@@ -551,9 +562,9 @@ struct i2c_algorithm {
* The main operations are wrapped by i2c_lock_bus and i2c_unlock_bus.
*/
struct i2c_lock_operations {
void (*lock_bus)(struct i2c_adapter *, unsigned int flags);
int (*trylock_bus)(struct i2c_adapter *, unsigned int flags);
void (*unlock_bus)(struct i2c_adapter *, unsigned int flags);
void (*lock_bus)(struct i2c_adapter *adapter, unsigned int flags);
int (*trylock_bus)(struct i2c_adapter *adapter, unsigned int flags);
void (*unlock_bus)(struct i2c_adapter *adapter, unsigned int flags);
};
/**
@@ -703,14 +714,14 @@ struct i2c_adapter {
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
static inline void *i2c_get_adapdata(const struct i2c_adapter *dev)
static inline void *i2c_get_adapdata(const struct i2c_adapter *adap)
{
return dev_get_drvdata(&dev->dev);
return dev_get_drvdata(&adap->dev);
}
static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
static inline void i2c_set_adapdata(struct i2c_adapter *adap, void *data)
{
dev_set_drvdata(&dev->dev, data);
dev_set_drvdata(&adap->dev, data);
}
static inline struct i2c_adapter *
@@ -726,7 +737,7 @@ i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
return NULL;
}
int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *));
int i2c_for_each_dev(void *data, int (*fn)(struct device *dev, void *data));
/* Adapter locking functions, exported for shared pin cases */
#define I2C_LOCK_ROOT_ADAPTER BIT(0)
@@ -802,16 +813,6 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap)
i2c_unlock_bus(adap, I2C_LOCK_ROOT_ADAPTER);
}
/*flags for the client struct: */
#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
/* Must equal I2C_M_TEN below */
#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */
#define I2C_CLIENT_HOST_NOTIFY 0x40 /* We want to use I2C host notify */
#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
/* Must match I2C_M_STOP|IGNORE_NAK */
/* i2c adapter classes (bitmask) */
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
@@ -832,12 +833,12 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap)
/* administration...
*/
#if IS_ENABLED(CONFIG_I2C)
extern int i2c_add_adapter(struct i2c_adapter *);
extern void i2c_del_adapter(struct i2c_adapter *);
extern int i2c_add_numbered_adapter(struct i2c_adapter *);
extern int i2c_add_adapter(struct i2c_adapter *adap);
extern void i2c_del_adapter(struct i2c_adapter *adap);
extern int i2c_add_numbered_adapter(struct i2c_adapter *adap);
extern int i2c_register_driver(struct module *, struct i2c_driver *);
extern void i2c_del_driver(struct i2c_driver *);
extern int i2c_register_driver(struct module *owner, struct i2c_driver *driver);
extern void i2c_del_driver(struct i2c_driver *driver);
/* use a define to avoid include chaining to get THIS_MODULE */
#define i2c_add_driver(driver) \

View File

@@ -253,9 +253,9 @@ static inline void ide_std_init_ports(struct ide_hw *hw,
* Special Driver Flags
*/
enum {
IDE_SFLAG_SET_GEOMETRY = (1 << 0),
IDE_SFLAG_RECALIBRATE = (1 << 1),
IDE_SFLAG_SET_MULTMODE = (1 << 2),
IDE_SFLAG_SET_GEOMETRY = BIT(0),
IDE_SFLAG_RECALIBRATE = BIT(1),
IDE_SFLAG_SET_MULTMODE = BIT(2),
};
/*
@@ -267,13 +267,13 @@ typedef enum {
} ide_startstop_t;
enum {
IDE_VALID_ERROR = (1 << 1),
IDE_VALID_ERROR = BIT(1),
IDE_VALID_FEATURE = IDE_VALID_ERROR,
IDE_VALID_NSECT = (1 << 2),
IDE_VALID_LBAL = (1 << 3),
IDE_VALID_LBAM = (1 << 4),
IDE_VALID_LBAH = (1 << 5),
IDE_VALID_DEVICE = (1 << 6),
IDE_VALID_NSECT = BIT(2),
IDE_VALID_LBAL = BIT(3),
IDE_VALID_LBAM = BIT(4),
IDE_VALID_LBAH = BIT(5),
IDE_VALID_DEVICE = BIT(6),
IDE_VALID_LBA = IDE_VALID_LBAL |
IDE_VALID_LBAM |
IDE_VALID_LBAH,
@@ -289,24 +289,24 @@ enum {
};
enum {
IDE_TFLAG_LBA48 = (1 << 0),
IDE_TFLAG_WRITE = (1 << 1),
IDE_TFLAG_CUSTOM_HANDLER = (1 << 2),
IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 3),
IDE_TFLAG_LBA48 = BIT(0),
IDE_TFLAG_WRITE = BIT(1),
IDE_TFLAG_CUSTOM_HANDLER = BIT(2),
IDE_TFLAG_DMA_PIO_FALLBACK = BIT(3),
/* force 16-bit I/O operations */
IDE_TFLAG_IO_16BIT = (1 << 4),
IDE_TFLAG_IO_16BIT = BIT(4),
/* struct ide_cmd was allocated using kmalloc() */
IDE_TFLAG_DYN = (1 << 5),
IDE_TFLAG_FS = (1 << 6),
IDE_TFLAG_MULTI_PIO = (1 << 7),
IDE_TFLAG_SET_XFER = (1 << 8),
IDE_TFLAG_DYN = BIT(5),
IDE_TFLAG_FS = BIT(6),
IDE_TFLAG_MULTI_PIO = BIT(7),
IDE_TFLAG_SET_XFER = BIT(8),
};
enum {
IDE_FTFLAG_FLAGGED = (1 << 0),
IDE_FTFLAG_SET_IN_FLAGS = (1 << 1),
IDE_FTFLAG_OUT_DATA = (1 << 2),
IDE_FTFLAG_IN_DATA = (1 << 3),
IDE_FTFLAG_FLAGGED = BIT(0),
IDE_FTFLAG_SET_IN_FLAGS = BIT(1),
IDE_FTFLAG_OUT_DATA = BIT(2),
IDE_FTFLAG_IN_DATA = BIT(3),
};
struct ide_taskfile {
@@ -357,13 +357,13 @@ struct ide_cmd {
/* ATAPI packet command flags */
enum {
/* set when an error is considered normal - no retry (ide-tape) */
PC_FLAG_ABORT = (1 << 0),
PC_FLAG_SUPPRESS_ERROR = (1 << 1),
PC_FLAG_WAIT_FOR_DSC = (1 << 2),
PC_FLAG_DMA_OK = (1 << 3),
PC_FLAG_DMA_IN_PROGRESS = (1 << 4),
PC_FLAG_DMA_ERROR = (1 << 5),
PC_FLAG_WRITING = (1 << 6),
PC_FLAG_ABORT = BIT(0),
PC_FLAG_SUPPRESS_ERROR = BIT(1),
PC_FLAG_WAIT_FOR_DSC = BIT(2),
PC_FLAG_DMA_OK = BIT(3),
PC_FLAG_DMA_IN_PROGRESS = BIT(4),
PC_FLAG_DMA_ERROR = BIT(5),
PC_FLAG_WRITING = BIT(6),
};
#define ATAPI_WAIT_PC (60 * HZ)
@@ -417,111 +417,111 @@ struct ide_disk_ops {
/* ATAPI device flags */
enum {
IDE_AFLAG_DRQ_INTERRUPT = (1 << 0),
IDE_AFLAG_DRQ_INTERRUPT = BIT(0),
/* ide-cd */
/* Drive cannot eject the disc. */
IDE_AFLAG_NO_EJECT = (1 << 1),
IDE_AFLAG_NO_EJECT = BIT(1),
/* Drive is a pre ATAPI 1.2 drive. */
IDE_AFLAG_PRE_ATAPI12 = (1 << 2),
IDE_AFLAG_PRE_ATAPI12 = BIT(2),
/* TOC addresses are in BCD. */
IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3),
IDE_AFLAG_TOCADDR_AS_BCD = BIT(3),
/* TOC track numbers are in BCD. */
IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4),
IDE_AFLAG_TOCTRACKS_AS_BCD = BIT(4),
/* Saved TOC information is current. */
IDE_AFLAG_TOC_VALID = (1 << 6),
IDE_AFLAG_TOC_VALID = BIT(6),
/* We think that the drive door is locked. */
IDE_AFLAG_DOOR_LOCKED = (1 << 7),
IDE_AFLAG_DOOR_LOCKED = BIT(7),
/* SET_CD_SPEED command is unsupported. */
IDE_AFLAG_NO_SPEED_SELECT = (1 << 8),
IDE_AFLAG_VERTOS_300_SSD = (1 << 9),
IDE_AFLAG_VERTOS_600_ESD = (1 << 10),
IDE_AFLAG_SANYO_3CD = (1 << 11),
IDE_AFLAG_FULL_CAPS_PAGE = (1 << 12),
IDE_AFLAG_PLAY_AUDIO_OK = (1 << 13),
IDE_AFLAG_LE_SPEED_FIELDS = (1 << 14),
IDE_AFLAG_NO_SPEED_SELECT = BIT(8),
IDE_AFLAG_VERTOS_300_SSD = BIT(9),
IDE_AFLAG_VERTOS_600_ESD = BIT(10),
IDE_AFLAG_SANYO_3CD = BIT(11),
IDE_AFLAG_FULL_CAPS_PAGE = BIT(12),
IDE_AFLAG_PLAY_AUDIO_OK = BIT(13),
IDE_AFLAG_LE_SPEED_FIELDS = BIT(14),
/* ide-floppy */
/* Avoid commands not supported in Clik drive */
IDE_AFLAG_CLIK_DRIVE = (1 << 15),
IDE_AFLAG_CLIK_DRIVE = BIT(15),
/* Requires BH algorithm for packets */
IDE_AFLAG_ZIP_DRIVE = (1 << 16),
IDE_AFLAG_ZIP_DRIVE = BIT(16),
/* Supports format progress report */
IDE_AFLAG_SRFP = (1 << 17),
IDE_AFLAG_SRFP = BIT(17),
/* ide-tape */
IDE_AFLAG_IGNORE_DSC = (1 << 18),
IDE_AFLAG_IGNORE_DSC = BIT(18),
/* 0 When the tape position is unknown */
IDE_AFLAG_ADDRESS_VALID = (1 << 19),
IDE_AFLAG_ADDRESS_VALID = BIT(19),
/* Device already opened */
IDE_AFLAG_BUSY = (1 << 20),
IDE_AFLAG_BUSY = BIT(20),
/* Attempt to auto-detect the current user block size */
IDE_AFLAG_DETECT_BS = (1 << 21),
IDE_AFLAG_DETECT_BS = BIT(21),
/* Currently on a filemark */
IDE_AFLAG_FILEMARK = (1 << 22),
IDE_AFLAG_FILEMARK = BIT(22),
/* 0 = no tape is loaded, so we don't rewind after ejecting */
IDE_AFLAG_MEDIUM_PRESENT = (1 << 23),
IDE_AFLAG_MEDIUM_PRESENT = BIT(23),
IDE_AFLAG_NO_AUTOCLOSE = (1 << 24),
IDE_AFLAG_NO_AUTOCLOSE = BIT(24),
};
/* device flags */
enum {
/* restore settings after device reset */
IDE_DFLAG_KEEP_SETTINGS = (1 << 0),
IDE_DFLAG_KEEP_SETTINGS = BIT(0),
/* device is using DMA for read/write */
IDE_DFLAG_USING_DMA = (1 << 1),
IDE_DFLAG_USING_DMA = BIT(1),
/* okay to unmask other IRQs */
IDE_DFLAG_UNMASK = (1 << 2),
IDE_DFLAG_UNMASK = BIT(2),
/* don't attempt flushes */
IDE_DFLAG_NOFLUSH = (1 << 3),
IDE_DFLAG_NOFLUSH = BIT(3),
/* DSC overlap */
IDE_DFLAG_DSC_OVERLAP = (1 << 4),
IDE_DFLAG_DSC_OVERLAP = BIT(4),
/* give potential excess bandwidth */
IDE_DFLAG_NICE1 = (1 << 5),
IDE_DFLAG_NICE1 = BIT(5),
/* device is physically present */
IDE_DFLAG_PRESENT = (1 << 6),
IDE_DFLAG_PRESENT = BIT(6),
/* disable Host Protected Area */
IDE_DFLAG_NOHPA = (1 << 7),
IDE_DFLAG_NOHPA = BIT(7),
/* id read from device (synthetic if not set) */
IDE_DFLAG_ID_READ = (1 << 8),
IDE_DFLAG_NOPROBE = (1 << 9),
IDE_DFLAG_ID_READ = BIT(8),
IDE_DFLAG_NOPROBE = BIT(9),
/* need to do check_media_change() */
IDE_DFLAG_REMOVABLE = (1 << 10),
IDE_DFLAG_REMOVABLE = BIT(10),
/* needed for removable devices */
IDE_DFLAG_ATTACH = (1 << 11),
IDE_DFLAG_FORCED_GEOM = (1 << 12),
IDE_DFLAG_ATTACH = BIT(11),
IDE_DFLAG_FORCED_GEOM = BIT(12),
/* disallow setting unmask bit */
IDE_DFLAG_NO_UNMASK = (1 << 13),
IDE_DFLAG_NO_UNMASK = BIT(13),
/* disallow enabling 32-bit I/O */
IDE_DFLAG_NO_IO_32BIT = (1 << 14),
IDE_DFLAG_NO_IO_32BIT = BIT(14),
/* for removable only: door lock/unlock works */
IDE_DFLAG_DOORLOCKING = (1 << 15),
IDE_DFLAG_DOORLOCKING = BIT(15),
/* disallow DMA */
IDE_DFLAG_NODMA = (1 << 16),
IDE_DFLAG_NODMA = BIT(16),
/* powermanagement told us not to do anything, so sleep nicely */
IDE_DFLAG_BLOCKED = (1 << 17),
IDE_DFLAG_BLOCKED = BIT(17),
/* sleeping & sleep field valid */
IDE_DFLAG_SLEEPING = (1 << 18),
IDE_DFLAG_POST_RESET = (1 << 19),
IDE_DFLAG_UDMA33_WARNED = (1 << 20),
IDE_DFLAG_LBA48 = (1 << 21),
IDE_DFLAG_SLEEPING = BIT(18),
IDE_DFLAG_POST_RESET = BIT(19),
IDE_DFLAG_UDMA33_WARNED = BIT(20),
IDE_DFLAG_LBA48 = BIT(21),
/* status of write cache */
IDE_DFLAG_WCACHE = (1 << 22),
IDE_DFLAG_WCACHE = BIT(22),
/* used for ignoring ATA_DF */
IDE_DFLAG_NOWERR = (1 << 23),
IDE_DFLAG_NOWERR = BIT(23),
/* retrying in PIO */
IDE_DFLAG_DMA_PIO_RETRY = (1 << 24),
IDE_DFLAG_LBA = (1 << 25),
IDE_DFLAG_DMA_PIO_RETRY = BIT(24),
IDE_DFLAG_LBA = BIT(25),
/* don't unload heads */
IDE_DFLAG_NO_UNLOAD = (1 << 26),
IDE_DFLAG_NO_UNLOAD = BIT(26),
/* heads unloaded, please don't reset port */
IDE_DFLAG_PARKED = (1 << 27),
IDE_DFLAG_MEDIA_CHANGED = (1 << 28),
IDE_DFLAG_PARKED = BIT(27),
IDE_DFLAG_MEDIA_CHANGED = BIT(28),
/* write protect */
IDE_DFLAG_WP = (1 << 29),
IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30),
IDE_DFLAG_NIEN_QUIRK = (1 << 31),
IDE_DFLAG_WP = BIT(29),
IDE_DFLAG_FORMAT_IN_PROGRESS = BIT(30),
IDE_DFLAG_NIEN_QUIRK = BIT(31),
};
struct ide_drive_s {
@@ -709,7 +709,7 @@ struct ide_dma_ops {
};
enum {
IDE_PFLAG_PROBING = (1 << 0),
IDE_PFLAG_PROBING = BIT(0),
};
struct ide_host;
@@ -862,7 +862,7 @@ extern struct mutex ide_setting_mtx;
* configurable drive settings
*/
#define DS_SYNC (1 << 0)
#define DS_SYNC BIT(0)
struct ide_devset {
int (*get)(ide_drive_t *);
@@ -1000,15 +1000,15 @@ static inline void ide_proc_unregister_driver(ide_drive_t *drive,
enum {
/* enter/exit functions */
IDE_DBG_FUNC = (1 << 0),
IDE_DBG_FUNC = BIT(0),
/* sense key/asc handling */
IDE_DBG_SENSE = (1 << 1),
IDE_DBG_SENSE = BIT(1),
/* packet commands handling */
IDE_DBG_PC = (1 << 2),
IDE_DBG_PC = BIT(2),
/* request handling */
IDE_DBG_RQ = (1 << 3),
IDE_DBG_RQ = BIT(3),
/* driver probing/setup */
IDE_DBG_PROBE = (1 << 4),
IDE_DBG_PROBE = BIT(4),
};
/* DRV_NAME has to be defined in the driver before using the macro below */
@@ -1171,10 +1171,10 @@ ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
* the tail of our block device request queue and wait for their completion.
*/
enum {
REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */
REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */
REQ_IDETAPE_READ = (1 << 2),
REQ_IDETAPE_WRITE = (1 << 3),
REQ_IDETAPE_PC1 = BIT(0), /* packet command (first stage) */
REQ_IDETAPE_PC2 = BIT(1), /* packet command (second stage) */
REQ_IDETAPE_READ = BIT(2),
REQ_IDETAPE_WRITE = BIT(3),
};
int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *,
@@ -1264,71 +1264,71 @@ struct ide_pci_enablebit {
enum {
/* Uses ISA control ports not PCI ones. */
IDE_HFLAG_ISA_PORTS = (1 << 0),
IDE_HFLAG_ISA_PORTS = BIT(0),
/* single port device */
IDE_HFLAG_SINGLE = (1 << 1),
IDE_HFLAG_SINGLE = BIT(1),
/* don't use legacy PIO blacklist */
IDE_HFLAG_PIO_NO_BLACKLIST = (1 << 2),
IDE_HFLAG_PIO_NO_BLACKLIST = BIT(2),
/* set for the second port of QD65xx */
IDE_HFLAG_QD_2ND_PORT = (1 << 3),
IDE_HFLAG_QD_2ND_PORT = BIT(3),
/* use PIO8/9 for prefetch off/on */
IDE_HFLAG_ABUSE_PREFETCH = (1 << 4),
IDE_HFLAG_ABUSE_PREFETCH = BIT(4),
/* use PIO6/7 for fast-devsel off/on */
IDE_HFLAG_ABUSE_FAST_DEVSEL = (1 << 5),
IDE_HFLAG_ABUSE_FAST_DEVSEL = BIT(5),
/* use 100-102 and 200-202 PIO values to set DMA modes */
IDE_HFLAG_ABUSE_DMA_MODES = (1 << 6),
IDE_HFLAG_ABUSE_DMA_MODES = BIT(6),
/*
* keep DMA setting when programming PIO mode, may be used only
* for hosts which have separate PIO and DMA timings (ie. PMAC)
*/
IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = (1 << 7),
IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = BIT(7),
/* program host for the transfer mode after programming device */
IDE_HFLAG_POST_SET_MODE = (1 << 8),
IDE_HFLAG_POST_SET_MODE = BIT(8),
/* don't program host/device for the transfer mode ("smart" hosts) */
IDE_HFLAG_NO_SET_MODE = (1 << 9),
IDE_HFLAG_NO_SET_MODE = BIT(9),
/* trust BIOS for programming chipset/device for DMA */
IDE_HFLAG_TRUST_BIOS_FOR_DMA = (1 << 10),
IDE_HFLAG_TRUST_BIOS_FOR_DMA = BIT(10),
/* host is CS5510/CS5520 */
IDE_HFLAG_CS5520 = (1 << 11),
IDE_HFLAG_CS5520 = BIT(11),
/* ATAPI DMA is unsupported */
IDE_HFLAG_NO_ATAPI_DMA = (1 << 12),
IDE_HFLAG_NO_ATAPI_DMA = BIT(12),
/* set if host is a "non-bootable" controller */
IDE_HFLAG_NON_BOOTABLE = (1 << 13),
IDE_HFLAG_NON_BOOTABLE = BIT(13),
/* host doesn't support DMA */
IDE_HFLAG_NO_DMA = (1 << 14),
IDE_HFLAG_NO_DMA = BIT(14),
/* check if host is PCI IDE device before allowing DMA */
IDE_HFLAG_NO_AUTODMA = (1 << 15),
IDE_HFLAG_NO_AUTODMA = BIT(15),
/* host uses MMIO */
IDE_HFLAG_MMIO = (1 << 16),
IDE_HFLAG_MMIO = BIT(16),
/* no LBA48 */
IDE_HFLAG_NO_LBA48 = (1 << 17),
IDE_HFLAG_NO_LBA48 = BIT(17),
/* no LBA48 DMA */
IDE_HFLAG_NO_LBA48_DMA = (1 << 18),
IDE_HFLAG_NO_LBA48_DMA = BIT(18),
/* data FIFO is cleared by an error */
IDE_HFLAG_ERROR_STOPS_FIFO = (1 << 19),
IDE_HFLAG_ERROR_STOPS_FIFO = BIT(19),
/* serialize ports */
IDE_HFLAG_SERIALIZE = (1 << 20),
IDE_HFLAG_SERIALIZE = BIT(20),
/* host is DTC2278 */
IDE_HFLAG_DTC2278 = (1 << 21),
IDE_HFLAG_DTC2278 = BIT(21),
/* 4 devices on a single set of I/O ports */
IDE_HFLAG_4DRIVES = (1 << 22),
IDE_HFLAG_4DRIVES = BIT(22),
/* host is TRM290 */
IDE_HFLAG_TRM290 = (1 << 23),
IDE_HFLAG_TRM290 = BIT(23),
/* use 32-bit I/O ops */
IDE_HFLAG_IO_32BIT = (1 << 24),
IDE_HFLAG_IO_32BIT = BIT(24),
/* unmask IRQs */
IDE_HFLAG_UNMASK_IRQS = (1 << 25),
IDE_HFLAG_BROKEN_ALTSTATUS = (1 << 26),
IDE_HFLAG_UNMASK_IRQS = BIT(25),
IDE_HFLAG_BROKEN_ALTSTATUS = BIT(26),
/* serialize ports if DMA is possible (for sl82c105) */
IDE_HFLAG_SERIALIZE_DMA = (1 << 27),
IDE_HFLAG_SERIALIZE_DMA = BIT(27),
/* force host out of "simplex" mode */
IDE_HFLAG_CLEAR_SIMPLEX = (1 << 28),
IDE_HFLAG_CLEAR_SIMPLEX = BIT(28),
/* DSC overlap is unsupported */
IDE_HFLAG_NO_DSC = (1 << 29),
IDE_HFLAG_NO_DSC = BIT(29),
/* never use 32-bit I/O ops */
IDE_HFLAG_NO_IO_32BIT = (1 << 30),
IDE_HFLAG_NO_IO_32BIT = BIT(30),
/* never unmask IRQs */
IDE_HFLAG_NO_UNMASK_IRQS = (1 << 31),
IDE_HFLAG_NO_UNMASK_IRQS = BIT(31),
};
#ifdef CONFIG_BLK_DEV_OFFBOARD
@@ -1536,16 +1536,16 @@ struct ide_timing {
};
enum {
IDE_TIMING_SETUP = (1 << 0),
IDE_TIMING_ACT8B = (1 << 1),
IDE_TIMING_REC8B = (1 << 2),
IDE_TIMING_CYC8B = (1 << 3),
IDE_TIMING_SETUP = BIT(0),
IDE_TIMING_ACT8B = BIT(1),
IDE_TIMING_REC8B = BIT(2),
IDE_TIMING_CYC8B = BIT(3),
IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B |
IDE_TIMING_CYC8B,
IDE_TIMING_ACTIVE = (1 << 4),
IDE_TIMING_RECOVER = (1 << 5),
IDE_TIMING_CYCLE = (1 << 6),
IDE_TIMING_UDMA = (1 << 7),
IDE_TIMING_ACTIVE = BIT(4),
IDE_TIMING_RECOVER = BIT(5),
IDE_TIMING_CYCLE = BIT(6),
IDE_TIMING_UDMA = BIT(7),
IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT |
IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER |
IDE_TIMING_CYCLE | IDE_TIMING_UDMA,

View File

@@ -137,6 +137,8 @@ extern initcall_entry_t __con_initcall_start[], __con_initcall_end[];
/* Used for constructor calls. */
typedef void (*ctor_fn_t)(void);
struct file_system_type;
/* Defined in init/main.c */
extern int do_one_initcall(initcall_t fn);
extern char __initdata boot_command_line[];
@@ -146,7 +148,8 @@ extern unsigned int reset_devices;
/* used by init/main.c */
void setup_arch(char **);
void prepare_namespace(void);
int __init init_rootfs(void);
void __init init_rootfs(void);
extern struct file_system_type rootfs_fs_type;
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
extern bool rodata_enabled;

View File

@@ -0,0 +1,76 @@
/*
* Elan I2C/SMBus Touchpad device whitelist
*
* Copyright (c) 2013 ELAN Microelectronics Corp.
*
* Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
* Author: KT Liao <kt.liao@emc.com.tw>
* Version: 1.6.3
*
* Based on cyapa driver:
* copyright (c) 2011-2012 Cypress Semiconductor, Inc.
* copyright (c) 2011-2012 Google, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* Trademarks are the property of their respective owners.
*/
#ifndef __ELAN_I2C_IDS_H
#define __ELAN_I2C_IDS_H
#include <linux/mod_devicetable.h>
static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0000", 0 },
{ "ELAN0100", 0 },
{ "ELAN0600", 0 },
{ "ELAN0601", 0 },
{ "ELAN0602", 0 },
{ "ELAN0603", 0 },
{ "ELAN0604", 0 },
{ "ELAN0605", 0 },
{ "ELAN0606", 0 },
{ "ELAN0607", 0 },
{ "ELAN0608", 0 },
{ "ELAN0609", 0 },
{ "ELAN060B", 0 },
{ "ELAN060C", 0 },
{ "ELAN060F", 0 },
{ "ELAN0610", 0 },
{ "ELAN0611", 0 },
{ "ELAN0612", 0 },
{ "ELAN0615", 0 },
{ "ELAN0616", 0 },
{ "ELAN0617", 0 },
{ "ELAN0618", 0 },
{ "ELAN0619", 0 },
{ "ELAN061A", 0 },
{ "ELAN061B", 0 },
{ "ELAN061C", 0 },
{ "ELAN061D", 0 },
{ "ELAN061E", 0 },
{ "ELAN061F", 0 },
{ "ELAN0620", 0 },
{ "ELAN0621", 0 },
{ "ELAN0622", 0 },
{ "ELAN0623", 0 },
{ "ELAN0624", 0 },
{ "ELAN0625", 0 },
{ "ELAN0626", 0 },
{ "ELAN0627", 0 },
{ "ELAN0628", 0 },
{ "ELAN0629", 0 },
{ "ELAN062A", 0 },
{ "ELAN062B", 0 },
{ "ELAN062C", 0 },
{ "ELAN062D", 0 },
{ "ELAN0631", 0 },
{ "ELAN0632", 0 },
{ "ELAN1000", 0 },
{ }
};
#endif /* __ELAN_I2C_IDS_H */

include/linux/intel_rapl.h Normal file
View File

@@ -0,0 +1,155 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Data types and headers for RAPL support
*
* Copyright (C) 2019 Intel Corporation.
*
* Author: Zhang Rui <rui.zhang@intel.com>
*/
#ifndef __INTEL_RAPL_H__
#define __INTEL_RAPL_H__
#include <linux/types.h>
#include <linux/powercap.h>
#include <linux/cpuhotplug.h>
enum rapl_domain_type {
RAPL_DOMAIN_PACKAGE, /* entire package/socket */
RAPL_DOMAIN_PP0, /* core power plane */
RAPL_DOMAIN_PP1, /* graphics uncore */
RAPL_DOMAIN_DRAM, /* DRAM control_type */
RAPL_DOMAIN_PLATFORM, /* PSys control_type */
RAPL_DOMAIN_MAX,
};
enum rapl_domain_reg_id {
RAPL_DOMAIN_REG_LIMIT,
RAPL_DOMAIN_REG_STATUS,
RAPL_DOMAIN_REG_PERF,
RAPL_DOMAIN_REG_POLICY,
RAPL_DOMAIN_REG_INFO,
RAPL_DOMAIN_REG_MAX,
};
struct rapl_package;
enum rapl_primitives {
ENERGY_COUNTER,
POWER_LIMIT1,
POWER_LIMIT2,
FW_LOCK,
PL1_ENABLE, /* power limit 1, aka long term */
PL1_CLAMP, /* allow frequency to go below OS request */
PL2_ENABLE, /* power limit 2, aka short term, instantaneous */
PL2_CLAMP,
TIME_WINDOW1, /* long term */
TIME_WINDOW2, /* short term */
THERMAL_SPEC_POWER,
MAX_POWER,
MIN_POWER,
MAX_TIME_WINDOW,
THROTTLED_TIME,
PRIORITY_LEVEL,
/* below are not raw primitive data */
AVERAGE_POWER,
NR_RAPL_PRIMITIVES,
};
struct rapl_domain_data {
u64 primitives[NR_RAPL_PRIMITIVES];
unsigned long timestamp;
};
#define NR_POWER_LIMITS (2)
struct rapl_power_limit {
struct powercap_zone_constraint *constraint;
int prim_id; /* primitive ID used to enable */
struct rapl_domain *domain;
const char *name;
u64 last_power_limit;
};
struct rapl_package;
struct rapl_domain {
const char *name;
enum rapl_domain_type id;
u64 regs[RAPL_DOMAIN_REG_MAX];
struct powercap_zone power_zone;
struct rapl_domain_data rdd;
struct rapl_power_limit rpl[NR_POWER_LIMITS];
u64 attr_map; /* track capabilities */
unsigned int state;
unsigned int domain_energy_unit;
struct rapl_package *rp;
};
struct reg_action {
u64 reg;
u64 mask;
u64 value;
int err;
};
/**
* struct rapl_if_priv: private data for different RAPL interfaces
* @control_type: Each RAPL interface must have its own powercap
* control type.
* @platform_rapl_domain: Optional. Some RAPL interfaces may have
* platform-level RAPL control.
* @pcap_rapl_online: CPU hotplug state for each RAPL interface.
* @reg_unit: Register for getting energy/power/time unit.
* @regs: Register sets for different RAPL Domains.
* @limits: Number of power limits supported by each domain.
* @read_raw: Callback for reading RAPL interface specific
* registers.
* @write_raw: Callback for writing RAPL interface specific
* registers.
*/
struct rapl_if_priv {
struct powercap_control_type *control_type;
struct rapl_domain *platform_rapl_domain;
enum cpuhp_state pcap_rapl_online;
u64 reg_unit;
u64 regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX];
int limits[RAPL_DOMAIN_MAX];
int (*read_raw)(int cpu, struct reg_action *ra);
int (*write_raw)(int cpu, struct reg_action *ra);
};
/* maximum rapl package domain name: package-%d-die-%d */
#define PACKAGE_DOMAIN_NAME_LENGTH 30
struct rapl_package {
unsigned int id; /* logical die id, equals the physical id on 1-die systems */
unsigned int nr_domains;
unsigned long domain_map; /* bit map of active domains */
unsigned int power_unit;
unsigned int energy_unit;
unsigned int time_unit;
struct rapl_domain *domains; /* array of domains, sized at runtime */
struct powercap_zone *power_zone; /* keep track of parent zone */
unsigned long power_limit_irq; /* keep track of package power limit
* notify interrupt enable status.
*/
struct list_head plist;
int lead_cpu; /* one active cpu per package for access */
/* Track active cpus */
struct cpumask cpumask;
char name[PACKAGE_DOMAIN_NAME_LENGTH];
struct rapl_if_priv *priv;
};
struct rapl_package *rapl_find_package_domain(int cpu, struct rapl_if_priv *priv);
struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv);
void rapl_remove_package(struct rapl_package *rp);
int rapl_add_platform_domain(struct rapl_if_priv *priv);
void rapl_remove_platform_domain(struct rapl_if_priv *priv);
#endif /* __INTEL_RAPL_H__ */
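A consumer of this header registers a struct rapl_if_priv with read_raw/write_raw accessors and then attaches packages. A compressed sketch mirroring how an MSR-backed interface might do it; rdmsrl_safe_on_cpu() as the accessor and the hotplug-callback framing are assumptions, not part of this header:
#include <linux/err.h>
#include <linux/intel_rapl.h>
#include <asm/msr.h>
/* Sketch: fill ra->value from the register encoded in ra->reg. */
static int my_read_raw(int cpu, struct reg_action *ra)
{
	ra->err = rdmsrl_safe_on_cpu(cpu, ra->reg, &ra->value);
	return ra->err;
}
static struct rapl_if_priv my_rapl_priv = {
	.read_raw = my_read_raw,
	/* .write_raw, .reg_unit, .regs[][] and .limits[] filled at init */
};
static int my_rapl_cpu_online(unsigned int cpu)
{
	struct rapl_package *rp;
	rp = rapl_find_package_domain(cpu, &my_rapl_priv);
	if (!rp) {
		rp = rapl_add_package(cpu, &my_rapl_priv);
		if (IS_ERR(rp))
			return PTR_ERR(rp);
	}
	return 0;
}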

View File

@@ -52,7 +52,7 @@
* irq line disabled until the threaded handler has been run.
* IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
* that this interrupt will wake the system from a suspended
* state. See Documentation/power/suspend-and-interrupts.txt
* state. See Documentation/power/suspend-and-interrupts.rst
* IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
* IRQF_NO_THREAD - Interrupt cannot be threaded
* IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device

View File

@@ -33,6 +33,7 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end,
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
void __init ioremap_huge_init(void);
int arch_ioremap_p4d_supported(void);
int arch_ioremap_pud_supported(void);
int arch_ioremap_pmd_supported(void);
#else

View File

@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mm_types.h>
#include <linux/blkdev.h>
struct address_space;
struct fiemap_extent_info;
@@ -69,6 +70,12 @@ struct iomap {
const struct iomap_page_ops *page_ops;
};
static inline sector_t
iomap_sector(struct iomap *iomap, loff_t pos)
{
return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}
/*
* When a filesystem sets page_ops in an iomap mapping it returns, page_prepare
* and page_done will be called for each page written to. This only applies to
@@ -115,6 +122,16 @@ struct iomap_ops {
ssize_t written, unsigned flags, struct iomap *iomap);
};
/*
* Main iomap iterator function.
*/
typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
void *data, struct iomap *iomap);
loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
unsigned flags, const struct iomap_ops *ops, void *data,
iomap_actor_t actor);
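iomap_apply() performs one iteration: it asks the filesystem's ->iomap_begin() for a mapping covering (pos, length), hands that mapping to the actor, then finishes with ->iomap_end(); the actor's return value is the number of bytes processed, which the caller uses to advance before calling iomap_apply() again. A toy actor, purely illustrative, that tallies how many bytes are backed by real extents:
static loff_t
count_mapped_actor(struct inode *inode, loff_t pos, loff_t length,
		   void *data, struct iomap *iomap)
{
	loff_t *total = data;
	if (iomap->type != IOMAP_HOLE)
		*total += length;
	return length;	/* consumed the whole extent, caller keeps iterating */
}
/*
 * Usage sketch (callers typically loop until the range is consumed):
 *	loff_t total = 0;
 *	iomap_apply(inode, 0, i_size_read(inode), 0, ops,
 *		    &total, count_mapped_actor);
 */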
/*
* Structure allocated for each page when block size < PAGE_SIZE to track
* sub-page uptodate status and I/O completions.

View File

@@ -133,8 +133,7 @@ enum {
IORES_DESC_PERSISTENT_MEMORY = 4,
IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
IORES_DESC_DEVICE_PRIVATE_MEMORY = 6,
IORES_DESC_DEVICE_PUBLIC_MEMORY = 7,
IORES_DESC_RESERVED = 8,
IORES_DESC_RESERVED = 7,
};
/*
@@ -296,6 +295,8 @@ static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
return (r1->start <= r2->end && r1->end >= r2->start);
}
struct resource *devm_request_free_mem_region(struct device *dev,
struct resource *base, unsigned long size);
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_IOPORT_H */

View File

@@ -112,6 +112,30 @@ inode_peek_iversion_raw(const struct inode *inode)
return atomic64_read(&inode->i_version);
}
/**
* inode_set_max_iversion_raw - update i_version if the new value is larger
* @inode: inode to set
* @val: new i_version to set
*
* Some self-managed filesystems (e.g. Ceph) will only update the i_version
* value if the new value is larger than the one we already have.
*/
static inline void
inode_set_max_iversion_raw(struct inode *inode, u64 val)
{
u64 cur, old;
cur = inode_peek_iversion_raw(inode);
for (;;) {
if (cur > val)
break;
old = atomic64_cmpxchg(&inode->i_version, cur, val);
if (likely(old == cur))
break;
cur = old;
}
}
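For example, a filesystem that receives authoritative change attributes from a server (the Ceph case mentioned above) can feed them in directly; racing replies are harmless because the cmpxchg loop only ever moves i_version forward. The helper below is a hypothetical sketch:
#include <linux/iversion.h>
/* Hypothetical: apply a change attribute reported by the server. */
static void mycl_update_changeattr(struct inode *inode, u64 srv_change_attr)
{
	inode_set_max_iversion_raw(inode, srv_change_attr);
}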
/**
* inode_set_iversion - set i_version to a particular value
* @inode: inode to set

View File

@@ -88,6 +88,8 @@
*/
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
#define typeof_member(T, m) typeof(((T*)0)->m)
#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
#define DIV_ROUND_DOWN_ULL(ll, d) \

View File

@@ -458,4 +458,23 @@ static inline bool is_kprobe_optinsn_slot(unsigned long addr)
}
#endif
/* Returns true if kprobes handled the fault */
static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
unsigned int trap)
{
if (!kprobes_built_in())
return false;
if (user_mode(regs))
return false;
/*
* To be potentially processing a kprobe fault and to be allowed
* to call kprobe_running(), we have to be non-preemptible.
*/
if (preemptible())
return false;
if (!kprobe_running())
return false;
return kprobe_fault_handler(regs, trap);
}
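Each architecture's fault handler can now use this helper instead of open-coding the preemption and kprobe_running() checks; on x86 the early-out looks roughly like the sketch below (the surrounding handler is abbreviated):
/* Sketch: early in an arch do_page_fault(), before taking mmap_sem. */
if (kprobe_page_fault(regs, X86_TRAP_PF))
	return;	/* the kprobe machinery handled this fault */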
#endif /* _LINUX_KPROBES_H */

View File

@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/uuid.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
struct badrange_entry {
u64 start;
@@ -57,6 +58,9 @@ enum {
*/
ND_REGION_PERSIST_MEMCTRL = 2,
/* Platform provides asynchronous flush mechanism */
ND_REGION_ASYNC = 3,
/* mark newly adjusted resources as requiring a label update */
DPA_RESOURCE_ADJUSTED = 1 << 0,
};
@@ -113,6 +117,7 @@ struct nd_mapping_desc {
int position;
};
struct nd_region;
struct nd_region_desc {
struct resource *res;
struct nd_mapping_desc *mapping;
@@ -125,6 +130,7 @@ struct nd_region_desc {
int target_node;
unsigned long flags;
struct device_node *of_node;
int (*flush)(struct nd_region *nd_region, struct bio *bio);
};
struct device;
@@ -252,10 +258,12 @@ unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr);
unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
u64 nd_fletcher64(void *addr, size_t len, bool le);
void nvdimm_flush(struct nd_region *nd_region);
int nvdimm_flush(struct nd_region *nd_region, struct bio *bio);
int generic_nvdimm_flush(struct nd_region *nd_region);
int nvdimm_has_flush(struct nd_region *nd_region);
int nvdimm_has_cache(struct nd_region *nd_region);
int nvdimm_in_overwrite(struct nvdimm *nvdimm);
bool is_nvdimm_sync(struct nd_region *nd_region);
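Passing the bio lets nvdimm_flush() route a flush request through a region's custom ->flush() callback, with generic_nvdimm_flush() as the default. A sketch of how a pmem-style submission path might call it; the function framing and q->queuedata usage are assumptions:
#include <linux/blkdev.h>
#include <linux/libnvdimm.h>
/* Sketch of an nvdimm region driver's bio path (names assumed). */
static blk_qc_t mypmem_make_request(struct request_queue *q, struct bio *bio)
{
	struct nd_region *nd_region = q->queuedata;	/* assumption */
	int ret = 0;
	if (bio->bi_opf & REQ_PREFLUSH)
		ret = nvdimm_flush(nd_region, bio);
	/* ... transfer the data, then complete the bio, noting 'ret' ... */
	return BLK_QC_T_NONE;
}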
static inline int nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd, void *buf,
unsigned int buf_len, int *cmd_rc)

View File

@@ -5,7 +5,7 @@
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* see Documentation/locking/lockdep-design.txt for more details.
* see Documentation/locking/lockdep-design.rst for more details.
*/
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

View File

@@ -278,7 +278,7 @@ int LZ4_decompress_fast(const char *source, char *dest, int originalSize);
* @compressedSize: is the precise full size of the compressed block
* @maxDecompressedSize: is the size of 'dest' buffer
*
* Decompresses data fom 'source' into 'dest'.
* Decompresses data from 'source' into 'dest'.
* If the source stream is detected malformed, the function will
* stop decoding and return a negative result.
* This function is protected against buffer overflow exploits,
@@ -522,7 +522,7 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
const char *dictionary, int dictSize);
/**
* LZ4_decompress_fast_continue() - Decompress blocks in streaming mode
* LZ4_decompress_safe_continue() - Decompress blocks in streaming mode
* @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
* @source: source address of the compressed data
* @dest: output buffer address of the uncompressed data
@@ -530,7 +530,7 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
* @compressedSize: is the precise full size of the compressed block
* @maxDecompressedSize: is the size of 'dest' buffer
*
* These decoding function allows decompression of multiple blocks
* This decoding function allows decompression of multiple blocks
* in "streaming" mode.
* Previously decoded blocks *must* remain available at the memory position
* where they were decoded (up to 64 KB)
@@ -569,7 +569,7 @@ int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
* which must be already allocated with 'originalSize' bytes
* @originalSize: is the original and therefore uncompressed size
*
* These decoding function allows decompression of multiple blocks
* This decoding function allows decompression of multiple blocks
* in "streaming" mode.
* Previously decoded blocks *must* remain available at the memory position
* where they were decoded (up to 64 KB)
@@ -610,10 +610,10 @@ int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
* @dictStart: pointer to the start of the dictionary in memory
* @dictSize: size of dictionary
*
* These decoding function works the same as
* This decoding function works the same as
* a combination of LZ4_setStreamDecode() followed by
* LZ4_decompress_safe_continue()
* It is stand-alone, and don'tn eed a LZ4_streamDecode_t structure.
* It is stand-alone, and doesn't need an LZ4_streamDecode_t structure.
*
* Return: number of bytes decompressed into destination buffer
* (necessarily <= maxDecompressedSize)
@@ -633,10 +633,10 @@ int LZ4_decompress_safe_usingDict(const char *source, char *dest,
* @dictStart: pointer to the start of the dictionary in memory
* @dictSize: size of dictionary
*
* These decoding function works the same as
* This decoding function works the same as
* a combination of LZ4_setStreamDecode() followed by
* LZ4_decompress_safe_continue()
* It is stand-alone, and don'tn eed a LZ4_streamDecode_t structure.
* LZ4_decompress_fast_continue()
* It is stand-alone, and doesn't need an LZ4_streamDecode_t structure.
*
* Return: number of bytes decompressed into destination buffer
* (necessarily <= maxDecompressedSize)

View File

@@ -111,16 +111,15 @@ extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
extern int register_memory_isolate_notifier(struct notifier_block *nb);
extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
int hotplug_memory_register(int nid, struct mem_section *section);
#ifdef CONFIG_MEMORY_HOTREMOVE
extern void unregister_memory_section(struct mem_section *);
#endif
int create_memory_block_devices(unsigned long start, unsigned long size);
void remove_memory_block_devices(unsigned long start, unsigned long size);
extern int memory_dev_init(void);
extern int memory_notify(unsigned long val, void *v);
extern int memory_isolate_notify(unsigned long val, void *v);
extern struct memory_block *find_memory_block_hinted(struct mem_section *,
struct memory_block *);
extern struct memory_block *find_memory_block(struct mem_section *);
typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
extern int walk_memory_blocks(unsigned long start, unsigned long size,
void *arg, walk_memory_blocks_func_t func);
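walk_memory_blocks() iterates over the memory_block devices covering [start, start + size) and stops early if the callback returns nonzero. A hedged sketch of a counting callback (the counting itself is illustrative):
static int count_online_blocks(struct memory_block *mem, void *arg)
{
	unsigned int *count = arg;
	if (mem->state == MEM_ONLINE)
		(*count)++;
	return 0;	/* nonzero would abort the walk */
}
/* usage: walk_memory_blocks(start, size, &count, count_online_blocks); */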
#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

View File

@@ -123,20 +123,10 @@ static inline bool movable_node_is_enabled(void)
return movable_node_enabled;
}
#ifdef CONFIG_MEMORY_HOTREMOVE
extern void arch_remove_memory(int nid, u64 start, u64 size,
struct vmem_altmap *altmap);
extern void __remove_pages(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages, struct vmem_altmap *altmap);
#endif /* CONFIG_MEMORY_HOTREMOVE */
/*
* Do we want sysfs memblock files created. This will allow userspace to online
* and offline memory explicitly. Lack of this bit means that the caller has to
* call move_pfn_range_to_zone to finish the initialization.
*/
#define MHP_MEMBLOCK_API (1<<0)
/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
@@ -324,7 +314,7 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern void remove_memory(int nid, u64 start, u64 size);
extern int remove_memory(int nid, u64 start, u64 size);
extern void __remove_memory(int nid, u64 start, u64 size);
#else
@@ -341,22 +331,25 @@ static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
return -EINVAL;
}
static inline void remove_memory(int nid, u64 start, u64 size) {}
static inline int remove_memory(int nid, u64 start, u64 size)
{
return -EBUSY;
}
static inline void __remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
extern void __ref free_area_init_core_hotplug(int nid);
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
void *arg, int (*func)(struct memory_block *, void *));
extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages, struct vmem_altmap *altmap);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_one_section(int nid, unsigned long start_pfn,
struct vmem_altmap *altmap);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
extern int sparse_add_section(int nid, unsigned long pfn,
unsigned long nr_pages, struct vmem_altmap *altmap);
extern void sparse_remove_section(struct mem_section *ms,
unsigned long pfn, unsigned long nr_pages,
unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);

View File

@@ -37,13 +37,6 @@ struct vmem_altmap {
* A more complete discussion of unaddressable memory may be found in
* include/linux/hmm.h and Documentation/vm/hmm.rst.
*
* MEMORY_DEVICE_PUBLIC:
* Device memory that is cache coherent from device and CPU point of view. This
* is use on platform that have an advance system bus (like CAPI or CCIX). A
* driver can hotplug the device memory using ZONE_DEVICE and with that memory
* type. Any page of a process can be migrated to such memory. However no one
* should be allow to pin such memory so that it can always be evicted.
*
* MEMORY_DEVICE_FS_DAX:
* Host memory that has similar access semantics as System RAM i.e. DMA
* coherent and supports page pinning. In support of coordinating page
@@ -52,54 +45,84 @@ struct vmem_altmap {
* wakeup is used to coordinate physical address space management (ex:
* fs truncate/hole punch) vs pinned pages (ex: device dma).
*
* MEMORY_DEVICE_DEVDAX:
* Host memory that has similar access semantics as System RAM i.e. DMA
* coherent and supports page pinning. In contrast to
* MEMORY_DEVICE_FS_DAX, this memory is accessed via a device-dax
* character device.
*
* MEMORY_DEVICE_PCI_P2PDMA:
* Device memory residing in a PCI BAR intended for use with Peer-to-Peer
* transactions.
*/
enum memory_type {
/* 0 is reserved to catch uninitialized type fields */
MEMORY_DEVICE_PRIVATE = 1,
MEMORY_DEVICE_PUBLIC,
MEMORY_DEVICE_FS_DAX,
MEMORY_DEVICE_DEVDAX,
MEMORY_DEVICE_PCI_P2PDMA,
};
/*
* Additional notes about MEMORY_DEVICE_PRIVATE may be found in
* include/linux/hmm.h and Documentation/vm/hmm.rst. There is also a brief
* explanation in include/linux/memory_hotplug.h.
*
* The page_free() callback is called once the page refcount reaches 1
* (ZONE_DEVICE pages never reach 0 refcount unless there is a refcount bug.
* This allows the device driver to implement its own memory management.)
*/
typedef void (*dev_page_free_t)(struct page *page, void *data);
struct dev_pagemap_ops {
/*
* Called once the page refcount reaches 1. (ZONE_DEVICE pages never
* reach 0 refcount unless there is a refcount bug. This allows the
* device driver to implement its own memory management.)
*/
void (*page_free)(struct page *page);
/*
* Transition the refcount in struct dev_pagemap to the dead state.
*/
void (*kill)(struct dev_pagemap *pgmap);
/*
* Wait for refcount in struct dev_pagemap to be idle and reap it.
*/
void (*cleanup)(struct dev_pagemap *pgmap);
/*
* Used for private (un-addressable) device memory only. Must migrate
* the page back to a CPU accessible page.
*/
vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
};
#define PGMAP_ALTMAP_VALID (1 << 0)
/**
* struct dev_pagemap - metadata for ZONE_DEVICE mappings
* @page_free: free page callback when page refcount reaches 1
* @altmap: pre-allocated/reserved memory for vmemmap allocations
* @res: physical address range covered by @ref
* @ref: reference count that pins the devm_memremap_pages() mapping
* @kill: callback to transition @ref to the dead state
* @cleanup: callback to wait for @ref to be idle and reap it
* @internal_ref: internal reference if @ref is not provided by the caller
* @done: completion for @internal_ref
* @dev: host device of the mapping for debug
* @data: private data pointer for page_free()
* @type: memory type: see MEMORY_* in memory_hotplug.h
* @flags: PGMAP_* flags to specify detailed behavior
* @ops: method table
*/
struct dev_pagemap {
dev_page_free_t page_free;
struct vmem_altmap altmap;
bool altmap_valid;
struct resource res;
struct percpu_ref *ref;
void (*kill)(struct percpu_ref *ref);
void (*cleanup)(struct percpu_ref *ref);
struct percpu_ref internal_ref;
struct completion done;
struct device *dev;
void *data;
enum memory_type type;
unsigned int flags;
u64 pci_p2pdma_bus_offset;
const struct dev_pagemap_ops *ops;
};
static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
{
if (pgmap->flags & PGMAP_ALTMAP_VALID)
return &pgmap->altmap;
return NULL;
}
#ifdef CONFIG_ZONE_DEVICE
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
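Taken together, the reworked layout moves the callbacks into dev_pagemap_ops and lets devm_memremap_pages() fall back to @internal_ref when no @ref is supplied. A minimal sketch of how a driver might wire this up; the my_* names and the choice of MEMORY_DEVICE_PCI_P2PDMA are illustrative assumptions, not taken from this patch:

/* Hedged sketch (hypothetical driver): map a BAR as ZONE_DEVICE pages. */
static void my_page_free(struct page *page)
{
	/* return the page to a driver-private allocator */
}

static const struct dev_pagemap_ops my_pgmap_ops = {
	.page_free = my_page_free,
};

static int my_map_bar(struct device *dev, struct resource *bar)
{
	struct dev_pagemap *pgmap;
	void *addr;

	pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return -ENOMEM;

	pgmap->res = *bar;			/* range covered by @ref */
	pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
	pgmap->ops = &my_pgmap_ops;		/* no @ref: @internal_ref is used */

	addr = devm_memremap_pages(dev, pgmap);
	return PTR_ERR_OR_ZERO(addr);
}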

View File

@@ -19,6 +19,7 @@
#define CROS_EC_DEV_PD_NAME "cros_pd"
#define CROS_EC_DEV_TP_NAME "cros_tp"
#define CROS_EC_DEV_ISH_NAME "cros_ish"
#define CROS_EC_DEV_SCP_NAME "cros_scp"
/*
* The EC is unresponsive for a time after a reboot command. Add a

View File

@@ -14,6 +14,7 @@
enum lp87565_device_type {
LP87565_DEVICE_TYPE_UNKNOWN = 0,
LP87565_DEVICE_TYPE_LP87561_Q1,
LP87565_DEVICE_TYPE_LP87565_Q1,
};
@@ -246,6 +247,7 @@ enum LP87565_regulator_id {
LP87565_BUCK_3,
LP87565_BUCK_10,
LP87565_BUCK_23,
LP87565_BUCK_3210,
};
/**

View File

@@ -1,12 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MFD internals for Cirrus Logic Madera codecs
*
* Copyright (C) 2015-2018 Cirrus Logic
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2.
*/
#ifndef MADERA_CORE_H
@@ -26,15 +22,21 @@ enum madera_type {
CS47L85 = 2,
CS47L90 = 3,
CS47L91 = 4,
CS47L92 = 5,
CS47L93 = 6,
WM1840 = 7,
CS47L15 = 8,
CS42L92 = 9,
};
#define MADERA_MAX_CORE_SUPPLIES 2
#define MADERA_MAX_GPIOS 40
#define CS47L15_NUM_GPIOS 15
#define CS47L35_NUM_GPIOS 16
#define CS47L85_NUM_GPIOS 40
#define CS47L90_NUM_GPIOS 38
#define CS47L92_NUM_GPIOS 16
#define MADERA_MAX_MICBIAS 4

View File

@@ -1,12 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Platform data for Cirrus Logic Madera codecs
*
* Copyright (C) 2015-2018 Cirrus Logic
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2.
*/
#ifndef MADERA_PDATA_H
@@ -35,7 +31,8 @@ struct madera_codec_pdata;
* @micvdd: Substruct of pdata for the MICVDD regulator
* @irq_flags: Mode for primary IRQ (defaults to active low)
* @gpio_base: Base GPIO number
* @gpio_configs: Array of GPIO configurations (See Documentation/pinctrl.txt)
* @gpio_configs: Array of GPIO configurations (See
* Documentation/driver-api/pinctl.rst)
* @n_gpio_configs: Number of entries in gpio_configs
* @gpsw: General purpose switch mode setting. Depends on the external
* hardware connected to the switch. (See the SW1_MODE field

View File

@@ -1,12 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Madera register definitions
*
* Copyright (C) 2015-2018 Cirrus Logic
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2.
*/
#ifndef MADERA_REGISTERS_H
@@ -76,10 +72,14 @@
#define MADERA_FLL1_CONTROL_4 0x174
#define MADERA_FLL1_CONTROL_5 0x175
#define MADERA_FLL1_CONTROL_6 0x176
#define MADERA_FLL1_LOOP_FILTER_TEST_1 0x177
#define MADERA_FLL1_NCO_TEST_0 0x178
#define CS47L92_FLL1_CONTROL_7 0x177
#define CS47L92_FLL1_CONTROL_8 0x178
#define MADERA_FLL1_CONTROL_7 0x179
#define CS47L92_FLL1_CONTROL_9 0x179
#define MADERA_FLL1_EFS_2 0x17A
#define CS47L92_FLL1_CONTROL_10 0x17A
#define MADERA_FLL1_CONTROL_11 0x17B
#define MADERA_FLL1_DIGITAL_TEST_1 0x17D
#define CS47L35_FLL1_SYNCHRONISER_1 0x17F
#define CS47L35_FLL1_SYNCHRONISER_2 0x180
#define CS47L35_FLL1_SYNCHRONISER_3 0x181
@@ -98,16 +98,21 @@
#define MADERA_FLL1_SYNCHRONISER_7 0x187
#define MADERA_FLL1_SPREAD_SPECTRUM 0x189
#define MADERA_FLL1_GPIO_CLOCK 0x18A
#define CS47L92_FLL1_GPIO_CLOCK 0x18E
#define MADERA_FLL2_CONTROL_1 0x191
#define MADERA_FLL2_CONTROL_2 0x192
#define MADERA_FLL2_CONTROL_3 0x193
#define MADERA_FLL2_CONTROL_4 0x194
#define MADERA_FLL2_CONTROL_5 0x195
#define MADERA_FLL2_CONTROL_6 0x196
#define MADERA_FLL2_LOOP_FILTER_TEST_1 0x197
#define MADERA_FLL2_NCO_TEST_0 0x198
#define CS47L92_FLL2_CONTROL_7 0x197
#define CS47L92_FLL2_CONTROL_8 0x198
#define MADERA_FLL2_CONTROL_7 0x199
#define CS47L92_FLL2_CONTROL_9 0x199
#define MADERA_FLL2_EFS_2 0x19A
#define CS47L92_FLL2_CONTROL_10 0x19A
#define MADERA_FLL2_CONTROL_11 0x19B
#define MADERA_FLL2_DIGITAL_TEST_1 0x19D
#define MADERA_FLL2_SYNCHRONISER_1 0x1A1
#define MADERA_FLL2_SYNCHRONISER_2 0x1A2
#define MADERA_FLL2_SYNCHRONISER_3 0x1A3
@@ -117,14 +122,13 @@
#define MADERA_FLL2_SYNCHRONISER_7 0x1A7
#define MADERA_FLL2_SPREAD_SPECTRUM 0x1A9
#define MADERA_FLL2_GPIO_CLOCK 0x1AA
#define CS47L92_FLL2_GPIO_CLOCK 0x1AE
#define MADERA_FLL3_CONTROL_1 0x1B1
#define MADERA_FLL3_CONTROL_2 0x1B2
#define MADERA_FLL3_CONTROL_3 0x1B3
#define MADERA_FLL3_CONTROL_4 0x1B4
#define MADERA_FLL3_CONTROL_5 0x1B5
#define MADERA_FLL3_CONTROL_6 0x1B6
#define MADERA_FLL3_LOOP_FILTER_TEST_1 0x1B7
#define MADERA_FLL3_NCO_TEST_0 0x1B8
#define MADERA_FLL3_CONTROL_7 0x1B9
#define MADERA_FLL3_SYNCHRONISER_1 0x1C1
#define MADERA_FLL3_SYNCHRONISER_2 0x1C2
@@ -244,6 +248,8 @@
#define MADERA_IN6R_CONTROL 0x33C
#define MADERA_ADC_DIGITAL_VOLUME_6R 0x33D
#define MADERA_DMIC6R_CONTROL 0x33E
#define CS47L15_ADC_INT_BIAS 0x3A8
#define CS47L15_PGA_BIAS_SEL 0x3C4
#define MADERA_OUTPUT_ENABLES_1 0x400
#define MADERA_OUTPUT_STATUS_1 0x401
#define MADERA_RAW_OUTPUT_STATUS_1 0x406
@@ -265,6 +271,7 @@
#define MADERA_NOISE_GATE_SELECT_2R 0x41F
#define MADERA_OUTPUT_PATH_CONFIG_3L 0x420
#define MADERA_DAC_DIGITAL_VOLUME_3L 0x421
#define MADERA_OUTPUT_PATH_CONFIG_3 0x422
#define MADERA_NOISE_GATE_SELECT_3L 0x423
#define MADERA_OUTPUT_PATH_CONFIG_3R 0x424
#define MADERA_DAC_DIGITAL_VOLUME_3R 0x425
@@ -287,9 +294,6 @@
#define MADERA_OUTPUT_PATH_CONFIG_6R 0x43C
#define MADERA_DAC_DIGITAL_VOLUME_6R 0x43D
#define MADERA_NOISE_GATE_SELECT_6R 0x43F
#define MADERA_DRE_ENABLE 0x440
#define MADERA_EDRE_ENABLE 0x448
#define MADERA_EDRE_MANUAL 0x44A
#define MADERA_DAC_AEC_CONTROL_1 0x450
#define MADERA_DAC_AEC_CONTROL_2 0x451
#define MADERA_NOISE_GATE_CONTROL 0x458
@@ -367,8 +371,20 @@
#define MADERA_AIF3_FRAME_CTRL_2 0x588
#define MADERA_AIF3_FRAME_CTRL_3 0x589
#define MADERA_AIF3_FRAME_CTRL_4 0x58A
#define MADERA_AIF3_FRAME_CTRL_5 0x58B
#define MADERA_AIF3_FRAME_CTRL_6 0x58C
#define MADERA_AIF3_FRAME_CTRL_7 0x58D
#define MADERA_AIF3_FRAME_CTRL_8 0x58E
#define MADERA_AIF3_FRAME_CTRL_9 0x58F
#define MADERA_AIF3_FRAME_CTRL_10 0x590
#define MADERA_AIF3_FRAME_CTRL_11 0x591
#define MADERA_AIF3_FRAME_CTRL_12 0x592
#define MADERA_AIF3_FRAME_CTRL_13 0x593
#define MADERA_AIF3_FRAME_CTRL_14 0x594
#define MADERA_AIF3_FRAME_CTRL_15 0x595
#define MADERA_AIF3_FRAME_CTRL_16 0x596
#define MADERA_AIF3_FRAME_CTRL_17 0x597
#define MADERA_AIF3_FRAME_CTRL_18 0x598
#define MADERA_AIF3_TX_ENABLES 0x599
#define MADERA_AIF3_RX_ENABLES 0x59A
#define MADERA_AIF3_FORCE_WRITE 0x59B
@@ -660,6 +676,54 @@
#define MADERA_AIF3TX2MIX_INPUT_3_VOLUME 0x78D
#define MADERA_AIF3TX2MIX_INPUT_4_SOURCE 0x78E
#define MADERA_AIF3TX2MIX_INPUT_4_VOLUME 0x78F
#define MADERA_AIF3TX3MIX_INPUT_1_SOURCE 0x790
#define MADERA_AIF3TX3MIX_INPUT_1_VOLUME 0x791
#define MADERA_AIF3TX3MIX_INPUT_2_SOURCE 0x792
#define MADERA_AIF3TX3MIX_INPUT_2_VOLUME 0x793
#define MADERA_AIF3TX3MIX_INPUT_3_SOURCE 0x794
#define MADERA_AIF3TX3MIX_INPUT_3_VOLUME 0x795
#define MADERA_AIF3TX3MIX_INPUT_4_SOURCE 0x796
#define MADERA_AIF3TX3MIX_INPUT_4_VOLUME 0x797
#define MADERA_AIF3TX4MIX_INPUT_1_SOURCE 0x798
#define MADERA_AIF3TX4MIX_INPUT_1_VOLUME 0x799
#define MADERA_AIF3TX4MIX_INPUT_2_SOURCE 0x79A
#define MADERA_AIF3TX4MIX_INPUT_2_VOLUME 0x79B
#define MADERA_AIF3TX4MIX_INPUT_3_SOURCE 0x79C
#define MADERA_AIF3TX4MIX_INPUT_3_VOLUME 0x79D
#define MADERA_AIF3TX4MIX_INPUT_4_SOURCE 0x79E
#define MADERA_AIF3TX4MIX_INPUT_4_VOLUME 0x79F
#define CS47L92_AIF3TX5MIX_INPUT_1_SOURCE 0x7A0
#define CS47L92_AIF3TX5MIX_INPUT_1_VOLUME 0x7A1
#define CS47L92_AIF3TX5MIX_INPUT_2_SOURCE 0x7A2
#define CS47L92_AIF3TX5MIX_INPUT_2_VOLUME 0x7A3
#define CS47L92_AIF3TX5MIX_INPUT_3_SOURCE 0x7A4
#define CS47L92_AIF3TX5MIX_INPUT_3_VOLUME 0x7A5
#define CS47L92_AIF3TX5MIX_INPUT_4_SOURCE 0x7A6
#define CS47L92_AIF3TX5MIX_INPUT_4_VOLUME 0x7A7
#define CS47L92_AIF3TX6MIX_INPUT_1_SOURCE 0x7A8
#define CS47L92_AIF3TX6MIX_INPUT_1_VOLUME 0x7A9
#define CS47L92_AIF3TX6MIX_INPUT_2_SOURCE 0x7AA
#define CS47L92_AIF3TX6MIX_INPUT_2_VOLUME 0x7AB
#define CS47L92_AIF3TX6MIX_INPUT_3_SOURCE 0x7AC
#define CS47L92_AIF3TX6MIX_INPUT_3_VOLUME 0x7AD
#define CS47L92_AIF3TX6MIX_INPUT_4_SOURCE 0x7AE
#define CS47L92_AIF3TX6MIX_INPUT_4_VOLUME 0x7AF
#define CS47L92_AIF3TX7MIX_INPUT_1_SOURCE 0x7B0
#define CS47L92_AIF3TX7MIX_INPUT_1_VOLUME 0x7B1
#define CS47L92_AIF3TX7MIX_INPUT_2_SOURCE 0x7B2
#define CS47L92_AIF3TX7MIX_INPUT_2_VOLUME 0x7B3
#define CS47L92_AIF3TX7MIX_INPUT_3_SOURCE 0x7B4
#define CS47L92_AIF3TX7MIX_INPUT_3_VOLUME 0x7B5
#define CS47L92_AIF3TX7MIX_INPUT_4_SOURCE 0x7B6
#define CS47L92_AIF3TX7MIX_INPUT_4_VOLUME 0x7B7
#define CS47L92_AIF3TX8MIX_INPUT_1_SOURCE 0x7B8
#define CS47L92_AIF3TX8MIX_INPUT_1_VOLUME 0x7B9
#define CS47L92_AIF3TX8MIX_INPUT_2_SOURCE 0x7BA
#define CS47L92_AIF3TX8MIX_INPUT_2_VOLUME 0x7BB
#define CS47L92_AIF3TX8MIX_INPUT_3_SOURCE 0x7BC
#define CS47L92_AIF3TX8MIX_INPUT_3_VOLUME 0x7BD
#define CS47L92_AIF3TX8MIX_INPUT_4_SOURCE 0x7BE
#define CS47L92_AIF3TX8MIX_INPUT_4_VOLUME 0x7BF
#define MADERA_AIF4TX1MIX_INPUT_1_SOURCE 0x7A0
#define MADERA_AIF4TX1MIX_INPUT_1_VOLUME 0x7A1
#define MADERA_AIF4TX1MIX_INPUT_2_SOURCE 0x7A2
@@ -1103,68 +1167,8 @@
#define MADERA_FCR_ADC_REFORMATTER_CONTROL 0xF73
#define MADERA_FCR_COEFF_START 0xF74
#define MADERA_FCR_COEFF_END 0xFC5
#define MADERA_DAC_COMP_1 0x1300
#define MADERA_DAC_COMP_2 0x1302
#define MADERA_FRF_COEFFICIENT_1L_1 0x1380
#define MADERA_FRF_COEFFICIENT_1L_2 0x1381
#define MADERA_FRF_COEFFICIENT_1L_3 0x1382
#define MADERA_FRF_COEFFICIENT_1L_4 0x1383
#define MADERA_FRF_COEFFICIENT_1R_1 0x1390
#define MADERA_FRF_COEFFICIENT_1R_2 0x1391
#define MADERA_FRF_COEFFICIENT_1R_3 0x1392
#define MADERA_FRF_COEFFICIENT_1R_4 0x1393
#define MADERA_FRF_COEFFICIENT_2L_1 0x13A0
#define MADERA_FRF_COEFFICIENT_2L_2 0x13A1
#define MADERA_FRF_COEFFICIENT_2L_3 0x13A2
#define MADERA_FRF_COEFFICIENT_2L_4 0x13A3
#define MADERA_FRF_COEFFICIENT_2R_1 0x13B0
#define MADERA_FRF_COEFFICIENT_2R_2 0x13B1
#define MADERA_FRF_COEFFICIENT_2R_3 0x13B2
#define MADERA_FRF_COEFFICIENT_2R_4 0x13B3
#define MADERA_FRF_COEFFICIENT_3L_1 0x13C0
#define MADERA_FRF_COEFFICIENT_3L_2 0x13C1
#define MADERA_FRF_COEFFICIENT_3L_3 0x13C2
#define MADERA_FRF_COEFFICIENT_3L_4 0x13C3
#define MADERA_FRF_COEFFICIENT_3R_1 0x13D0
#define MADERA_FRF_COEFFICIENT_3R_2 0x13D1
#define MADERA_FRF_COEFFICIENT_3R_3 0x13D2
#define MADERA_FRF_COEFFICIENT_3R_4 0x13D3
#define MADERA_FRF_COEFFICIENT_4L_1 0x13E0
#define MADERA_FRF_COEFFICIENT_4L_2 0x13E1
#define MADERA_FRF_COEFFICIENT_4L_3 0x13E2
#define MADERA_FRF_COEFFICIENT_4L_4 0x13E3
#define MADERA_FRF_COEFFICIENT_4R_1 0x13F0
#define MADERA_FRF_COEFFICIENT_4R_2 0x13F1
#define MADERA_FRF_COEFFICIENT_4R_3 0x13F2
#define MADERA_FRF_COEFFICIENT_4R_4 0x13F3
#define CS47L35_FRF_COEFFICIENT_4L_1 0x13A0
#define CS47L35_FRF_COEFFICIENT_4L_2 0x13A1
#define CS47L35_FRF_COEFFICIENT_4L_3 0x13A2
#define CS47L35_FRF_COEFFICIENT_4L_4 0x13A3
#define CS47L35_FRF_COEFFICIENT_5L_1 0x13B0
#define CS47L35_FRF_COEFFICIENT_5L_2 0x13B1
#define CS47L35_FRF_COEFFICIENT_5L_3 0x13B2
#define CS47L35_FRF_COEFFICIENT_5L_4 0x13B3
#define CS47L35_FRF_COEFFICIENT_5R_1 0x13C0
#define CS47L35_FRF_COEFFICIENT_5R_2 0x13C1
#define CS47L35_FRF_COEFFICIENT_5R_3 0x13C2
#define CS47L35_FRF_COEFFICIENT_5R_4 0x13C3
#define MADERA_FRF_COEFFICIENT_5L_1 0x1400
#define MADERA_FRF_COEFFICIENT_5L_2 0x1401
#define MADERA_FRF_COEFFICIENT_5L_3 0x1402
#define MADERA_FRF_COEFFICIENT_5L_4 0x1403
#define MADERA_FRF_COEFFICIENT_5R_1 0x1410
#define MADERA_FRF_COEFFICIENT_5R_2 0x1411
#define MADERA_FRF_COEFFICIENT_5R_3 0x1412
#define MADERA_FRF_COEFFICIENT_5R_4 0x1413
#define MADERA_FRF_COEFFICIENT_6L_1 0x1420
#define MADERA_FRF_COEFFICIENT_6L_2 0x1421
#define MADERA_FRF_COEFFICIENT_6L_3 0x1422
#define MADERA_FRF_COEFFICIENT_6L_4 0x1423
#define MADERA_FRF_COEFFICIENT_6R_1 0x1430
#define MADERA_FRF_COEFFICIENT_6R_2 0x1431
#define MADERA_FRF_COEFFICIENT_6R_3 0x1432
#define MADERA_FRF_COEFFICIENT_6R_4 0x1433
#define MADERA_AUXPDM1_CTRL_0 0x10C0
#define MADERA_AUXPDM1_CTRL_1 0x10C1
#define MADERA_DFC1_CTRL 0x1480
#define MADERA_DFC1_RX 0x1482
#define MADERA_DFC1_TX 0x1484
@@ -1202,6 +1206,8 @@
#define MADERA_GPIO1_CTRL_2 0x1701
#define MADERA_GPIO2_CTRL_1 0x1702
#define MADERA_GPIO2_CTRL_2 0x1703
#define MADERA_GPIO15_CTRL_1 0x171C
#define MADERA_GPIO15_CTRL_2 0x171D
#define MADERA_GPIO16_CTRL_1 0x171E
#define MADERA_GPIO16_CTRL_2 0x171F
#define MADERA_GPIO38_CTRL_1 0x174A
@@ -1232,6 +1238,7 @@
#define MADERA_IRQ2_CTRL 0x1A82
#define MADERA_INTERRUPT_RAW_STATUS_1 0x1AA0
#define MADERA_WSEQ_SEQUENCE_1 0x3000
#define MADERA_WSEQ_SEQUENCE_225 0x31C0
#define MADERA_WSEQ_SEQUENCE_252 0x31F6
#define CS47L35_OTP_HPDET_CAL_1 0x31F8
#define CS47L35_OTP_HPDET_CAL_2 0x31FA
@@ -1441,6 +1448,12 @@
#define MADERA_OPCLK_ASYNC_SEL_WIDTH 3
/* (0x0171) FLL1_Control_1 */
#define CS47L92_FLL1_REFCLK_SRC_MASK 0xF000
#define CS47L92_FLL1_REFCLK_SRC_SHIFT 12
#define CS47L92_FLL1_REFCLK_SRC_WIDTH 4
#define MADERA_FLL1_HOLD_MASK 0x0004
#define MADERA_FLL1_HOLD_SHIFT 2
#define MADERA_FLL1_HOLD_WIDTH 1
#define MADERA_FLL1_FREERUN 0x0002
#define MADERA_FLL1_FREERUN_MASK 0x0002
#define MADERA_FLL1_FREERUN_SHIFT 1
@@ -1473,6 +1486,9 @@
#define MADERA_FLL1_FRATIO_MASK 0x0F00
#define MADERA_FLL1_FRATIO_SHIFT 8
#define MADERA_FLL1_FRATIO_WIDTH 4
#define MADERA_FLL1_FB_DIV_MASK 0x03FF
#define MADERA_FLL1_FB_DIV_SHIFT 0
#define MADERA_FLL1_FB_DIV_WIDTH 10
/* (0x0176) FLL1_Control_6 */
#define MADERA_FLL1_REFCLK_DIV_MASK 0x00C0
@@ -1482,15 +1498,6 @@
#define MADERA_FLL1_REFCLK_SRC_SHIFT 0
#define MADERA_FLL1_REFCLK_SRC_WIDTH 4
/* (0x0177) FLL1_Loop_Filter_Test_1 */
#define MADERA_FLL1_FRC_INTEG_UPD 0x8000
#define MADERA_FLL1_FRC_INTEG_UPD_MASK 0x8000
#define MADERA_FLL1_FRC_INTEG_UPD_SHIFT 15
#define MADERA_FLL1_FRC_INTEG_UPD_WIDTH 1
#define MADERA_FLL1_FRC_INTEG_VAL_MASK 0x0FFF
#define MADERA_FLL1_FRC_INTEG_VAL_SHIFT 0
#define MADERA_FLL1_FRC_INTEG_VAL_WIDTH 12
/* (0x0179) FLL1_Control_7 */
#define MADERA_FLL1_GAIN_MASK 0x003c
#define MADERA_FLL1_GAIN_SHIFT 2
@@ -1504,6 +1511,30 @@
#define MADERA_FLL1_PHASE_ENA_SHIFT 11
#define MADERA_FLL1_PHASE_ENA_WIDTH 1
/* (0x017A) FLL1_Control_10 */
#define MADERA_FLL1_HP_MASK 0xC000
#define MADERA_FLL1_HP_SHIFT 14
#define MADERA_FLL1_HP_WIDTH 2
#define MADERA_FLL1_PHASEDET_ENA_MASK 0x1000
#define MADERA_FLL1_PHASEDET_ENA_SHIFT 12
#define MADERA_FLL1_PHASEDET_ENA_WIDTH 1
/* (0x017B) FLL1_Control_11 */
#define MADERA_FLL1_LOCKDET_THR_MASK 0x001E
#define MADERA_FLL1_LOCKDET_THR_SHIFT 1
#define MADERA_FLL1_LOCKDET_THR_WIDTH 4
#define MADERA_FLL1_LOCKDET_MASK 0x0001
#define MADERA_FLL1_LOCKDET_SHIFT 0
#define MADERA_FLL1_LOCKDET_WIDTH 1
/* (0x017D) FLL1_Digital_Test_1 */
#define MADERA_FLL1_SYNC_EFS_ENA_MASK 0x0100
#define MADERA_FLL1_SYNC_EFS_ENA_SHIFT 8
#define MADERA_FLL1_SYNC_EFS_ENA_WIDTH 1
#define MADERA_FLL1_CLK_VCO_FAST_SRC_MASK 0x0003
#define MADERA_FLL1_CLK_VCO_FAST_SRC_SHIFT 0
#define MADERA_FLL1_CLK_VCO_FAST_SRC_WIDTH 2
/* (0x0181) FLL1_Synchroniser_1 */
#define MADERA_FLL1_SYNC_ENA 0x0001
#define MADERA_FLL1_SYNC_ENA_MASK 0x0001
@@ -1625,6 +1656,13 @@
#define MADERA_LDO2_ENA_WIDTH 1
/* (0x0218) Mic_Bias_Ctrl_1 */
#define MADERA_MICB1_EXT_CAP 0x8000
#define MADERA_MICB1_EXT_CAP_MASK 0x8000
#define MADERA_MICB1_EXT_CAP_SHIFT 15
#define MADERA_MICB1_EXT_CAP_WIDTH 1
#define MADERA_MICB1_LVL_MASK 0x01E0
#define MADERA_MICB1_LVL_SHIFT 5
#define MADERA_MICB1_LVL_WIDTH 4
#define MADERA_MICB1_ENA 0x0001
#define MADERA_MICB1_ENA_MASK 0x0001
#define MADERA_MICB1_ENA_SHIFT 0
@@ -2308,6 +2346,17 @@
#define MADERA_OUT1R_ENA_SHIFT 0
#define MADERA_OUT1R_ENA_WIDTH 1
/* (0x0408) Output_Rate_1 */
#define MADERA_CP_DAC_MODE_MASK 0x0040
#define MADERA_CP_DAC_MODE_SHIFT 6
#define MADERA_CP_DAC_MODE_WIDTH 1
#define MADERA_OUT_EXT_CLK_DIV_MASK 0x0030
#define MADERA_OUT_EXT_CLK_DIV_SHIFT 4
#define MADERA_OUT_EXT_CLK_DIV_WIDTH 2
#define MADERA_OUT_CLK_SRC_MASK 0x0007
#define MADERA_OUT_CLK_SRC_SHIFT 0
#define MADERA_OUT_CLK_SRC_WIDTH 3
/* (0x0409) Output_Volume_Ramp */
#define MADERA_OUT_VD_RAMP_MASK 0x0070
#define MADERA_OUT_VD_RAMP_SHIFT 4
@@ -2829,6 +2878,30 @@
#define MADERA_AIF2RX1_ENA_WIDTH 1
/* (0x0599) AIF3_Tx_Enables */
#define MADERA_AIF3TX8_ENA 0x0080
#define MADERA_AIF3TX8_ENA_MASK 0x0080
#define MADERA_AIF3TX8_ENA_SHIFT 7
#define MADERA_AIF3TX8_ENA_WIDTH 1
#define MADERA_AIF3TX7_ENA 0x0040
#define MADERA_AIF3TX7_ENA_MASK 0x0040
#define MADERA_AIF3TX7_ENA_SHIFT 6
#define MADERA_AIF3TX7_ENA_WIDTH 1
#define MADERA_AIF3TX6_ENA 0x0020
#define MADERA_AIF3TX6_ENA_MASK 0x0020
#define MADERA_AIF3TX6_ENA_SHIFT 5
#define MADERA_AIF3TX6_ENA_WIDTH 1
#define MADERA_AIF3TX5_ENA 0x0010
#define MADERA_AIF3TX5_ENA_MASK 0x0010
#define MADERA_AIF3TX5_ENA_SHIFT 4
#define MADERA_AIF3TX5_ENA_WIDTH 1
#define MADERA_AIF3TX4_ENA 0x0008
#define MADERA_AIF3TX4_ENA_MASK 0x0008
#define MADERA_AIF3TX4_ENA_SHIFT 3
#define MADERA_AIF3TX4_ENA_WIDTH 1
#define MADERA_AIF3TX3_ENA 0x0004
#define MADERA_AIF3TX3_ENA_MASK 0x0004
#define MADERA_AIF3TX3_ENA_SHIFT 2
#define MADERA_AIF3TX3_ENA_WIDTH 1
#define MADERA_AIF3TX2_ENA 0x0002
#define MADERA_AIF3TX2_ENA_MASK 0x0002
#define MADERA_AIF3TX2_ENA_SHIFT 1
@@ -2839,6 +2912,30 @@
#define MADERA_AIF3TX1_ENA_WIDTH 1
/* (0x059A) AIF3_Rx_Enables */
#define MADERA_AIF3RX8_ENA 0x0080
#define MADERA_AIF3RX8_ENA_MASK 0x0080
#define MADERA_AIF3RX8_ENA_SHIFT 7
#define MADERA_AIF3RX8_ENA_WIDTH 1
#define MADERA_AIF3RX7_ENA 0x0040
#define MADERA_AIF3RX7_ENA_MASK 0x0040
#define MADERA_AIF3RX7_ENA_SHIFT 6
#define MADERA_AIF3RX7_ENA_WIDTH 1
#define MADERA_AIF3RX6_ENA 0x0020
#define MADERA_AIF3RX6_ENA_MASK 0x0020
#define MADERA_AIF3RX6_ENA_SHIFT 5
#define MADERA_AIF3RX6_ENA_WIDTH 1
#define MADERA_AIF3RX5_ENA 0x0010
#define MADERA_AIF3RX5_ENA_MASK 0x0010
#define MADERA_AIF3RX5_ENA_SHIFT 4
#define MADERA_AIF3RX5_ENA_WIDTH 1
#define MADERA_AIF3RX4_ENA 0x0008
#define MADERA_AIF3RX4_ENA_MASK 0x0008
#define MADERA_AIF3RX4_ENA_SHIFT 3
#define MADERA_AIF3RX4_ENA_WIDTH 1
#define MADERA_AIF3RX3_ENA 0x0004
#define MADERA_AIF3RX3_ENA_MASK 0x0004
#define MADERA_AIF3RX3_ENA_SHIFT 2
#define MADERA_AIF3RX3_ENA_WIDTH 1
#define MADERA_AIF3RX2_ENA 0x0002
#define MADERA_AIF3RX2_ENA_MASK 0x0002
#define MADERA_AIF3RX2_ENA_SHIFT 1
@@ -3453,6 +3550,25 @@
#define MADERA_FCR_MIC_MODE_SEL_SHIFT 2
#define MADERA_FCR_MIC_MODE_SEL_WIDTH 2
/* (0x10C0) AUXPDM1_CTRL_0 */
#define MADERA_AUXPDM1_SRC_MASK 0x0F00
#define MADERA_AUXPDM1_SRC_SHIFT 8
#define MADERA_AUXPDM1_SRC_WIDTH 4
#define MADERA_AUXPDM1_TXEDGE_MASK 0x0010
#define MADERA_AUXPDM1_TXEDGE_SHIFT 4
#define MADERA_AUXPDM1_TXEDGE_WIDTH 1
#define MADERA_AUXPDM1_MSTR_MASK 0x0008
#define MADERA_AUXPDM1_MSTR_SHIFT 3
#define MADERA_AUXPDM1_MSTR_WIDTH 1
#define MADERA_AUXPDM1_ENABLE_MASK 0x0001
#define MADERA_AUXPDM1_ENABLE_SHIFT 0
#define MADERA_AUXPDM1_ENABLE_WIDTH 1
/* (0x10C1) AUXPDM1_CTRL_1 */
#define MADERA_AUXPDM1_CLK_FREQ_MASK 0xC000
#define MADERA_AUXPDM1_CLK_FREQ_SHIFT 14
#define MADERA_AUXPDM1_CLK_FREQ_WIDTH 2
/* (0x1480) DFC1_CTRL_W0 */
#define MADERA_DFC1_RATE_MASK 0x007C
#define MADERA_DFC1_RATE_SHIFT 2

View File

@@ -374,6 +374,7 @@ enum rk805_reg {
#define SWITCH1_EN BIT(5)
#define DEV_OFF_RST BIT(3)
#define DEV_OFF BIT(0)
#define RTC_STOP BIT(0)
#define VB_LO_ACT BIT(4)
#define VB_LO_SEL_3500MV (7 << 0)
@@ -387,7 +388,179 @@ enum rk805_reg {
#define SHUTDOWN_FUN (0x2 << 2)
#define SLEEP_FUN (0x1 << 2)
#define RK8XX_ID_MSK 0xfff0
#define PWM_MODE_MSK BIT(7)
#define FPWM_MODE BIT(7)
#define AUTO_PWM_MODE 0
enum rk817_reg_id {
RK817_ID_DCDC1 = 0,
RK817_ID_DCDC2,
RK817_ID_DCDC3,
RK817_ID_DCDC4,
RK817_ID_LDO1,
RK817_ID_LDO2,
RK817_ID_LDO3,
RK817_ID_LDO4,
RK817_ID_LDO5,
RK817_ID_LDO6,
RK817_ID_LDO7,
RK817_ID_LDO8,
RK817_ID_LDO9,
RK817_ID_BOOST,
RK817_ID_BOOST_OTG_SW,
RK817_NUM_REGULATORS
};
enum rk809_reg_id {
RK809_ID_DCDC5 = RK817_ID_BOOST,
RK809_ID_SW1,
RK809_ID_SW2,
RK809_NUM_REGULATORS
};
#define RK817_SECONDS_REG 0x00
#define RK817_MINUTES_REG 0x01
#define RK817_HOURS_REG 0x02
#define RK817_DAYS_REG 0x03
#define RK817_MONTHS_REG 0x04
#define RK817_YEARS_REG 0x05
#define RK817_WEEKS_REG 0x06
#define RK817_ALARM_SECONDS_REG 0x07
#define RK817_ALARM_MINUTES_REG 0x08
#define RK817_ALARM_HOURS_REG 0x09
#define RK817_ALARM_DAYS_REG 0x0a
#define RK817_ALARM_MONTHS_REG 0x0b
#define RK817_ALARM_YEARS_REG 0x0c
#define RK817_RTC_CTRL_REG 0xd
#define RK817_RTC_STATUS_REG 0xe
#define RK817_RTC_INT_REG 0xf
#define RK817_RTC_COMP_LSB_REG 0x10
#define RK817_RTC_COMP_MSB_REG 0x11
#define RK817_POWER_EN_REG(i) (0xb1 + (i))
#define RK817_POWER_SLP_EN_REG(i) (0xb5 + (i))
#define RK817_POWER_CONFIG (0xb9)
#define RK817_BUCK_CONFIG_REG(i) (0xba + (i) * 3)
#define RK817_BUCK1_ON_VSEL_REG 0xBB
#define RK817_BUCK1_SLP_VSEL_REG 0xBC
#define RK817_BUCK2_CONFIG_REG 0xBD
#define RK817_BUCK2_ON_VSEL_REG 0xBE
#define RK817_BUCK2_SLP_VSEL_REG 0xBF
#define RK817_BUCK3_CONFIG_REG 0xC0
#define RK817_BUCK3_ON_VSEL_REG 0xC1
#define RK817_BUCK3_SLP_VSEL_REG 0xC2
#define RK817_BUCK4_CONFIG_REG 0xC3
#define RK817_BUCK4_ON_VSEL_REG 0xC4
#define RK817_BUCK4_SLP_VSEL_REG 0xC5
#define RK817_LDO_ON_VSEL_REG(idx) (0xcc + (idx) * 2)
#define RK817_BOOST_OTG_CFG (0xde)
#define RK817_ID_MSB 0xed
#define RK817_ID_LSB 0xee
#define RK817_SYS_STS 0xf0
#define RK817_SYS_CFG(i) (0xf1 + (i))
#define RK817_ON_SOURCE_REG 0xf5
#define RK817_OFF_SOURCE_REG 0xf6
/* INTERRUPT REGISTER */
#define RK817_INT_STS_REG0 0xf8
#define RK817_INT_STS_MSK_REG0 0xf9
#define RK817_INT_STS_REG1 0xfa
#define RK817_INT_STS_MSK_REG1 0xfb
#define RK817_INT_STS_REG2 0xfc
#define RK817_INT_STS_MSK_REG2 0xfd
#define RK817_GPIO_INT_CFG 0xfe
/* IRQ Definitions */
#define RK817_IRQ_PWRON_FALL 0
#define RK817_IRQ_PWRON_RISE 1
#define RK817_IRQ_PWRON 2
#define RK817_IRQ_PWMON_LP 3
#define RK817_IRQ_HOTDIE 4
#define RK817_IRQ_RTC_ALARM 5
#define RK817_IRQ_RTC_PERIOD 6
#define RK817_IRQ_VB_LO 7
#define RK817_IRQ_PLUG_IN 8
#define RK817_IRQ_PLUG_OUT 9
#define RK817_IRQ_CHRG_TERM 10
#define RK817_IRQ_CHRG_TIME 11
#define RK817_IRQ_CHRG_TS 12
#define RK817_IRQ_USB_OV 13
#define RK817_IRQ_CHRG_IN_CLMP 14
#define RK817_IRQ_BAT_DIS_ILIM 15
#define RK817_IRQ_GATE_GPIO 16
#define RK817_IRQ_TS_GPIO 17
#define RK817_IRQ_CODEC_PD 18
#define RK817_IRQ_CODEC_PO 19
#define RK817_IRQ_CLASSD_MUTE_DONE 20
#define RK817_IRQ_CLASSD_OCP 21
#define RK817_IRQ_BAT_OVP 22
#define RK817_IRQ_CHRG_BAT_HI 23
#define RK817_IRQ_END (RK817_IRQ_CHRG_BAT_HI + 1)
/*
* rtc_ctrl 0xd
* same as the RK808, except bit 4
*/
#define RK817_RTC_CTRL_RSV4 BIT(4)
/* power config 0xb9 */
#define RK817_BUCK3_FB_RES_MSK BIT(6)
#define RK817_BUCK3_FB_RES_INTER BIT(6)
#define RK817_BUCK3_FB_RES_EXT 0
/* buck config 0xba */
#define RK817_RAMP_RATE_OFFSET 6
#define RK817_RAMP_RATE_MASK (0x3 << RK817_RAMP_RATE_OFFSET)
#define RK817_RAMP_RATE_3MV_PER_US (0x0 << RK817_RAMP_RATE_OFFSET)
#define RK817_RAMP_RATE_6_3MV_PER_US (0x1 << RK817_RAMP_RATE_OFFSET)
#define RK817_RAMP_RATE_12_5MV_PER_US (0x2 << RK817_RAMP_RATE_OFFSET)
#define RK817_RAMP_RATE_25MV_PER_US (0x3 << RK817_RAMP_RATE_OFFSET)
/* sys_cfg1 0xf2 */
#define RK817_HOTDIE_TEMP_MSK (0x3 << 4)
#define RK817_HOTDIE_85 (0x0 << 4)
#define RK817_HOTDIE_95 (0x1 << 4)
#define RK817_HOTDIE_105 (0x2 << 4)
#define RK817_HOTDIE_115 (0x3 << 4)
#define RK817_TSD_TEMP_MSK BIT(6)
#define RK817_TSD_140 0
#define RK817_TSD_160 BIT(6)
#define RK817_CLK32KOUT2_EN BIT(7)
/* sys_cfg3 0xf4 */
#define RK817_SLPPIN_FUNC_MSK (0x3 << 3)
#define SLPPIN_NULL_FUN (0x0 << 3)
#define SLPPIN_SLP_FUN (0x1 << 3)
#define SLPPIN_DN_FUN (0x2 << 3)
#define SLPPIN_RST_FUN (0x3 << 3)
#define RK817_RST_FUNC_MSK (0x3 << 6)
#define RK817_RST_FUNC_SFT (6)
#define RK817_RST_FUNC_CNT (3)
#define RK817_RST_FUNC_DEV (0) /* reset the dev */
#define RK817_RST_FUNC_REG (0x1 << 6) /* reset the reg only */
#define RK817_SLPPOL_MSK BIT(5)
#define RK817_SLPPOL_H BIT(5)
#define RK817_SLPPOL_L (0)
/* gpio&int 0xfe */
#define RK817_INT_POL_MSK BIT(1)
#define RK817_INT_POL_H BIT(1)
#define RK817_INT_POL_L 0
#define RK809_BUCK5_CONFIG(i) (RK817_BOOST_OTG_CFG + (i) * 1)
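A quick consistency check on the parameterized register macros above: RK817_BUCK_CONFIG_REG(i) = 0xba + 3*i yields 0xBA, 0xBD, 0xC0 and 0xC3 for i = 0..3, matching the explicit RK817_BUCK2/3/4_CONFIG_REG values, while RK817_POWER_EN_REG(i) and RK817_POWER_SLP_EN_REG(i) step one register per index from 0xb1 and 0xb5 respectively.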
enum {
BUCK_ILMIN_50MA,
@@ -435,6 +608,8 @@ enum {
enum {
RK805_ID = 0x8050,
RK808_ID = 0x0000,
RK809_ID = 0x8090,
RK817_ID = 0x8170,
RK818_ID = 0x8181,
};
@@ -445,5 +620,7 @@ struct rk808 {
long variant;
const struct regmap_config *regmap_cfg;
const struct regmap_irq_chip *regmap_irq_chip;
void (*pm_pwroff_fn)(void);
void (*pm_pwroff_prep_fn)(void);
};
#endif /* __LINUX_REGULATOR_RK808_H */

View File

@@ -0,0 +1,408 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Copyright (C) 2018 ROHM Semiconductors */
#ifndef __LINUX_MFD_BD70528_H__
#define __LINUX_MFD_BD70528_H__
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/mfd/rohm-generic.h>
#include <linux/regmap.h>
enum {
BD70528_BUCK1,
BD70528_BUCK2,
BD70528_BUCK3,
BD70528_LDO1,
BD70528_LDO2,
BD70528_LDO3,
BD70528_LED1,
BD70528_LED2,
};
struct bd70528_data {
struct rohm_regmap_dev chip;
struct mutex rtc_timer_lock;
};
#define BD70528_BUCK_VOLTS 17
#define BD70528_LDO_VOLTS 0x20
#define BD70528_REG_BUCK1_EN 0x0F
#define BD70528_REG_BUCK1_VOLT 0x15
#define BD70528_REG_BUCK2_EN 0x10
#define BD70528_REG_BUCK2_VOLT 0x16
#define BD70528_REG_BUCK3_EN 0x11
#define BD70528_REG_BUCK3_VOLT 0x17
#define BD70528_REG_LDO1_EN 0x1b
#define BD70528_REG_LDO1_VOLT 0x1e
#define BD70528_REG_LDO2_EN 0x1c
#define BD70528_REG_LDO2_VOLT 0x1f
#define BD70528_REG_LDO3_EN 0x1d
#define BD70528_REG_LDO3_VOLT 0x20
#define BD70528_REG_LED_CTRL 0x2b
#define BD70528_REG_LED_VOLT 0x29
#define BD70528_REG_LED_EN 0x2a
/* main irq registers */
#define BD70528_REG_INT_MAIN 0x7E
#define BD70528_REG_INT_MAIN_MASK 0x74
/* 'sub irq' registers */
#define BD70528_REG_INT_SHDN 0x7F
#define BD70528_REG_INT_PWR_FLT 0x80
#define BD70528_REG_INT_VR_FLT 0x81
#define BD70528_REG_INT_MISC 0x82
#define BD70528_REG_INT_BAT1 0x83
#define BD70528_REG_INT_BAT2 0x84
#define BD70528_REG_INT_RTC 0x85
#define BD70528_REG_INT_GPIO 0x86
#define BD70528_REG_INT_OP_FAIL 0x87
#define BD70528_REG_INT_SHDN_MASK 0x75
#define BD70528_REG_INT_PWR_FLT_MASK 0x76
#define BD70528_REG_INT_VR_FLT_MASK 0x77
#define BD70528_REG_INT_MISC_MASK 0x78
#define BD70528_REG_INT_BAT1_MASK 0x79
#define BD70528_REG_INT_BAT2_MASK 0x7a
#define BD70528_REG_INT_RTC_MASK 0x7b
#define BD70528_REG_INT_GPIO_MASK 0x7c
#define BD70528_REG_INT_OP_FAIL_MASK 0x7d
/* Reset related 'magic' registers */
#define BD70528_REG_SHIPMODE 0x03
#define BD70528_REG_HWRESET 0x04
#define BD70528_REG_WARMRESET 0x05
#define BD70528_REG_STANDBY 0x06
/* GPIO registers */
#define BD70528_REG_GPIO_STATE 0x8F
#define BD70528_REG_GPIO1_IN 0x4d
#define BD70528_REG_GPIO2_IN 0x4f
#define BD70528_REG_GPIO3_IN 0x51
#define BD70528_REG_GPIO4_IN 0x53
#define BD70528_REG_GPIO1_OUT 0x4e
#define BD70528_REG_GPIO2_OUT 0x50
#define BD70528_REG_GPIO3_OUT 0x52
#define BD70528_REG_GPIO4_OUT 0x54
/* clk control */
#define BD70528_REG_CLK_OUT 0x2c
/* RTC */
#define BD70528_REG_RTC_COUNT_H 0x2d
#define BD70528_REG_RTC_COUNT_L 0x2e
#define BD70528_REG_RTC_SEC 0x2f
#define BD70528_REG_RTC_MINUTE 0x30
#define BD70528_REG_RTC_HOUR 0x31
#define BD70528_REG_RTC_WEEK 0x32
#define BD70528_REG_RTC_DAY 0x33
#define BD70528_REG_RTC_MONTH 0x34
#define BD70528_REG_RTC_YEAR 0x35
#define BD70528_REG_RTC_ALM_SEC 0x36
#define BD70528_REG_RTC_ALM_START BD70528_REG_RTC_ALM_SEC
#define BD70528_REG_RTC_ALM_MINUTE 0x37
#define BD70528_REG_RTC_ALM_HOUR 0x38
#define BD70528_REG_RTC_ALM_WEEK 0x39
#define BD70528_REG_RTC_ALM_DAY 0x3a
#define BD70528_REG_RTC_ALM_MONTH 0x3b
#define BD70528_REG_RTC_ALM_YEAR 0x3c
#define BD70528_REG_RTC_ALM_MASK 0x3d
#define BD70528_REG_RTC_ALM_REPEAT 0x3e
#define BD70528_REG_RTC_START BD70528_REG_RTC_SEC
#define BD70528_REG_RTC_WAKE_SEC 0x43
#define BD70528_REG_RTC_WAKE_START BD70528_REG_RTC_WAKE_SEC
#define BD70528_REG_RTC_WAKE_MIN 0x44
#define BD70528_REG_RTC_WAKE_HOUR 0x45
#define BD70528_REG_RTC_WAKE_CTRL 0x46
#define BD70528_REG_ELAPSED_TIMER_EN 0x42
#define BD70528_REG_WAKE_EN 0x46
/* WDT registers */
#define BD70528_REG_WDT_CTRL 0x4A
#define BD70528_REG_WDT_HOUR 0x49
#define BD70528_REG_WDT_MINUTE 0x48
#define BD70528_REG_WDT_SEC 0x47
/* Charger / Battery */
#define BD70528_REG_CHG_CURR_STAT 0x59
#define BD70528_REG_CHG_BAT_STAT 0x57
#define BD70528_REG_CHG_BAT_TEMP 0x58
#define BD70528_REG_CHG_IN_STAT 0x56
#define BD70528_REG_CHG_DCIN_ILIM 0x5d
#define BD70528_REG_CHG_CHG_CURR_WARM 0x61
#define BD70528_REG_CHG_CHG_CURR_COLD 0x62
/* Masks for main IRQ register bits */
enum {
BD70528_INT_SHDN,
#define BD70528_INT_SHDN_MASK BIT(BD70528_INT_SHDN)
BD70528_INT_PWR_FLT,
#define BD70528_INT_PWR_FLT_MASK BIT(BD70528_INT_PWR_FLT)
BD70528_INT_VR_FLT,
#define BD70528_INT_VR_FLT_MASK BIT(BD70528_INT_VR_FLT)
BD70528_INT_MISC,
#define BD70528_INT_MISC_MASK BIT(BD70528_INT_MISC)
BD70528_INT_BAT1,
#define BD70528_INT_BAT1_MASK BIT(BD70528_INT_BAT1)
BD70528_INT_RTC,
#define BD70528_INT_RTC_MASK BIT(BD70528_INT_RTC)
BD70528_INT_GPIO,
#define BD70528_INT_GPIO_MASK BIT(BD70528_INT_GPIO)
BD70528_INT_OP_FAIL,
#define BD70528_INT_OP_FAIL_MASK BIT(BD70528_INT_OP_FAIL)
};
/* IRQs */
enum {
/* Shutdown register IRQs */
BD70528_INT_LONGPUSH,
BD70528_INT_WDT,
BD70528_INT_HWRESET,
BD70528_INT_RSTB_FAULT,
BD70528_INT_VBAT_UVLO,
BD70528_INT_TSD,
BD70528_INT_RSTIN,
/* Power failure register IRQs */
BD70528_INT_BUCK1_FAULT,
BD70528_INT_BUCK2_FAULT,
BD70528_INT_BUCK3_FAULT,
BD70528_INT_LDO1_FAULT,
BD70528_INT_LDO2_FAULT,
BD70528_INT_LDO3_FAULT,
BD70528_INT_LED1_FAULT,
BD70528_INT_LED2_FAULT,
/* VR FAULT register IRQs */
BD70528_INT_BUCK1_OCP,
BD70528_INT_BUCK2_OCP,
BD70528_INT_BUCK3_OCP,
BD70528_INT_LED1_OCP,
BD70528_INT_LED2_OCP,
BD70528_INT_BUCK1_FULLON,
BD70528_INT_BUCK2_FULLON,
/* PMU register interrupts */
BD70528_INT_SHORTPUSH,
BD70528_INT_AUTO_WAKEUP,
BD70528_INT_STATE_CHANGE,
/* Charger 1 register IRQs */
BD70528_INT_BAT_OV_RES,
BD70528_INT_BAT_OV_DET,
BD70528_INT_DBAT_DET,
BD70528_INT_BATTSD_COLD_RES,
BD70528_INT_BATTSD_COLD_DET,
BD70528_INT_BATTSD_HOT_RES,
BD70528_INT_BATTSD_HOT_DET,
BD70528_INT_CHG_TSD,
/* Charger 2 register IRQs */
BD70528_INT_BAT_RMV,
BD70528_INT_BAT_DET,
BD70528_INT_DCIN2_OV_RES,
BD70528_INT_DCIN2_OV_DET,
BD70528_INT_DCIN2_RMV,
BD70528_INT_DCIN2_DET,
BD70528_INT_DCIN1_RMV,
BD70528_INT_DCIN1_DET,
/* RTC register IRQs */
BD70528_INT_RTC_ALARM,
BD70528_INT_ELPS_TIM,
/* GPIO register IRQs */
BD70528_INT_GPIO0,
BD70528_INT_GPIO1,
BD70528_INT_GPIO2,
BD70528_INT_GPIO3,
/* Invalid operation register IRQs */
BD70528_INT_BUCK1_DVS_OPFAIL,
BD70528_INT_BUCK2_DVS_OPFAIL,
BD70528_INT_BUCK3_DVS_OPFAIL,
BD70528_INT_LED1_VOLT_OPFAIL,
BD70528_INT_LED2_VOLT_OPFAIL,
};
/* Masks */
#define BD70528_INT_LONGPUSH_MASK 0x1
#define BD70528_INT_WDT_MASK 0x2
#define BD70528_INT_HWRESET_MASK 0x4
#define BD70528_INT_RSTB_FAULT_MASK 0x8
#define BD70528_INT_VBAT_UVLO_MASK 0x10
#define BD70528_INT_TSD_MASK 0x20
#define BD70528_INT_RSTIN_MASK 0x40
#define BD70528_INT_BUCK1_FAULT_MASK 0x1
#define BD70528_INT_BUCK2_FAULT_MASK 0x2
#define BD70528_INT_BUCK3_FAULT_MASK 0x4
#define BD70528_INT_LDO1_FAULT_MASK 0x8
#define BD70528_INT_LDO2_FAULT_MASK 0x10
#define BD70528_INT_LDO3_FAULT_MASK 0x20
#define BD70528_INT_LED1_FAULT_MASK 0x40
#define BD70528_INT_LED2_FAULT_MASK 0x80
#define BD70528_INT_BUCK1_OCP_MASK 0x1
#define BD70528_INT_BUCK2_OCP_MASK 0x2
#define BD70528_INT_BUCK3_OCP_MASK 0x4
#define BD70528_INT_LED1_OCP_MASK 0x8
#define BD70528_INT_LED2_OCP_MASK 0x10
#define BD70528_INT_BUCK1_FULLON_MASK 0x20
#define BD70528_INT_BUCK2_FULLON_MASK 0x40
#define BD70528_INT_SHORTPUSH_MASK 0x1
#define BD70528_INT_AUTO_WAKEUP_MASK 0x2
#define BD70528_INT_STATE_CHANGE_MASK 0x10
#define BD70528_INT_BAT_OV_RES_MASK 0x1
#define BD70528_INT_BAT_OV_DET_MASK 0x2
#define BD70528_INT_DBAT_DET_MASK 0x4
#define BD70528_INT_BATTSD_COLD_RES_MASK 0x8
#define BD70528_INT_BATTSD_COLD_DET_MASK 0x10
#define BD70528_INT_BATTSD_HOT_RES_MASK 0x20
#define BD70528_INT_BATTSD_HOT_DET_MASK 0x40
#define BD70528_INT_CHG_TSD_MASK 0x80
#define BD70528_INT_BAT_RMV_MASK 0x1
#define BD70528_INT_BAT_DET_MASK 0x2
#define BD70528_INT_DCIN2_OV_RES_MASK 0x4
#define BD70528_INT_DCIN2_OV_DET_MASK 0x8
#define BD70528_INT_DCIN2_RMV_MASK 0x10
#define BD70528_INT_DCIN2_DET_MASK 0x20
#define BD70528_INT_DCIN1_RMV_MASK 0x40
#define BD70528_INT_DCIN1_DET_MASK 0x80
#define BD70528_INT_RTC_ALARM_MASK 0x1
#define BD70528_INT_ELPS_TIM_MASK 0x2
#define BD70528_INT_GPIO0_MASK 0x1
#define BD70528_INT_GPIO1_MASK 0x2
#define BD70528_INT_GPIO2_MASK 0x4
#define BD70528_INT_GPIO3_MASK 0x8
#define BD70528_INT_BUCK1_DVS_OPFAIL_MASK 0x1
#define BD70528_INT_BUCK2_DVS_OPFAIL_MASK 0x2
#define BD70528_INT_BUCK3_DVS_OPFAIL_MASK 0x4
#define BD70528_INT_LED1_VOLT_OPFAIL_MASK 0x10
#define BD70528_INT_LED2_VOLT_OPFAIL_MASK 0x20
#define BD70528_DEBOUNCE_MASK 0x3
#define BD70528_DEBOUNCE_DISABLE 0
#define BD70528_DEBOUNCE_15MS 1
#define BD70528_DEBOUNCE_30MS 2
#define BD70528_DEBOUNCE_50MS 3
#define BD70528_GPIO_DRIVE_MASK 0x2
#define BD70528_GPIO_PUSH_PULL 0x0
#define BD70528_GPIO_OPEN_DRAIN 0x2
#define BD70528_GPIO_OUT_EN_MASK 0x80
#define BD70528_GPIO_OUT_ENABLE 0x80
#define BD70528_GPIO_OUT_DISABLE 0x0
#define BD70528_GPIO_OUT_HI 0x1
#define BD70528_GPIO_OUT_LO 0x0
#define BD70528_GPIO_OUT_MASK 0x1
#define BD70528_GPIO_IN_STATE_BASE 1
#define BD70528_CLK_OUT_EN_MASK 0x1
/* RTC masks to mask out reserved bits */
#define BD70528_MASK_RTC_SEC 0x7f
#define BD70528_MASK_RTC_MINUTE 0x7f
#define BD70528_MASK_RTC_HOUR_24H 0x80
#define BD70528_MASK_RTC_HOUR_PM 0x20
#define BD70528_MASK_RTC_HOUR 0x1f
#define BD70528_MASK_RTC_DAY 0x3f
#define BD70528_MASK_RTC_WEEK 0x07
#define BD70528_MASK_RTC_MONTH 0x1f
#define BD70528_MASK_RTC_YEAR 0xff
#define BD70528_MASK_RTC_COUNT_L 0x7f
#define BD70528_MASK_ELAPSED_TIMER_EN 0x1
/* Mask the second, minute and hour fields.
 * The HW would support an ALM IRQ spanning more than 24h
 * (by setting the day, month and year too), but as we wish
 * to keep this the same as for wake-up, we limit the ALM
 * to 24h and only unmask sec, min and hour.
 */
#define BD70528_MASK_ALM_EN 0x7
#define BD70528_MASK_WAKE_EN 0x1
/* WDT masks */
#define BD70528_MASK_WDT_EN 0x1
#define BD70528_MASK_WDT_HOUR 0x1
#define BD70528_MASK_WDT_MINUTE 0x7f
#define BD70528_MASK_WDT_SEC 0x7f
#define BD70528_WDT_STATE_BIT 0x1
#define BD70528_ELAPSED_STATE_BIT 0x2
#define BD70528_WAKE_STATE_BIT 0x4
/* Charger masks */
#define BD70528_MASK_CHG_STAT 0x7f
#define BD70528_MASK_CHG_BAT_TIMER 0x20
#define BD70528_MASK_CHG_BAT_OVERVOLT 0x10
#define BD70528_MASK_CHG_BAT_DETECT 0x1
#define BD70528_MASK_CHG_DCIN1_UVLO 0x1
#define BD70528_MASK_CHG_DCIN_ILIM 0x3f
#define BD70528_MASK_CHG_CHG_CURR 0x1f
#define BD70528_MASK_CHG_TRICKLE_CURR 0x10
/*
* Note, external battery register is the lonely rider at
* address 0xc5. See how to stuff that in the regmap
*/
#define BD70528_MAX_REGISTER 0x94
/* Buck control masks */
#define BD70528_MASK_RUN_EN 0x4
#define BD70528_MASK_STBY_EN 0x2
#define BD70528_MASK_IDLE_EN 0x1
#define BD70528_MASK_LED1_EN 0x1
#define BD70528_MASK_LED2_EN 0x10
#define BD70528_MASK_BUCK_VOLT 0xf
#define BD70528_MASK_LDO_VOLT 0x1f
#define BD70528_MASK_LED1_VOLT 0x1
#define BD70528_MASK_LED2_VOLT 0x10
/* Misc irq masks */
#define BD70528_INT_MASK_SHORT_PUSH 1
#define BD70528_INT_MASK_AUTO_WAKE 2
#define BD70528_INT_MASK_POWER_STATE 4
#define BD70528_MASK_BUCK_RAMP 0x10
#define BD70528_SIFT_BUCK_RAMP 4
#if IS_ENABLED(CONFIG_BD70528_WATCHDOG)
int bd70528_wdt_set(struct rohm_regmap_dev *data, int enable, int *old_state);
void bd70528_wdt_lock(struct rohm_regmap_dev *data);
void bd70528_wdt_unlock(struct rohm_regmap_dev *data);
#else /* CONFIG_BD70528_WATCHDOG */
static inline int bd70528_wdt_set(struct rohm_regmap_dev *data, int enable,
int *old_state)
{
return 0;
}
static inline void bd70528_wdt_lock(struct rohm_regmap_dev *data)
{
}
static inline void bd70528_wdt_unlock(struct rohm_regmap_dev *data)
{
}
#endif /* CONFIG_BD70528_WATCHDOG */
#endif /* __LINUX_MFD_BD70528_H__ */
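The IS_ENABLED() stubs above let the RTC code call the watchdog helpers unconditionally, with bd70528_wdt_lock()/bd70528_wdt_unlock() available to serialize against the watchdog from other contexts. A hedged sketch of the intended pattern; my_rtc_set_time() is a hypothetical caller, and a NULL old_state on restore is assumed to be tolerated:

static int my_rtc_set_time(struct rohm_regmap_dev *chip)
{
	int old_state, ret;

	ret = bd70528_wdt_set(chip, 0, &old_state);	/* disable, save state */
	if (ret)
		return ret;
	/* ... rewrite the BD70528_REG_RTC_* counter here ... */
	return bd70528_wdt_set(chip, old_state, NULL);	/* restore prior state */
}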

View File

@@ -4,14 +4,9 @@
#ifndef __LINUX_MFD_BD718XX_H__
#define __LINUX_MFD_BD718XX_H__
#include <linux/mfd/rohm-generic.h>
#include <linux/regmap.h>
enum {
BD718XX_TYPE_BD71837 = 0,
BD718XX_TYPE_BD71847,
BD718XX_TYPE_AMOUNT
};
enum {
BD718XX_BUCK1 = 0,
BD718XX_BUCK2,
@@ -321,18 +316,17 @@ enum {
BD718XX_PWRBTN_LONG_PRESS_15S
};
struct bd718xx_clk;
struct bd718xx {
unsigned int chip_type;
struct device *dev;
struct regmap *regmap;
unsigned long int id;
/*
* Please keep this as the first member here as some
* drivers (clk) supporting more than one chip may only know this
* generic struct 'struct rohm_regmap_dev' and assume it is
* the first chunk of parent device's private data.
*/
struct rohm_regmap_dev chip;
int chip_irq;
struct regmap_irq_chip_data *irq_data;
struct bd718xx_clk *clk;
};
#endif /* __LINUX_MFD_BD718XX_H__ */

View File

@@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Copyright (C) 2018 ROHM Semiconductors */
#ifndef __LINUX_MFD_ROHM_H__
#define __LINUX_MFD_ROHM_H__
enum {
ROHM_CHIP_TYPE_BD71837 = 0,
ROHM_CHIP_TYPE_BD71847,
ROHM_CHIP_TYPE_BD70528,
ROHM_CHIP_TYPE_AMOUNT
};
struct rohm_regmap_dev {
unsigned int chip_type;
struct device *dev;
struct regmap *regmap;
};
#endif

View File

@@ -5,7 +5,7 @@
*/
#ifndef MFD_STMFX_H
#define MFX_STMFX_H
#define MFD_STMFX_H
#include <linux/regmap.h>

View File

@@ -77,8 +77,7 @@ extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode,
int extra_count);
struct page *newpage, struct page *page, int extra_count);
#else
static inline void putback_movable_pages(struct list_head *l) {}

View File

@@ -805,7 +805,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 swp[0x1];
u8 swp_csum[0x1];
u8 swp_lso[0x1];
u8 reserved_at_23[0xd];
u8 cqe_checksum_full[0x1];
u8 reserved_at_24[0xc];
u8 max_vxlan_udp_ports[0x8];
u8 reserved_at_38[0x6];
u8 max_geneve_opt_len[0x1];
@@ -1390,7 +1391,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_6c8[0x28];
u8 sf_base_id[0x10];
u8 reserved_at_700[0x100];
u8 reserved_at_700[0x80];
u8 vhca_tunnel_commands[0x40];
u8 reserved_at_7c0[0x40];
};
enum mlx5_flow_destination_type {
@@ -9694,7 +9697,7 @@ struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
u8 opcode[0x10];
u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 vhca_tunnel_id[0x10];
u8 obj_type[0x10];
u8 obj_id[0x20];

View File

@@ -37,7 +37,8 @@
#include <linux/mlx5/driver.h>
#define MLX5_INVALID_LKEY 0x100
#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5)
/* UMR (3 WQE_BB's) + SIG (3 WQE_BB's) + PSV (mem) + PSV (wire) */
#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 8)
#define MLX5_DIF_SIZE 8
#define MLX5_STRIDE_BLOCK_OP 0x400
#define MLX5_CPY_GRD_MASK 0xc0
@@ -70,6 +71,7 @@ enum mlx5_qp_optpar {
MLX5_QP_OPTPAR_CQN_RCV = 1 << 19,
MLX5_QP_OPTPAR_DC_HS = 1 << 20,
MLX5_QP_OPTPAR_DC_KEY = 1 << 21,
MLX5_QP_OPTPAR_COUNTER_SET_ID = 1 << 25,
};
enum mlx5_qp_state {

View File

@@ -541,13 +541,30 @@ static inline void vma_set_anonymous(struct vm_area_struct *vma)
vma->vm_ops = NULL;
}
static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
return !vma->vm_ops;
}
#ifdef CONFIG_SHMEM
/*
* vma_is_shmem() is not inline because it is used only by slow
* paths in userfault.
*/
bool vma_is_shmem(struct vm_area_struct *vma);
#else
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
#endif
int vma_is_stack_for_current(struct vm_area_struct *vma);
/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
struct mmu_gather;
struct inode;
#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
return 0;
@@ -937,8 +954,6 @@ static inline bool is_zone_device_page(const struct page *page)
#endif
#ifdef CONFIG_DEV_PAGEMAP_OPS
void dev_pagemap_get_ops(void);
void dev_pagemap_put_ops(void);
void __put_devmap_managed_page(struct page *page);
DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
static inline bool put_devmap_managed_page(struct page *page)
@@ -949,7 +964,6 @@ static inline bool put_devmap_managed_page(struct page *page)
return false;
switch (page->pgmap->type) {
case MEMORY_DEVICE_PRIVATE:
case MEMORY_DEVICE_PUBLIC:
case MEMORY_DEVICE_FS_DAX:
__put_devmap_managed_page(page);
return true;
@@ -959,60 +973,28 @@ static inline bool put_devmap_managed_page(struct page *page)
return false;
}
static inline bool is_device_private_page(const struct page *page)
{
return is_zone_device_page(page) &&
page->pgmap->type == MEMORY_DEVICE_PRIVATE;
}
static inline bool is_device_public_page(const struct page *page)
{
return is_zone_device_page(page) &&
page->pgmap->type == MEMORY_DEVICE_PUBLIC;
}
#ifdef CONFIG_PCI_P2PDMA
static inline bool is_pci_p2pdma_page(const struct page *page)
{
return is_zone_device_page(page) &&
page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
}
#else /* CONFIG_PCI_P2PDMA */
static inline bool is_pci_p2pdma_page(const struct page *page)
{
return false;
}
#endif /* CONFIG_PCI_P2PDMA */
#else /* CONFIG_DEV_PAGEMAP_OPS */
static inline void dev_pagemap_get_ops(void)
{
}
static inline void dev_pagemap_put_ops(void)
{
}
static inline bool put_devmap_managed_page(struct page *page)
{
return false;
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */
static inline bool is_device_private_page(const struct page *page)
{
return false;
}
static inline bool is_device_public_page(const struct page *page)
{
return false;
return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
is_zone_device_page(page) &&
page->pgmap->type == MEMORY_DEVICE_PRIVATE;
}
static inline bool is_pci_p2pdma_page(const struct page *page)
{
return false;
return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
IS_ENABLED(CONFIG_PCI_P2PDMA) &&
is_zone_device_page(page) &&
page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */
/* 127: arbitrary random number, small enough to assemble well */
#define page_ref_zero_or_close_to_overflow(page) \
@@ -1436,10 +1418,8 @@ struct zap_details {
pgoff_t last_index; /* Highest page->index to unmap */
};
struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte, bool with_public_device);
#define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false)
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd);
@@ -1580,6 +1560,10 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
int get_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
struct task_struct *task, bool bypass_rlim);
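account_locked_vm() and its __-prefixed variant centralize the locked_vm bookkeeping that callers previously open-coded against RLIMIT_MEMLOCK. A minimal hedged sketch of the expected charge/uncharge pattern; the pinning step itself is elided:

static int my_charge_and_pin(struct mm_struct *mm, unsigned long npages)
{
	int ret;

	ret = account_locked_vm(mm, npages, true);	/* charge locked_vm */
	if (ret)
		return ret;				/* RLIMIT_MEMLOCK exceeded */

	ret = 0; /* ... pin the pages here, setting ret on failure ... */
	if (ret)
		account_locked_vm(mm, npages, false);	/* undo the charge */
	return ret;
}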
/* Container for pinned pfns / pages */
struct frame_vector {
unsigned int nr_allocated; /* Number of frames we have space for */
@@ -1653,23 +1637,6 @@ int clear_page_dirty_for_io(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
return !vma->vm_ops;
}
#ifdef CONFIG_SHMEM
/*
* The vma_is_shmem is not inline because it is used only by slow
* paths in userfault.
*/
bool vma_is_shmem(struct vm_area_struct *vma);
#else
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
#endif
int vma_is_stack_for_current(struct vm_area_struct *vma);
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long len,
@@ -1787,7 +1754,7 @@ static inline void sync_mm_rss(struct mm_struct *mm)
}
#endif
#ifndef __HAVE_ARCH_PTE_DEVMAP
#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
return 0;
@@ -2791,11 +2758,17 @@ extern int randomize_va_space;
#endif
const char * arch_vma_name(struct vm_area_struct *vma);
#ifdef CONFIG_MMU
void print_vma_addr(char *prefix, unsigned long rip);
#else
static inline void print_vma_addr(char *prefix, unsigned long rip)
{
}
#endif
void *sparse_buffer_alloc(unsigned long size);
struct page *sparse_mem_map_populate(unsigned long pnum, int nid,
struct vmem_altmap *altmap);
struct page * __populate_section_memmap(unsigned long pfn,
unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);

View File

@@ -158,7 +158,7 @@ struct page {
struct { /* ZONE_DEVICE pages */
/** @pgmap: Points to the hosting device page map. */
struct dev_pagemap *pgmap;
unsigned long hmm_data;
void *zone_device_data;
unsigned long _zd_pad_1; /* uses mapping */
};
@@ -503,7 +503,7 @@ struct mm_struct {
#endif
struct work_struct async_put_work;
#if IS_ENABLED(CONFIG_HMM)
#ifdef CONFIG_HMM_MIRROR
/* HMM needs to track a few things per mm */
struct hmm *hmm;
#endif

View File

@@ -855,18 +855,6 @@ static inline int local_memory_node(int node_id) { return node_id; };
*/
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
return zone_idx(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
return false;
}
#endif
/*
* Returns true if a zone has pages managed by the buddy allocator.
* All the reclaim decisions have to use this function rather than
@@ -1160,6 +1148,29 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec)
#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
#define SUBSECTION_SHIFT 21
#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))
#if SUBSECTION_SHIFT > SECTION_SIZE_BITS
#error Subsection size exceeds section size
#else
#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
#endif
#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
struct mem_section_usage {
DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
/* See declaration of similar field in struct zone */
unsigned long pageblock_flags[0];
};
void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
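For a sense of scale, assuming the common x86-64 values PAGE_SHIFT = 12 and SECTION_SIZE_BITS = 27: SUBSECTION_SHIFT = 21 makes a subsection 2 MiB, so PAGES_PER_SUBSECTION = 1 << (21 - 12) = 512 and SUBSECTIONS_PER_SECTION = 1 << (27 - 21) = 64, i.e. the subsection_map above fits in a single 64-bit word per 128 MiB section.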
struct page;
struct page_ext;
struct mem_section {
@@ -1177,8 +1188,7 @@ struct mem_section {
*/
unsigned long section_mem_map;
/* See declaration of similar field in struct zone */
unsigned long *pageblock_flags;
struct mem_section_usage *usage;
#ifdef CONFIG_PAGE_EXTENSION
/*
* If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
@@ -1209,6 +1219,11 @@ extern struct mem_section **mem_section;
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif
static inline unsigned long *section_to_usemap(struct mem_section *ms)
{
return ms->usage->pageblock_flags;
}
static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
@@ -1219,8 +1234,8 @@ static inline struct mem_section *__nr_to_section(unsigned long nr)
return NULL;
return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);
extern unsigned long __section_nr(struct mem_section *ms);
extern size_t mem_section_usage_size(void);
/*
* We use the lower bits of the mem_map pointer to store
@@ -1238,7 +1253,8 @@ extern unsigned long usemap_size(void);
#define SECTION_MARKED_PRESENT (1UL<<0)
#define SECTION_HAS_MEM_MAP (1UL<<1)
#define SECTION_IS_ONLINE (1UL<<2)
#define SECTION_MAP_LAST_BIT (1UL<<3)
#define SECTION_IS_EARLY (1UL<<3)
#define SECTION_MAP_LAST_BIT (1UL<<4)
#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT 3
@@ -1264,6 +1280,11 @@ static inline int valid_section(struct mem_section *section)
return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}
static inline int early_section(struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_IS_EARLY));
}
static inline int valid_section_nr(unsigned long nr)
{
return valid_section(__nr_to_section(nr));
@@ -1291,14 +1312,42 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn)
return __nr_to_section(pfn_to_section_nr(pfn));
}
extern int __highest_present_section_nr;
extern unsigned long __highest_present_section_nr;
static inline int subsection_map_index(unsigned long pfn)
{
return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
int idx = subsection_map_index(pfn);
return test_bit(idx, ms->usage->subsection_map);
}
#else
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
return 1;
}
#endif
#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
struct mem_section *ms;
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
ms = __nr_to_section(pfn_to_section_nr(pfn));
if (!valid_section(ms))
return 0;
/*
* Traditionally early sections always returned pfn_valid() for
* the entire section-sized span.
*/
return early_section(ms) || pfn_section_valid(ms, pfn);
}
#endif
@@ -1330,6 +1379,7 @@ void sparse_init(void);
#define sparse_init() do {} while (0)
#define sparse_index_init(_sec, _nid) do {} while (0)
#define pfn_present pfn_valid
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */
/*

View File

@@ -16,6 +16,25 @@ typedef unsigned long kernel_ulong_t;
#define PCI_ANY_ID (~0)
/**
* struct pci_device_id - PCI device ID structure
* @vendor: Vendor ID to match (or PCI_ANY_ID)
* @device: Device ID to match (or PCI_ANY_ID)
* @subvendor: Subsystem vendor ID to match (or PCI_ANY_ID)
* @subdevice: Subsystem device ID to match (or PCI_ANY_ID)
* @class: Device class, subclass, and "interface" to match.
* See Appendix D of the PCI Local Bus Spec or
* include/linux/pci_ids.h for a full list of classes.
* Most drivers do not need to specify class/class_mask
* as vendor/device is normally sufficient.
* @class_mask: Limit which sub-fields of the class field are compared.
* See drivers/scsi/sym53c8xx_2/ for an example of usage.
* @driver_data: Data private to the driver.
* Most drivers don't need to use the driver_data field.
* Best practice is to use driver_data as an index
* into a static list of equivalent device types,
* instead of using it as a pointer.
*/
struct pci_device_id {
__u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
__u32 subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
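A hedged sketch of the @driver_data convention described in the kernel-doc above: an index into a static board table rather than a pointer. All IDs and names here are made up for illustration:

enum my_board { MY_BOARD_A, MY_BOARD_B };

static const struct { const char *name; } my_boards[] = {
	[MY_BOARD_A] = { "board-a" },
	[MY_BOARD_B] = { "board-b" },
};

static const struct pci_device_id my_pci_ids[] = {
	{ PCI_DEVICE(0x1234, 0x0001), .driver_data = MY_BOARD_A },
	{ PCI_DEVICE(0x1234, 0x0002), .driver_data = MY_BOARD_B },
	{ }
};

In probe(), the matched entry is then recovered as my_boards[id->driver_data].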
@@ -257,17 +276,17 @@ struct pcmcia_device_id {
__u16 match_flags;
__u16 manf_id;
__u16 card_id;
__u8 func_id;
/* for real multi-function devices */
__u8 function;
/* for pseudo multi-function devices */
__u8 device_no;
__u32 prod_id_hash[4];
/* not matched against in kernelspace */
const char * prod_id[4];
@@ -798,6 +817,7 @@ struct tee_client_device_id {
*/
struct wmi_device_id {
const char guid_string[UUID_STRING_LEN+1];
const void *context;
};
#endif /* LINUX_MOD_DEVICETABLE_H */
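A hedged sketch of how the new @context member might be used in a WMI driver's match table; the GUID and payload are illustrative only:

static const char my_event_hint[] = "hotkey";

static const struct wmi_device_id my_wmi_ids[] = {
	{ .guid_string = "ABCDEF12-3456-7890-ABCD-EF1234567890",
	  .context = my_event_hint },
	{ }
};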

View File

@@ -29,6 +29,11 @@ void *module_alloc(unsigned long size);
/* Free memory returned from module_alloc. */
void module_memfree(void *module_region);
/* Determines if the section name is an exit section (that is only used during
* module unloading)
*/
bool module_exit_section(const char *name);
/*
* Apply the given relocation to the (simplified) ELF. Return -error
* or 0.

View File

@@ -64,6 +64,10 @@ struct ti_sci_inta_msi_desc {
* @msg: The last set MSI message cached for reuse
* @affinity: Optional pointer to a cpu affinity mask for this descriptor
*
* @write_msi_msg: Callback that may be called when the MSI message
* address or data changes
* @write_msi_msg_data: Data parameter for the callback.
*
* @masked: [PCI MSI/X] Mask bits
* @is_msix: [PCI MSI/X] True if MSI-X
* @multiple: [PCI MSI/X] log2 num of messages allocated
@@ -90,6 +94,9 @@ struct msi_desc {
const void *iommu_cookie;
#endif
void (*write_msi_msg)(struct msi_desc *entry, void *data);
void *write_msi_msg_data;
union {
/* PCI MSI/X specific data */
struct {
@@ -100,6 +107,7 @@ struct msi_desc {
u8 multi_cap : 3;
u8 maskbit : 1;
u8 is_64 : 1;
u8 is_virtual : 1;
u16 entry_nr;
unsigned default_irq;
} msi_attrib;
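A hypothetical consumer of the new callback hooks; the register offsets and the my_dev type are assumptions, not part of this API:

#include <linux/io.h>
#include <linux/msi.h>

#define MY_MSI_ADDR_LO	0x00	/* made-up device registers */
#define MY_MSI_ADDR_HI	0x04
#define MY_MSI_DATA	0x08

struct my_dev {
	void __iomem *base;
};

/* Mirror the composed MSI message into device registers whenever
 * the address or data changes. */
static void my_write_msi_msg(struct msi_desc *desc, void *data)
{
	struct my_dev *md = data;

	writel(desc->msg.address_lo, md->base + MY_MSI_ADDR_LO);
	writel(desc->msg.address_hi, md->base + MY_MSI_ADDR_HI);
	writel(desc->msg.data, md->base + MY_MSI_DATA);
}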

View File

@@ -219,6 +219,13 @@ struct cfi_pri_amdstd {
uint8_t VppMin;
uint8_t VppMax;
uint8_t TopBottom;
/* The fields below were added in version 1.5 */
uint8_t ProgramSuspend;
uint8_t UnlockBypass;
uint8_t SecureSiliconSector;
uint8_t SoftwareFeatures;
#define CFI_POLL_STATUS_REG BIT(0)
#define CFI_POLL_DQ BIT(1)
} __packed;
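A sketch of how a chip driver might consult the new v1.5 fields, assuming the usual ASCII MajorVersion/MinorVersion members of the primary extension table:

#include <linux/mtd/cfi.h>

/* Fields past TopBottom are only defined from primary vendor
 * extension version 1.5 onward, so gate on the version first. */
static bool my_poll_via_status_reg(const struct cfi_pri_amdstd *extp)
{
	if (extp->MajorVersion > '1' ||
	    (extp->MajorVersion == '1' && extp->MinorVersion >= '5'))
		return extp->SoftwareFeatures & CFI_POLL_STATUS_REG;
	return false;
}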
/* Vendor-Specific PRI for Atmel chips (command set 0x0002) */

View File

@@ -0,0 +1,84 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
*/
#ifndef __LINUX_MTD_HYPERBUS_H__
#define __LINUX_MTD_HYPERBUS_H__
#include <linux/mtd/map.h>
enum hyperbus_memtype {
HYPERFLASH,
HYPERRAM,
};
/**
* struct hyperbus_device - struct representing HyperBus slave device
* @map: map_info struct for accessing MMIO HyperBus flash memory
* @np: pointer to HyperBus slave device node
* @mtd: pointer to MTD struct
* @ctlr: pointer to HyperBus controller struct
* @memtype: type of memory device: HyperFlash or HyperRAM
*/
struct hyperbus_device {
struct map_info map;
struct device_node *np;
struct mtd_info *mtd;
struct hyperbus_ctlr *ctlr;
enum hyperbus_memtype memtype;
};
/**
* struct hyperbus_ops - struct representing custom HyperBus operations
* @read16: read 16 bits of data from flash in a single burst. Used to read
* from a non-default address space, such as ID/CFI space
* @write16: write 16 bits of data to flash in a single burst. Used to
* send a command to flash or write a single 16-bit word at a time.
* @copy_from: copy data from flash memory
* @copy_to: copy data to flash memory
* @calibrate: calibrate HyperBus controller
*/
struct hyperbus_ops {
u16 (*read16)(struct hyperbus_device *hbdev, unsigned long addr);
void (*write16)(struct hyperbus_device *hbdev,
unsigned long addr, u16 val);
void (*copy_from)(struct hyperbus_device *hbdev, void *to,
unsigned long from, ssize_t len);
void (*copy_to)(struct hyperbus_device *dev, unsigned long to,
const void *from, ssize_t len);
int (*calibrate)(struct hyperbus_device *dev);
};
/**
* struct hyperbus_ctlr - struct representing HyperBus controller
* @dev: pointer to HyperBus controller device
* @calibrated: flag to indicate ctlr calibration sequence is complete
* @ops: HyperBus controller ops
*/
struct hyperbus_ctlr {
struct device *dev;
bool calibrated;
const struct hyperbus_ops *ops;
};
/**
* hyperbus_register_device - probe and register a HyperBus slave memory device
* @hbdev: hyperbus_device struct with dev, np and ctlr field populated
*
* Return: 0 on success, a negative error code on failure.
*/
int hyperbus_register_device(struct hyperbus_device *hbdev);
/**
* hyperbus_unregister_device - deregister HyperBus slave memory device
* @hbdev: hyperbus_device to be unregistered
*
* Return: 0 on success, a negative error code on failure.
*/
int hyperbus_unregister_device(struct hyperbus_device *hbdev);
#endif /* __LINUX_MTD_HYPERBUS_H__ */
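To show how the pieces fit together, here is a hedged sketch of a minimal platform driver wiring up a controller and one HyperFlash device. Everything prefixed my_ is invented, and leaving the read16/write16/copy_* callbacks NULL assumes the core falls back to plain MMIO accessors:

#include <linux/mtd/hyperbus.h>
#include <linux/of.h>
#include <linux/platform_device.h>

struct my_hb {
	struct hyperbus_ctlr ctlr;
	struct hyperbus_device hbdev;
};

static int my_hb_calibrate(struct hyperbus_device *hbdev)
{
	return 1;	/* pretend calibration succeeded */
}

static const struct hyperbus_ops my_hb_ops = {
	.calibrate = my_hb_calibrate,
};

static int my_hb_probe(struct platform_device *pdev)
{
	struct my_hb *hb;

	hb = devm_kzalloc(&pdev->dev, sizeof(*hb), GFP_KERNEL);
	if (!hb)
		return -ENOMEM;

	hb->ctlr.dev = &pdev->dev;
	hb->ctlr.ops = &my_hb_ops;

	/* One slave device, described by the first child DT node. */
	hb->hbdev.ctlr = &hb->ctlr;
	hb->hbdev.np = of_get_next_child(pdev->dev.of_node, NULL);
	return hyperbus_register_device(&hb->hbdev);
}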

View File

@@ -316,6 +316,12 @@ struct mtd_info {
int (*_get_device) (struct mtd_info *mtd);
void (*_put_device) (struct mtd_info *mtd);
/*
* Flag indicating a panic write; low-level drivers can take
* appropriate action if required to ensure writes go through.
*/
bool oops_panic_write;
struct notifier_block reboot_notifier; /* default mode before reboot */
/* ECC status information */
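A sketch of how a low-level driver might honor the new flag, routing panic-time writes through a polled, IRQ-free path so the oops data actually reaches the flash; both my_ helpers are hypothetical:

#include <linux/mtd/mtd.h>

static int my_flash_write_polled(struct mtd_info *mtd, loff_t to, size_t len,
				 size_t *retlen, const u_char *buf);
static int my_flash_write_irq(struct mtd_info *mtd, loff_t to, size_t len,
			      size_t *retlen, const u_char *buf);

static int my_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
			  size_t *retlen, const u_char *buf)
{
	/* Interrupts may never arrive during a panic: poll instead. */
	if (mtd->oops_panic_write)
		return my_flash_write_polled(mtd, to, len, retlen, buf);

	return my_flash_write_irq(mtd, to, len, retlen, buf);
}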

View File

@@ -77,6 +77,7 @@
#define ONENAND_DEVICE_DENSITY_1Gb (0x003)
#define ONENAND_DEVICE_DENSITY_2Gb (0x004)
#define ONENAND_DEVICE_DENSITY_4Gb (0x005)
#define ONENAND_DEVICE_DENSITY_8Gb (0x006)
/*
* Version ID Register F002h (R)

View File

@@ -874,6 +874,42 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
const struct nand_op_parser *parser,
const struct nand_operation *op, bool check_only);
static inline void nand_op_trace(const char *prefix,
const struct nand_op_instr *instr)
{
#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
switch (instr->type) {
case NAND_OP_CMD_INSTR:
pr_debug("%sCMD [0x%02x]\n", prefix,
instr->ctx.cmd.opcode);
break;
case NAND_OP_ADDR_INSTR:
pr_debug("%sADDR [%d cyc: %*ph]\n", prefix,
instr->ctx.addr.naddrs,
instr->ctx.addr.naddrs < 64 ?
instr->ctx.addr.naddrs : 64,
instr->ctx.addr.addrs);
break;
case NAND_OP_DATA_IN_INSTR:
pr_debug("%sDATA_IN [%d B%s]\n", prefix,
instr->ctx.data.len,
instr->ctx.data.force_8bit ?
", force 8-bit" : "");
break;
case NAND_OP_DATA_OUT_INSTR:
pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
instr->ctx.data.len,
instr->ctx.data.force_8bit ?
", force 8-bit" : "");
break;
case NAND_OP_WAITRDY_INSTR:
pr_debug("%sWAITRDY [max %d ms]\n", prefix,
instr->ctx.waitrdy.timeout_ms);
break;
}
#endif
}
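Typical use would be from a controller's exec_op loop, tracing each instruction before issuing it; a sketch with a made-up my_issue_instr helper:

#include <linux/mtd/rawnand.h>

static int my_issue_instr(struct nand_chip *chip,
			  const struct nand_op_instr *instr);

static int my_exec_op(struct nand_chip *chip,
		      const struct nand_operation *op, bool check_only)
{
	unsigned int i;
	int ret;

	for (i = 0; i < op->ninstrs; i++) {
		/* Compiles away unless dynamic debug or DEBUG is on. */
		nand_op_trace("  ", &op->instrs[i]);
		if (!check_only) {
			ret = my_issue_instr(chip, &op->instrs[i]);
			if (ret)
				return ret;
		}
	}
	return 0;
}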
/**
* struct nand_controller_ops - Controller operations
*

View File

@@ -68,30 +68,60 @@
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 1))
#define SPINAND_PAGE_READ_FROM_CACHE_OP_3A(fast, addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 1))
#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 2))
#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 2))
#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 4))
#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 4))
#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
SPI_MEM_OP_ADDR(2, addr, 2), \
SPI_MEM_OP_DUMMY(ndummy, 2), \
SPI_MEM_OP_DATA_IN(len, buf, 2))
#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP_3A(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
SPI_MEM_OP_ADDR(3, addr, 2), \
SPI_MEM_OP_DUMMY(ndummy, 2), \
SPI_MEM_OP_DATA_IN(len, buf, 2))
#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
SPI_MEM_OP_ADDR(2, addr, 4), \
SPI_MEM_OP_DUMMY(ndummy, 4), \
SPI_MEM_OP_DATA_IN(len, buf, 4))
#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP_3A(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
SPI_MEM_OP_ADDR(3, addr, 4), \
SPI_MEM_OP_DUMMY(ndummy, 4), \
SPI_MEM_OP_DATA_IN(len, buf, 4))
#define SPINAND_PROG_EXEC_OP(addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
@@ -197,6 +227,7 @@ struct spinand_manufacturer {
extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
extern const struct spinand_manufacturer macronix_spinand_manufacturer;
extern const struct spinand_manufacturer micron_spinand_manufacturer;
extern const struct spinand_manufacturer paragon_spinand_manufacturer;
extern const struct spinand_manufacturer toshiba_spinand_manufacturer;
extern const struct spinand_manufacturer winbond_spinand_manufacturer;
@@ -260,7 +291,7 @@ struct spinand_ecc_info {
*/
struct spinand_info {
const char *model;
u8 devid;
u16 devid;
u32 flags;
struct nand_memory_organization memorg;
struct nand_ecc_req eccreq;
@@ -422,7 +453,7 @@ static inline void spinand_set_of_node(struct spinand_device *spinand,
int spinand_match_and_init(struct spinand_device *dev,
const struct spinand_info *table,
unsigned int table_size, u8 devid);
unsigned int table_size, u16 devid);
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
int spinand_select_target(struct spinand_device *spinand, unsigned int target);
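With devid widened to u16, a table entry can carry a two-byte device ID; a hypothetical entry and match call follow, with invented values, and the byte order used to compose the ID is an assumption:

#include <linux/kernel.h>
#include <linux/mtd/spinand.h>

static const struct spinand_info example_spinand_table[] = {
	{ .model = "EXAMPLE-1G", .devid = 0xa1e1 },
};

/* In a detect path, assuming two ID bytes were read into id[]. */
static int example_detect(struct spinand_device *spinand, const u8 *id)
{
	return spinand_match_and_init(spinand, example_spinand_table,
				      ARRAY_SIZE(example_spinand_table),
				      (id[0] << 8) | id[1]);
}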

View File

@@ -151,7 +151,7 @@ static inline bool mutex_is_locked(struct mutex *lock)
/*
* See kernel/locking/mutex.c for detailed documentation of these APIs.
* Also see Documentation/locking/mutex-design.txt.
* Also see Documentation/locking/mutex-design.rst.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);

View File

@@ -660,6 +660,7 @@ enum pnfs_update_layout_reason {
PNFS_UPDATE_LAYOUT_BLOCKED,
PNFS_UPDATE_LAYOUT_INVALID_OPEN,
PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET,
PNFS_UPDATE_LAYOUT_EXIT,
};
#define NFS4_OP_MAP_NUM_LONGS \

View File

@@ -223,6 +223,8 @@ struct nfs4_copy_state {
#define NFS_INO_INVALID_MTIME BIT(10) /* cached mtime is invalid */
#define NFS_INO_INVALID_SIZE BIT(11) /* cached size is invalid */
#define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */
#define NFS_INO_DATA_INVAL_DEFER \
BIT(13) /* Deferred cache invalidation */
#define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \
| NFS_INO_INVALID_CTIME \

View File

@@ -58,6 +58,7 @@ struct nfs_client {
struct nfs_subversion * cl_nfs_mod; /* pointer to nfs version module */
u32 cl_minorversion;/* NFSv4 minorversion */
unsigned int cl_nconnect; /* Number of connections */
const char * cl_principal; /* used for machine cred */
#if IS_ENABLED(CONFIG_NFS_V4)

View File

@@ -137,10 +137,7 @@ static inline int register_one_node(int nid)
extern void unregister_one_node(int nid);
extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int register_mem_sect_under_node(struct memory_block *mem_blk,
void *arg);
extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
unsigned long phys_index);
extern void unregister_memory_block_under_nodes(struct memory_block *mem_blk);
extern int register_memory_node_under_compute_node(unsigned int mem_nid,
unsigned int cpu_nid,
@@ -171,15 +168,8 @@ static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
return 0;
}
static inline int register_mem_sect_under_node(struct memory_block *mem_blk,
					void *arg)
{
	return 0;
}
static inline void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
}
static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
unsigned long phys_index)
{
return 0;
}
static inline void register_hugetlbfs_with_node(node_registration_func_t reg,

Some files were not shown because too many files have changed in this diff.