Merge branch 'pm-opp' into pm-cpufreq
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -63,6 +63,18 @@
 #define CEPH_FEATURE_OSD_MIN_SIZE_RECOVERY (1ULL<<49)
 // duplicated since it was introduced at the same time as MIN_SIZE_RECOVERY
 #define CEPH_FEATURE_OSD_PROXY_FEATURES (1ULL<<49) /* overlap w/ above */
+#define CEPH_FEATURE_MON_METADATA (1ULL<<50)
+#define CEPH_FEATURE_OSD_BITWISE_HOBJ_SORT (1ULL<<51) /* can sort objs bitwise */
+#define CEPH_FEATURE_OSD_PROXY_WRITE_FEATURES (1ULL<<52)
+#define CEPH_FEATURE_ERASURE_CODE_PLUGINS_V3 (1ULL<<53)
+#define CEPH_FEATURE_OSD_HITSET_GMT (1ULL<<54)
+#define CEPH_FEATURE_HAMMER_0_94_4 (1ULL<<55)
+#define CEPH_FEATURE_NEW_OSDOP_ENCODING (1ULL<<56) /* New, v7 encoding */
+#define CEPH_FEATURE_MON_STATEFUL_SUB (1ULL<<57) /* stateful mon subscription */
+#define CEPH_FEATURE_MON_ROUTE_OSDMAP (1ULL<<57) /* peon sends osdmaps */
+#define CEPH_FEATURE_CRUSH_TUNABLES5 (1ULL<<58) /* chooseleaf stable mode */
+// duplicated since it was introduced at the same time as CEPH_FEATURE_CRUSH_TUNABLES5
+#define CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING (1ULL<<58) /* New, v7 encoding */
 
 /*
  * The introduction of CEPH_FEATURE_OSD_SNAPMAPPER caused the feature
@@ -108,7 +120,9 @@ static inline u64 ceph_sanitize_features(u64 features)
	 CEPH_FEATURE_CRUSH_TUNABLES3 |		\
	 CEPH_FEATURE_OSD_PRIMARY_AFFINITY |	\
	 CEPH_FEATURE_MSGR_KEEPALIVE2 |		\
-	 CEPH_FEATURE_CRUSH_V4)
+	 CEPH_FEATURE_CRUSH_V4 |		\
+	 CEPH_FEATURE_CRUSH_TUNABLES5 |		\
+	 CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING)
 
 #define CEPH_FEATURES_REQUIRED_DEFAULT		\
	(CEPH_FEATURE_NOSRCADDR |		\
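Note on the shared bit values above: 1ULL<<49, 1ULL<<57 and 1ULL<<58 each carry two feature names because, per the in-tree comments, the features were introduced together; either name tests the same wire bit. A sketch (illustrative only):

	if (features & CEPH_FEATURE_CRUSH_TUNABLES5) {
		/* bit 58 set: the peer necessarily also speaks
		 * CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING, by definition */
	}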
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -59,7 +59,8 @@ enum {
	CRUSH_RULE_SET_CHOOSELEAF_TRIES = 9, /* override chooseleaf_descend_once */
	CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES = 10,
	CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES = 11,
-	CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12
+	CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12,
+	CRUSH_RULE_SET_CHOOSELEAF_STABLE = 13
 };
 
 /*
@@ -205,6 +206,11 @@ struct crush_map {
	 * mappings line up a bit better with previous mappings. */
	__u8 chooseleaf_vary_r;
 
+	/* if true, it makes chooseleaf firstn to return stable results (if
+	 * no local retry) so that data migrations would be optimal when some
+	 * device fails. */
+	__u8 chooseleaf_stable;
+
 #ifndef __KERNEL__
	/*
	 * version 0 (original) of straw_calc has various flaws. version 1
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -14,6 +14,17 @@ int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
		dax_iodone_t);
 int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
		dax_iodone_t);
+
+#ifdef CONFIG_FS_DAX
+struct page *read_dax_sector(struct block_device *bdev, sector_t n);
+#else
+static inline struct page *read_dax_sector(struct block_device *bdev,
+		sector_t n)
+{
+	return ERR_PTR(-ENXIO);
+}
+#endif
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
		unsigned int flags, get_block_t, dax_iodone_t);
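The !CONFIG_FS_DAX stub above returns an ERR_PTR rather than NULL, so callers stay free of #ifdefs. A minimal sketch of a call site (bdev and n are placeholders):

	struct page *page = read_dax_sector(bdev, n);

	if (IS_ERR(page))
		return PTR_ERR(page);	/* -ENXIO when DAX is compiled out */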
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -484,9 +484,6 @@ struct block_device {
	int			bd_fsfreeze_count;
	/* Mutex for freeze */
	struct mutex		bd_fsfreeze_mutex;
-#ifdef CONFIG_FS_DAX
-	int			bd_map_count;
-#endif
 };
 
 /*
@@ -2907,7 +2904,7 @@ extern void replace_mount_options(struct super_block *sb, char *options);
 
 static inline bool io_is_direct(struct file *filp)
 {
-	return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp));
+	return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host);
 }
 
 static inline int iocb_flags(struct file *file)
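The io_is_direct() change matters for raw block-device files: file_inode() yields the device node's inode on the surrounding filesystem, whereas filp->f_mapping->host is the block device's own backing inode, which is where the DAX state is actually tracked.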
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -547,16 +547,16 @@ static inline bool pm_suspended_storage(void)
 }
 #endif /* CONFIG_PM_SLEEP */
 
-#ifdef CONFIG_CMA
-
+#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
 /* The below functions must be run on a range from a single zone. */
 extern int alloc_contig_range(unsigned long start, unsigned long end,
			      unsigned migratetype);
 extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
+#endif
 
+#ifdef CONFIG_CMA
 /* CMA stuff */
 extern void init_cma_reserved_pageblock(struct page *page);
-
 #endif
 
 #endif /* __LINUX_GFP_H */
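With the guard relaxed, alloc_contig_range() is available whenever memory isolation and compaction are built in, not only under CMA. A hedged sketch of the call pattern (start_pfn and nr_pages are placeholders; the range must sit within a single zone):

	int ret = alloc_contig_range(start_pfn, start_pfn + nr_pages,
				     MIGRATE_MOVABLE);

	if (ret)
		return ret;
	/* ... use the allocated pages ... */
	free_contig_range(start_pfn, nr_pages);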
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -51,7 +51,7 @@ enum mem_cgroup_stat_index {
	MEM_CGROUP_STAT_SWAP,	/* # of pages, swapped out */
	MEM_CGROUP_STAT_NSTATS,
	/* default hierarchy stats */
-	MEMCG_SOCK,
+	MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS,
	MEMCG_NR_STAT,
 };
 
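The explicit initializer is not cosmetic: without it MEMCG_SOCK would be MEM_CGROUP_STAT_NSTATS + 1, leaving a hole at index NSTATS in any array sized by MEMCG_NR_STAT. Pinning it keeps the combined legacy and default-hierarchy index space dense, as a toy enum shows:

	enum { A, NSTATS, B = NSTATS, NR_STAT };	/* NR_STAT == 2 */
	enum { C, NSTATS2, D, NR_STAT2 };		/* NR_STAT2 == 3, index 1 wasted */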
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -201,11 +201,13 @@ extern unsigned int kobjsize(const void *objp);
 #endif
 
 #ifdef CONFIG_STACK_GROWSUP
-#define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#define VM_STACK	VM_GROWSUP
 #else
-#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#define VM_STACK	VM_GROWSDOWN
 #endif
 
+#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+
 /*
  * Special vmas that are non-mergable, non-mlock()able.
  * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
@@ -1341,8 +1343,7 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
	       !vma_growsup(vma->vm_next, addr);
 }
 
-extern struct task_struct *task_of_stack(struct task_struct *task,
-				struct vm_area_struct *vma, bool in_group);
+int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
 
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -424,9 +424,9 @@ struct mm_struct {
	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long pinned_vm;	/* Refcount permanently increased */
-	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED/GROWSDOWN */
-	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE */
-	unsigned long stack_vm;		/* VM_GROWSUP/DOWN */
+	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
+	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
+	unsigned long stack_vm;		/* VM_STACK */
	unsigned long def_flags;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
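Together with the VM_STACK alias introduced in mm.h above, the rewritten comments describe a classification in which a vma lands in at most one counter, independent of stack growth direction. A sketch of the bookkeeping this implies, assuming the is_*_mapping() helper names used elsewhere in the same series:

	mm->total_vm += npages;
	if (is_exec_mapping(flags))		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))	/* VM_STACK */
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))	/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
		mm->data_vm += npages;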
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -682,6 +682,12 @@ typedef struct pglist_data {
	 */
	unsigned long first_deferred_pfn;
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	spinlock_t split_queue_lock;
+	struct list_head split_queue;
+	unsigned long split_queue_len;
+#endif
 } pg_data_t;
 
 #define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -512,7 +512,6 @@ static inline void napi_enable(struct napi_struct *n)
	clear_bit(NAPI_STATE_NPSVC, &n->state);
 }
 
-#ifdef CONFIG_SMP
 /**
  *	napi_synchronize - wait until NAPI is not running
  *	@n: napi context
@@ -523,12 +522,12 @@ static inline void napi_enable(struct napi_struct *n)
  */
 static inline void napi_synchronize(const struct napi_struct *n)
 {
-	while (test_bit(NAPI_STATE_SCHED, &n->state))
-		msleep(1);
+	if (IS_ENABLED(CONFIG_SMP))
+		while (test_bit(NAPI_STATE_SCHED, &n->state))
+			msleep(1);
+	else
+		barrier();
 }
-#else
-# define napi_synchronize(n)	barrier()
-#endif
 
 enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
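Folding the #ifdef pair into IS_ENABLED(CONFIG_SMP) keeps napi_synchronize() a real, type-checked function in both configurations; the macro expands to a compile-time 0 or 1, so the dead branch is discarded by the compiler rather than hidden from it.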
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -929,7 +929,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
	return num;
 }
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && !defined(MODULE)
 #define _OF_DECLARE(table, name, compat, fn, fn_type)			\
	static const struct of_device_id __of_table_##name		\
		__used __section(__##table##_of_table)			\
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -29,7 +29,7 @@ static inline pfn_t pfn_to_pfn_t(unsigned long pfn)
	return __pfn_to_pfn_t(pfn, 0);
 }
 
-extern pfn_t phys_to_pfn_t(dma_addr_t addr, unsigned long flags);
+extern pfn_t phys_to_pfn_t(phys_addr_t addr, unsigned long flags);
 
 static inline bool pfn_t_has_page(pfn_t pfn)
 {
@@ -48,7 +48,7 @@ static inline struct page *pfn_t_to_page(pfn_t pfn)
	return NULL;
 }
 
-static inline dma_addr_t pfn_t_to_phys(pfn_t pfn)
+static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
 {
	return PFN_PHYS(pfn_t_to_pfn(pfn));
 }
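Both pfn_t.h changes are type corrections rather than behavior changes: a pfn converts to and from a CPU physical address, which is phys_addr_t territory; dma_addr_t is a device-visible bus address and need not match, and the two types can even differ in width when an IOMMU sits in between.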
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -34,6 +34,8 @@ bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
 
 int dev_pm_opp_get_opp_count(struct device *dev);
 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
+unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev);
+unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev);
 struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev);
 
 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
@@ -60,6 +62,9 @@ int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
 void dev_pm_opp_put_supported_hw(struct device *dev);
 int dev_pm_opp_set_prop_name(struct device *dev, const char *name);
 void dev_pm_opp_put_prop_name(struct device *dev);
+int dev_pm_opp_set_regulator(struct device *dev, const char *name);
+void dev_pm_opp_put_regulator(struct device *dev);
+int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
 #else
 static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
 {
@@ -86,6 +91,16 @@ static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
	return 0;
 }
 
+static inline unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
+{
+	return 0;
+}
+
+static inline unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
+{
+	return 0;
+}
+
 static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
 {
	return NULL;
@@ -151,6 +166,18 @@ static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
 
 static inline void dev_pm_opp_put_prop_name(struct device *dev) {}
 
+static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+{
+	return -EINVAL;
+}
+
+static inline void dev_pm_opp_put_regulator(struct device *dev) {}
+
+static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+{
+	return -EINVAL;
+}
+
 #endif	/* CONFIG_PM_OPP */
 
 #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
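dev_pm_opp_set_rate() gives OPP users a single call that looks up the target OPP and sequences the regulator and clock changes, with matching -EINVAL stubs when CONFIG_PM_OPP is off. A hedged sketch of driver usage (cpu_dev, the "cpu0" supply name, and freq_hz are placeholders):

	/* probe path: */
	ret = dev_pm_opp_set_regulator(cpu_dev, "cpu0");

	/* frequency-change path: */
	ret = dev_pm_opp_set_rate(cpu_dev, freq_hz);
	if (ret)
		dev_err(cpu_dev, "OPP switch failed: %d\n", ret);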
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -378,13 +378,29 @@ radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
 void **radix_tree_next_chunk(struct radix_tree_root *root,
			struct radix_tree_iter *iter, unsigned flags);
 
+/**
+ * radix_tree_iter_retry - retry this chunk of the iteration
+ * @iter: iterator state
+ *
+ * If we iterate over a tree protected only by the RCU lock, a race
+ * against deletion or creation may result in seeing a slot for which
+ * radix_tree_deref_retry() returns true. If so, call this function
+ * and continue the iteration.
+ */
+static inline __must_check
+void **radix_tree_iter_retry(struct radix_tree_iter *iter)
+{
+	iter->next_index = iter->index;
+	return NULL;
+}
+
 /**
  * radix_tree_chunk_size - get current chunk size
  *
  * @iter: pointer to radix tree iterator
  * Returns: current chunk size
  */
-static __always_inline unsigned
+static __always_inline long
 radix_tree_chunk_size(struct radix_tree_iter *iter)
 {
	return iter->next_index - iter->index;
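The kernel-doc above spells out when a retry is needed; a minimal sketch of the intended pattern inside an rcu_read_lock() section (the entry handling is illustrative):

	radix_tree_for_each_slot(slot, root, &iter, start) {
		void *entry = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(entry)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		/* use entry */
	}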
@@ -418,9 +434,9 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
			return slot + offset + 1;
		}
	} else {
-		unsigned size = radix_tree_chunk_size(iter) - 1;
+		long size = radix_tree_chunk_size(iter);
 
-		while (size--) {
+		while (--size > 0) {
			slot++;
			iter->index++;
			if (likely(*slot))
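The switch to a signed count is what makes the retry above safe: after radix_tree_iter_retry() sets next_index == index, radix_tree_chunk_size() returns 0, and the old unsigned arithmetic (0 - 1) wrapped to a huge value and walked past the end of the chunk; with a long, --size > 0 simply terminates.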
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -152,6 +152,8 @@ void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
 
 # define jiffies	raid6_jiffies()
 # define printk	printf
+# define pr_err(format, ...) fprintf(stderr, format, ## __VA_ARGS__)
+# define pr_info(format, ...) fprintf(stdout, format, ## __VA_ARGS__)
 # define GFP_KERNEL	0
 # define __get_free_pages(x, y)	((unsigned long)mmap(NULL, PAGE_SIZE << (y), \
							   PROT_READ|PROT_WRITE,	\
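These shims keep the shared raid6 sources building in the userspace test harness: pr_err() maps to stderr and pr_info() to stdout. The `## __VA_ARGS__` form is the GNU extension that drops the trailing comma when the format string is the only argument.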
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -109,20 +109,6 @@ static inline void put_anon_vma(struct anon_vma *anon_vma)
		__put_anon_vma(anon_vma);
 }
 
-static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
-{
-	struct anon_vma *anon_vma = vma->anon_vma;
-	if (anon_vma)
-		down_write(&anon_vma->root->rwsem);
-}
-
-static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
-{
-	struct anon_vma *anon_vma = vma->anon_vma;
-	if (anon_vma)
-		up_write(&anon_vma->root->rwsem);
-}
-
 static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
 {
	down_write(&anon_vma->root->rwsem);