Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Mostly simple overlapping changes. For example, David Ahern's adjacency
list revamp in 'net-next' conflicted with an adjacency list traversal
bug fix in 'net'.

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -326,6 +326,7 @@ struct pci_dev;
 int acpi_pci_irq_enable (struct pci_dev *dev);
 void acpi_penalize_isa_irq(int irq, int active);
 bool acpi_isa_irq_available(int irq);
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
 void acpi_pci_irq_disable (struct pci_dev *dev);
 
 extern int ec_read(u8 addr, u8 *val);
@@ -946,9 +947,17 @@ struct acpi_reference_args {
 #ifdef CONFIG_ACPI
 int acpi_dev_get_property(struct acpi_device *adev, const char *name,
                          acpi_object_type type, const union acpi_object **obj);
-int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
-                                    const char *name, size_t index,
-                                    struct acpi_reference_args *args);
+int __acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+                               const char *name, size_t index, size_t num_args,
+                               struct acpi_reference_args *args);
+
+static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+                               const char *name, size_t index,
+                               struct acpi_reference_args *args)
+{
+       return __acpi_node_get_property_reference(fwnode, name, index,
+                                                 MAX_ACPI_REFERENCE_ARGS, args);
+}
 
 int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname,
                       void **valptr);
@@ -1024,6 +1033,14 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
        return -ENXIO;
 }
 
+static inline int
+__acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+                               const char *name, size_t index, size_t num_args,
+                               struct acpi_reference_args *args)
+{
+       return -ENXIO;
+}
+
 static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
                                const char *name, size_t index,
                                struct acpi_reference_args *args)
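The acpi.h hunk above is a common back-compat pattern: the entry point grows an extra parameter (num_args) under a double-underscore name, and a static inline wrapper keeps the old signature by supplying the historical default. A minimal standalone sketch of the same pattern (all names here are hypothetical, not from the kernel):

```c
#include <stdio.h>

#define MAX_REF_ARGS 8  /* stand-in for MAX_ACPI_REFERENCE_ARGS */

/* New, more general entry point: callers can cap how many args to parse. */
static int __get_reference(const char *name, size_t index, size_t num_args)
{
        printf("lookup %s[%zu], up to %zu args\n", name, index, num_args);
        return 0;
}

/* Old API preserved as a thin wrapper passing the historical default. */
static inline int get_reference(const char *name, size_t index)
{
        return __get_reference(name, index, MAX_REF_ARGS);
}

int main(void)
{
        get_reference("dma-channels", 0);       /* legacy callers unchanged */
        __get_reference("dma-channels", 0, 2);  /* new callers can be stricter */
        return 0;
}
```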
@@ -105,6 +105,7 @@ enum {
        ATA_ID_CFA_KEY_MGMT     = 162,
        ATA_ID_CFA_MODES        = 163,
        ATA_ID_DATA_SET_MGMT    = 169,
+       ATA_ID_SCT_CMD_XPORT    = 206,
        ATA_ID_ROT_SPEED        = 217,
        ATA_ID_PIO4             = (1 << 1),
 
@@ -788,6 +789,48 @@ static inline bool ata_id_sense_reporting_enabled(const u16 *id)
        return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
 }
 
+/**
+ *
+ * Word: 206 - SCT Command Transport
+ *    15:12 - Vendor Specific
+ *     11:6 - Reserved
+ *        5 - SCT Command Transport Data Tables supported
+ *        4 - SCT Command Transport Features Control supported
+ *        3 - SCT Command Transport Error Recovery Control supported
+ *        2 - SCT Command Transport Write Same supported
+ *        1 - SCT Command Transport Long Sector Access supported
+ *        0 - SCT Command Transport supported
+ */
+static inline bool ata_id_sct_data_tables(const u16 *id)
+{
+       return id[ATA_ID_SCT_CMD_XPORT] & (1 << 5) ? true : false;
+}
+
+static inline bool ata_id_sct_features_ctrl(const u16 *id)
+{
+       return id[ATA_ID_SCT_CMD_XPORT] & (1 << 4) ? true : false;
+}
+
+static inline bool ata_id_sct_error_recovery_ctrl(const u16 *id)
+{
+       return id[ATA_ID_SCT_CMD_XPORT] & (1 << 3) ? true : false;
+}
+
+static inline bool ata_id_sct_write_same(const u16 *id)
+{
+       return id[ATA_ID_SCT_CMD_XPORT] & (1 << 2) ? true : false;
+}
+
+static inline bool ata_id_sct_long_sector_access(const u16 *id)
+{
+       return id[ATA_ID_SCT_CMD_XPORT] & (1 << 1) ? true : false;
+}
+
+static inline bool ata_id_sct_supported(const u16 *id)
+{
+       return id[ATA_ID_SCT_CMD_XPORT] & (1 << 0) ? true : false;
+}
+
 /**
  *     ata_id_major_version    -       get ATA level of drive
  *     @id: Identify data
@@ -1071,32 +1114,6 @@ static inline void ata_id_to_hd_driveid(u16 *id)
 #endif
 }
 
-/*
- * Write LBA Range Entries to the buffer that will cover the extent from
- * sector to sector + count.  This is used for TRIM and for ADD LBA(S)
- * TO NV CACHE PINNED SET.
- */
-static inline unsigned ata_set_lba_range_entries(void *_buffer,
-               unsigned num, u64 sector, unsigned long count)
-{
-       __le64 *buffer = _buffer;
-       unsigned i = 0, used_bytes;
-
-       while (i < num) {
-               u64 entry = sector |
-                       ((u64)(count > 0xffff ? 0xffff : count) << 48);
-               buffer[i++] = __cpu_to_le64(entry);
-               if (count <= 0xffff)
-                       break;
-               count -= 0xffff;
-               sector += 0xffff;
-       }
-
-       used_bytes = ALIGN(i * 8, 512);
-       memset(buffer + i, 0, used_bytes - i * 8);
-       return used_bytes;
-}
-
 static inline bool ata_ok(u8 status)
 {
        return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))
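The new helpers all decode identify-data word 206 with single-bit tests, per the layout documented in the comment above. A runnable userspace replica of that decoding, using a made-up identify word:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define ATA_ID_SCT_CMD_XPORT 206

int main(void)
{
        uint16_t id[256] = {0};

        /* Hypothetical device: SCT supported + Write Same + Error Recovery. */
        id[ATA_ID_SCT_CMD_XPORT] = (1 << 0) | (1 << 2) | (1 << 3);

        bool sct        = id[ATA_ID_SCT_CMD_XPORT] & (1 << 0);
        bool write_same = id[ATA_ID_SCT_CMD_XPORT] & (1 << 2);
        bool err_rec    = id[ATA_ID_SCT_CMD_XPORT] & (1 << 3);
        bool tables     = id[ATA_ID_SCT_CMD_XPORT] & (1 << 5);

        printf("SCT: %d, write same: %d, error recovery: %d, data tables: %d\n",
               sct, write_same, err_rec, tables);
        return 0;
}
```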
@@ -343,16 +343,7 @@ static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
  */
 static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
 {
-       char *p;
-
-       p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
-       if (!p) {
-               strncpy(buf, "<unavailable>", buflen);
-               return -ENAMETOOLONG;
-       }
-
-       memmove(buf, p, buf + buflen - p);
-       return 0;
+       return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
 }
 
 /**
@@ -97,7 +97,7 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 int cgroup_rm_cftypes(struct cftype *cfts);
 void cgroup_file_notify(struct cgroup_file *cfile);
 
-char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
+int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
 int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
 int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
                     struct pid *pid, struct task_struct *tsk);
@@ -555,8 +555,7 @@ static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
        return kernfs_name(cgrp->kn, buf, buflen);
 }
 
-static inline char * __must_check cgroup_path(struct cgroup *cgrp, char *buf,
-                                             size_t buflen)
+static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
 {
        return kernfs_path(cgrp->kn, buf, buflen);
 }
@@ -658,8 +657,8 @@ struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
                                        struct user_namespace *user_ns,
                                        struct cgroup_namespace *old_ns);
 
-char *cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
-                    struct cgroup_namespace *ns);
+int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
+                  struct cgroup_namespace *ns);
 
 #else /* !CONFIG_CGROUPS */
 
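These cgroup path helpers move from returning a pointer (NULL on overflow) to strlcpy-style semantics: the int return is the length the full path needs, so truncation is detected by comparing against the buffer size. A small userspace sketch of consuming an API with that convention (get_path is a hypothetical stand-in for cgroup_path):

```c
#include <stdio.h>
#include <string.h>

/* strlcpy-style: copies into buf, returns the full length required. */
static int get_path(char *buf, size_t buflen)
{
        const char *path = "/sys/fs/cgroup/unified/system.slice";

        snprintf(buf, buflen, "%s", path);
        return (int)strlen(path);       /* may exceed buflen - 1 */
}

int main(void)
{
        char buf[16];
        int ret = get_path(buf, sizeof(buf));

        if (ret < 0)
                printf("error %d\n", ret);
        else if ((size_t)ret >= sizeof(buf))
                printf("truncated: need %d bytes, have %zu\n", ret + 1, sizeof(buf));
        else
                printf("path: %s\n", buf);
        return 0;
}
```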
@@ -785,7 +785,7 @@ extern struct of_device_id __clk_of_table;
  * routines, one at of_clk_init(), and one at platform device probe
  */
 #define CLK_OF_DECLARE_DRIVER(name, compat, fn) \
-       static void name##_of_clk_init_driver(struct device_node *np) \
+       static void __init name##_of_clk_init_driver(struct device_node *np) \
        { \
                of_node_clear_flag(np, OF_POPULATED); \
                fn(np); \
@@ -188,6 +188,13 @@
 #endif /* GCC_VERSION >= 40300 */
 
 #if GCC_VERSION >= 40500
+
+#ifndef __CHECKER__
+#ifdef LATENT_ENTROPY_PLUGIN
+#define __latent_entropy __attribute__((latent_entropy))
+#endif
+#endif
+
 /*
  * Mark a position in code as unreachable.  This can be used to
  * suppress control flow warnings after asm blocks that transfer
@@ -182,6 +182,29 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 # define unreachable() do { } while (1)
 #endif
 
+/*
+ * KENTRY - kernel entry point
+ * This can be used to annotate symbols (functions or data) that are used
+ * without their linker symbol being referenced explicitly. For example,
+ * interrupt vector handlers, or functions in the kernel image that are found
+ * programatically.
+ *
+ * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
+ * are handled in their own way (with KEEP() in linker scripts).
+ *
+ * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
+ * linker script. For example an architecture could KEEP() its entire
+ * boot/exception vector code rather than annotate each function and data.
+ */
+#ifndef KENTRY
+# define KENTRY(sym)                                           \
+       extern typeof(sym) sym;                                 \
+       static const unsigned long __kentry_##sym               \
+       __used                                                  \
+       __attribute__((section("___kentry" "+" #sym ), used))  \
+       = (unsigned long)&sym;
+#endif
+
 #ifndef RELOC_HIDE
 # define RELOC_HIDE(ptr, off)                                  \
   ({ unsigned long __ptr;                                      \
@@ -406,6 +429,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 # define __attribute_const__  /* unimplemented */
 #endif
 
+#ifndef __latent_entropy
+# define __latent_entropy
+#endif
+
 /*
  * Tell gcc if a function is cold. The compiler will assume any path
  * directly leading to the call is unlikely.
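KENTRY exists to defeat link-time dead code elimination: it plants a used, section-placed pointer to the symbol so --gc-sections cannot discard the target even though nothing references it by name. A hedged usage sketch (the handler name is hypothetical):

```c
/* A function only ever reached via a hardware vector table, never
 * called by name from C.  Under link-time dead code elimination the
 * linker could garbage-collect it; KENTRY pins it by emitting a
 * "used" pointer in section "___kentry+my_vector_handler".
 */
void my_vector_handler(void);

void my_vector_handler(void)
{
        /* ... service the interrupt ... */
}

KENTRY(my_vector_handler);
```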
@@ -639,19 +639,19 @@ static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
                                              unsigned int target_freq)
 {
        struct cpufreq_frequency_table *table = policy->freq_table;
+       struct cpufreq_frequency_table *pos, *best = table - 1;
        unsigned int freq;
-       int i, best = -1;
 
-       for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-               freq = table[i].frequency;
+       cpufreq_for_each_valid_entry(pos, table) {
+               freq = pos->frequency;
 
                if (freq >= target_freq)
-                       return i;
+                       return pos - table;
 
-               best = i;
+               best = pos;
        }
 
-       return best;
+       return best - table;
 }
 
 /* Find lowest freq at or above target in a table in descending order */
@@ -659,28 +659,28 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
                                              unsigned int target_freq)
 {
        struct cpufreq_frequency_table *table = policy->freq_table;
+       struct cpufreq_frequency_table *pos, *best = table - 1;
        unsigned int freq;
-       int i, best = -1;
 
-       for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-               freq = table[i].frequency;
+       cpufreq_for_each_valid_entry(pos, table) {
+               freq = pos->frequency;
 
                if (freq == target_freq)
-                       return i;
+                       return pos - table;
 
                if (freq > target_freq) {
-                       best = i;
+                       best = pos;
                        continue;
                }
 
                /* No freq found above target_freq */
-               if (best == -1)
-                       return i;
+               if (best == table - 1)
+                       return pos - table;
 
-               return best;
+               return best - table;
        }
 
-       return best;
+       return best - table;
 }
 
 /* Works only on sorted freq-tables */
@@ -700,28 +700,28 @@ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
                                              unsigned int target_freq)
 {
        struct cpufreq_frequency_table *table = policy->freq_table;
+       struct cpufreq_frequency_table *pos, *best = table - 1;
        unsigned int freq;
-       int i, best = -1;
 
-       for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-               freq = table[i].frequency;
+       cpufreq_for_each_valid_entry(pos, table) {
+               freq = pos->frequency;
 
                if (freq == target_freq)
-                       return i;
+                       return pos - table;
 
                if (freq < target_freq) {
-                       best = i;
+                       best = pos;
                        continue;
                }
 
                /* No freq found below target_freq */
-               if (best == -1)
-                       return i;
+               if (best == table - 1)
+                       return pos - table;
 
-               return best;
+               return best - table;
        }
 
-       return best;
+       return best - table;
 }
 
 /* Find highest freq at or below target in a table in descending order */
@@ -729,19 +729,19 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
                                              unsigned int target_freq)
 {
        struct cpufreq_frequency_table *table = policy->freq_table;
+       struct cpufreq_frequency_table *pos, *best = table - 1;
        unsigned int freq;
-       int i, best = -1;
 
-       for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-               freq = table[i].frequency;
+       cpufreq_for_each_valid_entry(pos, table) {
+               freq = pos->frequency;
 
                if (freq <= target_freq)
-                       return i;
+                       return pos - table;
 
-               best = i;
+               best = pos;
        }
 
-       return best;
+       return best - table;
 }
 
 /* Works only on sorted freq-tables */
@@ -761,32 +761,32 @@ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
                                              unsigned int target_freq)
 {
        struct cpufreq_frequency_table *table = policy->freq_table;
+       struct cpufreq_frequency_table *pos, *best = table - 1;
        unsigned int freq;
-       int i, best = -1;
 
-       for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-               freq = table[i].frequency;
+       cpufreq_for_each_valid_entry(pos, table) {
+               freq = pos->frequency;
 
                if (freq == target_freq)
-                       return i;
+                       return pos - table;
 
                if (freq < target_freq) {
-                       best = i;
+                       best = pos;
                        continue;
                }
 
                /* No freq found below target_freq */
-               if (best == -1)
-                       return i;
+               if (best == table - 1)
+                       return pos - table;
 
                /* Choose the closest freq */
-               if (target_freq - table[best].frequency > freq - target_freq)
-                       return i;
+               if (target_freq - best->frequency > freq - target_freq)
+                       return pos - table;
 
-               return best;
+               return best - table;
        }
 
-       return best;
+       return best - table;
 }
 
 /* Find closest freq to target in a table in descending order */
@@ -794,32 +794,32 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
                                              unsigned int target_freq)
 {
        struct cpufreq_frequency_table *table = policy->freq_table;
+       struct cpufreq_frequency_table *pos, *best = table - 1;
        unsigned int freq;
-       int i, best = -1;
 
-       for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-               freq = table[i].frequency;
+       cpufreq_for_each_valid_entry(pos, table) {
+               freq = pos->frequency;
 
                if (freq == target_freq)
-                       return i;
+                       return pos - table;
 
                if (freq > target_freq) {
-                       best = i;
+                       best = pos;
                        continue;
                }
 
                /* No freq found above target_freq */
-               if (best == -1)
-                       return i;
+               if (best == table - 1)
+                       return pos - table;
 
                /* Choose the closest freq */
-               if (table[best].frequency - target_freq > target_freq - freq)
-                       return i;
+               if (best->frequency - target_freq > target_freq - freq)
+                       return pos - table;
 
-               return best;
+               return best - table;
        }
 
-       return best;
+       return best - table;
 }
 
 /* Works only on sorted freq-tables */
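The rewrite of these six lookup helpers swaps integer indices for entry pointers so invalid table entries can be skipped, recovering the index at the end by pointer arithmetic (pos - table); the "nothing found yet" sentinel changes from -1 to table - 1 accordingly. A standalone sketch of the same pattern (the sentinel values and table contents are made up for illustration):

```c
#include <stdio.h>

#define CPUFREQ_TABLE_END     ~0u
#define CPUFREQ_ENTRY_INVALID ~1u

struct freq_entry { unsigned int frequency; };

int main(void)
{
        struct freq_entry table[] = {
                { 500000 }, { CPUFREQ_ENTRY_INVALID }, { 1000000 },
                { 1500000 }, { CPUFREQ_TABLE_END },
        };
        struct freq_entry *pos, *best = table - 1;      /* sentinel: no match yet */
        unsigned int target = 900000;

        /* Find lowest frequency at or above target, skipping invalid slots. */
        for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) {
                if (pos->frequency == CPUFREQ_ENTRY_INVALID)
                        continue;       /* the old index-based loop missed this */
                best = pos;
                if (pos->frequency >= target)
                        break;
        }

        if (best == table - 1)
                printf("no valid entry\n");
        else
                printf("index %td, freq %u\n", best - table, best->frequency);
        return 0;
}
```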
@@ -81,6 +81,7 @@ enum cpuhp_state {
        CPUHP_AP_ARM_ARCH_TIMER_STARTING,
        CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
        CPUHP_AP_DUMMY_TIMER_STARTING,
+       CPUHP_AP_JCORE_TIMER_STARTING,
        CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
        CPUHP_AP_ARM_TWD_STARTING,
        CPUHP_AP_METAG_TIMER_STARTING,
@@ -1,5 +1,6 @@
 #ifndef _LINUX_EXPORT_H
 #define _LINUX_EXPORT_H
+
 /*
  * Export symbols from the kernel to modules.  Forked from module.h
  * to reduce the amount of pointless cruft we feed to gcc when only
@@ -42,27 +43,26 @@ extern struct module __this_module;
 #ifdef CONFIG_MODVERSIONS
 /* Mark the CRC weak since genksyms apparently decides not to
  * generate a checksums for some symbols */
-#define __CRC_SYMBOL(sym, sec)                                 \
-       extern __visible void *__crc_##sym __attribute__((weak));      \
-       static const unsigned long __kcrctab_##sym              \
-       __used                                                  \
-       __attribute__((section("___kcrctab" sec "+" #sym), unused))    \
+#define __CRC_SYMBOL(sym, sec)                                 \
+       extern __visible void *__crc_##sym __attribute__((weak));      \
+       static const unsigned long __kcrctab_##sym              \
+       __used                                                  \
+       __attribute__((section("___kcrctab" sec "+" #sym), used))      \
        = (unsigned long) &__crc_##sym;
 #else
 #define __CRC_SYMBOL(sym, sec)
 #endif
 
 /* For every exported symbol, place a struct in the __ksymtab section */
-#define ___EXPORT_SYMBOL(sym, sec)                             \
-       extern typeof(sym) sym;                                 \
-       __CRC_SYMBOL(sym, sec)                                  \
-       static const char __kstrtab_##sym[]                     \
-       __attribute__((section("__ksymtab_strings"), aligned(1))) \
-       = VMLINUX_SYMBOL_STR(sym);                              \
-       extern const struct kernel_symbol __ksymtab_##sym;      \
-       __visible const struct kernel_symbol __ksymtab_##sym    \
-       __used                                                  \
-       __attribute__((section("___ksymtab" sec "+" #sym), unused))    \
+#define ___EXPORT_SYMBOL(sym, sec)                             \
+       extern typeof(sym) sym;                                 \
+       __CRC_SYMBOL(sym, sec)                                  \
+       static const char __kstrtab_##sym[]                     \
+       __attribute__((section("__ksymtab_strings"), aligned(1))) \
+       = VMLINUX_SYMBOL_STR(sym);                              \
+       static const struct kernel_symbol __ksymtab_##sym       \
+       __used                                                  \
+       __attribute__((section("___ksymtab" sec "+" #sym), used))      \
        = { (unsigned long)&sym, __kstrtab_##sym }
 
 #if defined(__KSYM_DEPS__)
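The substantive change in both export macros is the section attribute flipping from unused to used (and the ksymtab struct going static): with link-time dead code elimination enabled, only a "used" attribute guarantees the compiler emits the table entry even though nothing in C references it. A standalone illustration of the attribute on an ELF target (the section name is arbitrary):

```c
#include <stdio.h>

static int answer = 42;

/* Without "used", the compiler may drop this seemingly unreferenced
 * static; with it, the pointer is emitted into the custom section and
 * survives -fdata-sections plus --gc-sections at link time.
 */
static const unsigned long keep_answer
        __attribute__((section("my_keep_section"), used))
        = (unsigned long)&answer;

int main(void)
{
        printf("answer lives at %#lx\n", (unsigned long)&answer);
        return 0;
}
```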
@@ -105,7 +105,7 @@ struct files_struct *get_files_struct(struct task_struct *);
 void put_files_struct(struct files_struct *fs);
 void reset_files_struct(struct files_struct *);
 int unshare_files(struct files_struct **);
-struct files_struct *dup_fd(struct files_struct *, int *);
+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
 void do_close_on_exec(struct files_struct *);
 int iterate_fd(struct files_struct *, unsigned,
                int (*)(const void *, struct file *, unsigned),
@@ -2934,6 +2934,7 @@ extern int vfs_stat(const char __user *, struct kstat *);
 extern int vfs_lstat(const char __user *, struct kstat *);
 extern int vfs_fstat(unsigned int, struct kstat *);
 extern int vfs_fstatat(int , const char __user *, struct kstat *, int);
+extern const char *vfs_get_link(struct dentry *, struct delayed_call *);
 
 extern int __generic_block_fiemap(struct inode *inode,
                                  struct fiemap_extent_info *fieinfo,
@@ -437,7 +437,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
 
 /* drivers/char/random.c */
-extern void add_disk_randomness(struct gendisk *disk);
+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
 extern void rand_initialize_disk(struct gendisk *disk);
 
 static inline sector_t get_start_sect(struct block_device *bdev)
@@ -1169,13 +1169,6 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
                                         const char *mod_name);
 void vmbus_driver_unregister(struct hv_driver *hv_driver);
 
-static inline const char *vmbus_dev_name(const struct hv_device *device_obj)
-{
-       const struct kobject *kobj = &device_obj->device.kobj;
-
-       return kobj->name;
-}
-
 void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
 
 int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
@@ -39,7 +39,7 @@
 
 /* These are for everybody (although not all archs will actually
    discard it in modules) */
-#define __init         __section(.init.text) __cold notrace
+#define __init         __section(.init.text) __cold notrace __latent_entropy
 #define __initdata     __section(.init.data)
 #define __initconst    __section(.init.rodata)
 #define __exitdata     __section(.exit.data)
@@ -75,7 +75,8 @@
 #define __exit          __section(.exit.text) __exitused __cold notrace
 
 /* Used for MEMORY_HOTPLUG */
-#define __meminit        __section(.meminit.text) __cold notrace
+#define __meminit        __section(.meminit.text) __cold notrace \
+                                                 __latent_entropy
 #define __meminitdata    __section(.meminit.data)
 #define __meminitconst   __section(.meminit.rodata)
 #define __memexit        __section(.memexit.text) __exitused __cold notrace
@@ -139,24 +140,8 @@ extern bool initcall_debug;
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_LTO
-/* Work around a LTO gcc problem: when there is no reference to a variable
- * in a module it will be moved to the end of the program. This causes
- * reordering of initcalls which the kernel does not like.
- * Add a dummy reference function to avoid this. The function is
- * deleted by the linker.
- */
-#define LTO_REFERENCE_INITCALL(x) \
-       ; /* yes this is needed */                      \
-       static __used __exit void *reference_##x(void)  \
-       {                                               \
-               return &x;                              \
-       }
-#else
-#define LTO_REFERENCE_INITCALL(x)
-#endif
-
-/* initcalls are now grouped by functionality into separate
+/*
+ * initcalls are now grouped by functionality into separate
  * subsections. Ordering inside the subsections is determined
  * by link order.
  * For backwards compatibility, initcall() puts the call in
@@ -164,12 +149,16 @@ extern bool initcall_debug;
  *
  * The `id' arg to __define_initcall() is needed so that multiple initcalls
  * can point at the same handler without causing duplicate-symbol build errors.
+ *
+ * Initcalls are run by placing pointers in initcall sections that the
+ * kernel iterates at runtime. The linker can do dead code / data elimination
+ * and remove that completely, so the initcall sections have to be marked
+ * as KEEP() in the linker script.
  */
 
 #define __define_initcall(fn, id) \
        static initcall_t __initcall_##fn##id __used \
-       __attribute__((__section__(".initcall" #id ".init"))) = fn; \
-       LTO_REFERENCE_INITCALL(__initcall_##fn##id)
+       __attribute__((__section__(".initcall" #id ".init"))) = fn;
 
 /*
  * Early initcalls run before initializing SMP.
@@ -205,15 +194,15 @@ extern bool initcall_debug;
 
 #define __initcall(fn) device_initcall(fn)
 
-#define __exitcall(fn) \
+#define __exitcall(fn)                                         \
        static exitcall_t __exitcall_##fn __exit_call = fn
 
-#define console_initcall(fn) \
-       static initcall_t __initcall_##fn \
+#define console_initcall(fn)                                   \
+       static initcall_t __initcall_##fn                       \
        __used __section(.con_initcall.init) = fn
 
-#define security_initcall(fn) \
-       static initcall_t __initcall_##fn \
+#define security_initcall(fn)                                  \
+       static initcall_t __initcall_##fn                       \
        __used __section(.security_initcall.init) = fn
 
 struct obs_kernel_param {
@@ -141,4 +141,26 @@ enum {
 void *memremap(resource_size_t offset, size_t size, unsigned long flags);
 void memunmap(void *addr);
 
+/*
+ * On x86 PAT systems we have memory tracking that keeps track of
+ * the allowed mappings on memory ranges. This tracking works for
+ * all the in-kernel mapping APIs (ioremap*), but where the user
+ * wishes to map a range from a physical device into user memory
+ * the tracking won't be updated. This API is to be used by
+ * drivers which remap physical device pages into userspace,
+ * and wants to make sure they are mapped WC and not UC.
+ */
+#ifndef arch_io_reserve_memtype_wc
+static inline int arch_io_reserve_memtype_wc(resource_size_t base,
+                                            resource_size_t size)
+{
+       return 0;
+}
+
+static inline void arch_io_free_memtype_wc(resource_size_t base,
+                                          resource_size_t size)
+{
+}
+#endif
+
 #endif /* _LINUX_IO_H */
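The comment gives the intent; in practice a driver brackets its device aperture with the reserve/free pair so later userspace mappings inherit write-combining on x86 PAT. A hedged, schematic probe/remove pairing, not from the original commit (error handling trimmed, mydev names hypothetical):

```c
/* Schematic use in a PCI driver. */
static int mydev_probe(struct pci_dev *pdev)
{
        resource_size_t base = pci_resource_start(pdev, 0);
        resource_size_t size = pci_resource_len(pdev, 0);
        int ret;

        /* Tell the PAT tracking this range will be mapped WC ... */
        ret = arch_io_reserve_memtype_wc(base, size);
        if (ret)
                return ret;

        /* ... so kernel mappings and later userspace mmap()s agree. */
        return 0;
}

static void mydev_remove(struct pci_dev *pdev)
{
        arch_io_free_memtype_wc(pci_resource_start(pdev, 0),
                                pci_resource_len(pdev, 0));
}
```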
@@ -19,11 +19,15 @@ struct vm_fault;
 #define IOMAP_UNWRITTEN 0x04   /* blocks allocated @blkno in unwritten state */
 
 /*
- * Flags for iomap mappings:
+ * Flags for all iomap mappings:
  */
-#define IOMAP_F_MERGED 0x01    /* contains multiple blocks/extents */
-#define IOMAP_F_SHARED 0x02    /* block shared with another file */
-#define IOMAP_F_NEW    0x04    /* blocks have been newly allocated */
+#define IOMAP_F_NEW    0x01    /* blocks have been newly allocated */
+
+/*
+ * Flags that only need to be reported for IOMAP_REPORT requests:
+ */
+#define IOMAP_F_MERGED 0x10    /* contains multiple blocks/extents */
+#define IOMAP_F_SHARED 0x20    /* block shared with another file */
 
 /*
  * Magic value for blkno:
@@ -42,8 +46,9 @@ struct iomap {
 /*
  * Flags for iomap_begin / iomap_end. No flag implies a read.
  */
-#define IOMAP_WRITE            (1 << 0)
-#define IOMAP_ZERO             (1 << 1)
+#define IOMAP_WRITE            (1 << 0) /* writing, must allocate blocks */
+#define IOMAP_ZERO             (1 << 1) /* zeroing operation, may skip holes */
+#define IOMAP_REPORT           (1 << 2) /* report extent status, e.g. FIEMAP */
 
 struct iomap_ops {
        /*
@@ -123,12 +123,12 @@ struct inet6_skb_parm {
 };
 
 #if defined(CONFIG_NET_L3_MASTER_DEV)
-static inline bool skb_l3mdev_slave(__u16 flags)
+static inline bool ipv6_l3mdev_skb(__u16 flags)
 {
        return flags & IP6SKB_L3SLAVE;
 }
 #else
-static inline bool skb_l3mdev_slave(__u16 flags)
+static inline bool ipv6_l3mdev_skb(__u16 flags)
 {
        return false;
 }
@@ -139,11 +139,22 @@ static inline bool skb_l3mdev_slave(__u16 flags)
 
 static inline int inet6_iif(const struct sk_buff *skb)
 {
-       bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags);
+       bool l3_slave = ipv6_l3mdev_skb(IP6CB(skb)->flags);
 
        return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
 }
 
+/* can not be used in TCP layer after tcp_v6_fill_cb */
+static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
+{
+#if defined(CONFIG_NET_L3_MASTER_DEV)
+       if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
+           ipv6_l3mdev_skb(IP6CB(skb)->flags))
+               return true;
+#endif
+       return false;
+}
+
 struct tcp6_request_sock {
        struct tcp_request_sock   tcp6rsk_tcp;
 };
@@ -290,7 +290,7 @@
 #define GITS_BASER_TYPE_SHIFT                  (56)
 #define GITS_BASER_TYPE(r)                     (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
 #define GITS_BASER_ENTRY_SIZE_SHIFT            (48)
-#define GITS_BASER_ENTRY_SIZE(r)               ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
+#define GITS_BASER_ENTRY_SIZE(r)               ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
 #define GITS_BASER_SHAREABILITY_SHIFT          (10)
 #define GITS_BASER_InnerShareable                                      \
        GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
@@ -44,6 +44,7 @@ static inline void kasan_disable_current(void)
 void kasan_unpoison_shadow(const void *address, size_t size);
 
 void kasan_unpoison_task_stack(struct task_struct *task);
+void kasan_unpoison_stack_above_sp_to(const void *watermark);
 
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
@@ -85,6 +86,7 @@ size_t kasan_metadata_size(struct kmem_cache *cache);
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
 
 static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {}
 
 static inline void kasan_enable_current(void) {}
 static inline void kasan_disable_current(void) {}
@@ -31,7 +31,6 @@
  * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
  * the last step cherry picks the 2nd arg, we get a zero.
  */
-#define config_enabled(cfg)            ___is_defined(cfg)
 #define __is_defined(x)                        ___is_defined(x)
 #define ___is_defined(val)             ____is_defined(__ARG_PLACEHOLDER_##val)
 #define ____is_defined(arg1_or_junk)   __take_second_arg(arg1_or_junk 1, 0)
@@ -41,13 +40,13 @@
  * otherwise. For boolean options, this is equivalent to
  * IS_ENABLED(CONFIG_FOO).
  */
-#define IS_BUILTIN(option) config_enabled(option)
+#define IS_BUILTIN(option) __is_defined(option)
 
 /*
  * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
  * otherwise.
  */
-#define IS_MODULE(option) config_enabled(option##_MODULE)
+#define IS_MODULE(option) __is_defined(option##_MODULE)
 
 /*
  * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
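The __is_defined machinery turns "is this macro defined as 1" into a compile-time constant via the __ARG_PLACEHOLDER trick the comment describes. A self-contained replica you can compile to watch it work (the macro names shadow the kernel's purely for illustration):

```c
#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val

#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val)           ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x)              ___is_defined(x)

#define CONFIG_FOO 1    /* pretend a 'y' option */
/* CONFIG_BAR deliberately not defined */

int main(void)
{
        /* CONFIG_FOO -> placeholder expands to "0," -> second arg is 1 */
        printf("FOO: %d\n", __is_defined(CONFIG_FOO));
        /* CONFIG_BAR -> single junk token -> second arg is the trailing 0 */
        printf("BAR: %d\n", __is_defined(CONFIG_BAR));
        return 0;
}
```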
@@ -269,10 +269,8 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
 }
 
 int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
-size_t kernfs_path_len(struct kernfs_node *kn);
 int kernfs_path_from_node(struct kernfs_node *root_kn, struct kernfs_node *kn,
                          char *buf, size_t buflen);
-char *kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen);
 void pr_cont_kernfs_name(struct kernfs_node *kn);
 void pr_cont_kernfs_path(struct kernfs_node *kn);
 struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn);
@@ -341,12 +339,10 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
 static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
 { return -ENOSYS; }
 
-static inline size_t kernfs_path_len(struct kernfs_node *kn)
-{ return 0; }
-
-static inline char *kernfs_path(struct kernfs_node *kn, char *buf,
-                               size_t buflen)
-{ return NULL; }
+static inline int kernfs_path_from_node(struct kernfs_node *root_kn,
+                                       struct kernfs_node *kn,
+                                       char *buf, size_t buflen)
+{ return -ENOSYS; }
 
 static inline void pr_cont_kernfs_name(struct kernfs_node *kn) { }
 static inline void pr_cont_kernfs_path(struct kernfs_node *kn) { }
@@ -436,6 +432,22 @@ static inline void kernfs_init(void) { }
 
 #endif /* CONFIG_KERNFS */
 
+/**
+ * kernfs_path - build full path of a given node
+ * @kn: kernfs_node of interest
+ * @buf: buffer to copy @kn's name into
+ * @buflen: size of @buf
+ *
+ * Builds and returns the full path of @kn in @buf of @buflen bytes.  The
+ * path is built from the end of @buf so the returned pointer usually
+ * doesn't match @buf.  If @buf isn't long enough, @buf is nul terminated
+ * and %NULL is returned.
+ */
+static inline int kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
+{
+       return kernfs_path_from_node(kn, NULL, buf, buflen);
+}
+
 static inline struct kernfs_node *
 kernfs_find_and_get(struct kernfs_node *kn, const char *name)
 {
@@ -46,7 +46,8 @@
 #ifdef CONFIG_ATA_NONSTANDARD
 #include <asm/libata-portmap.h>
 #else
-#include <asm-generic/libata-portmap.h>
+#define ATA_PRIMARY_IRQ(dev)   14
+#define ATA_SECONDARY_IRQ(dev) 15
 #endif
 
 /*
@@ -1399,7 +1399,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
                    u32 *lkey, u32 *rkey);
 int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
 int mlx4_SYNC_TPT(struct mlx4_dev *dev);
-int mlx4_test_interrupts(struct mlx4_dev *dev);
+int mlx4_test_interrupt(struct mlx4_dev *dev, int vector);
+int mlx4_test_async(struct mlx4_dev *dev);
 int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
                             const u32 offset[], u32 value[],
                             size_t array_len, u8 port);
@@ -418,8 +418,12 @@ struct mlx5_core_health {
        u32                             prev;
        int                             miss_counter;
        bool                            sick;
+       /* wq spinlock to synchronize draining */
+       spinlock_t                      wq_lock;
        struct workqueue_struct        *wq;
+       unsigned long                   flags;
        struct work_struct              work;
+       struct delayed_work             recover_work;
 };
 
 struct mlx5_cq_table {
@@ -625,10 +629,6 @@ struct mlx5_db {
        int                     index;
 };
 
-enum {
-       MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
-};
-
 enum {
        MLX5_COMP_EQ_SIZE = 1024,
 };
@@ -638,13 +638,6 @@ enum {
        MLX5_PTYS_EN = 1 << 2,
 };
 
-struct mlx5_db_pgdir {
-       struct list_head        list;
-       DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
-       __be32                 *db_page;
-       dma_addr_t              db_dma;
-};
-
 typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
 
 struct mlx5_cmd_work_ent {
@@ -789,6 +782,7 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev);
 int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
 int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                        struct mlx5_buf *buf, int node);
 int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
@@ -1266,29 +1266,25 @@ static inline int fixup_user_fault(struct task_struct *tsk,
 }
 #endif
 
-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
+               unsigned int gup_flags);
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-               void *buf, int len, int write);
+               void *buf, int len, unsigned int gup_flags);
 
 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                      unsigned long start, unsigned long nr_pages,
                      unsigned int foll_flags, struct page **pages,
                      struct vm_area_struct **vmas, int *nonblocking);
 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
                           unsigned long start, unsigned long nr_pages,
-                          int write, int force, struct page **pages,
+                          unsigned int gup_flags, struct page **pages,
                           struct vm_area_struct **vmas);
 long get_user_pages(unsigned long start, unsigned long nr_pages,
-                          int write, int force, struct page **pages,
+                          unsigned int gup_flags, struct page **pages,
                           struct vm_area_struct **vmas);
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
-                  int write, int force, struct page **pages, int *locked);
+                  unsigned int gup_flags, struct page **pages, int *locked);
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
                               unsigned long start, unsigned long nr_pages,
-                              int write, int force, struct page **pages,
-                              unsigned int gup_flags);
+                              struct page **pages, unsigned int gup_flags);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
-                   int write, int force, struct page **pages);
+                   struct page **pages, unsigned int gup_flags);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages);
 
@@ -1306,7 +1302,7 @@ struct frame_vector {
 struct frame_vector *frame_vector_create(unsigned int nr_frames);
 void frame_vector_destroy(struct frame_vector *vec);
 int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
-                    bool write, bool force, struct frame_vector *vec);
+                    unsigned int gup_flags, struct frame_vector *vec);
 void put_vaddr_frames(struct frame_vector *vec);
 int frame_vector_to_pages(struct frame_vector *vec);
 void frame_vector_to_pfns(struct frame_vector *vec);
@@ -1391,7 +1387,7 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
               !vma_growsup(vma->vm_next, addr);
 }
 
-int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
+int vma_is_stack_for_current(struct vm_area_struct *vma);
 
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
@@ -2232,6 +2228,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
 #define FOLL_TRIED     0x800   /* a retry, previous pass started an IO */
 #define FOLL_MLOCK     0x1000  /* lock present pages */
 #define FOLL_REMOTE    0x2000  /* we are working on non-current tsk/mm */
+#define FOLL_COW       0x4000  /* internal GUP flag */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
                        void *data);
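These prototypes replace the write/force boolean pair with a single gup_flags bitmask, the 4.9-era get_user_pages hardening that also introduced the internal FOLL_COW flag. Converting a caller is mechanical; a hedged before/after fragment (surrounding declarations omitted):

```c
/* Before: two adjacent ints, easy to transpose accidentally. */
ret = get_user_pages(start, nr_pages, 1 /* write */, 0 /* force */,
                     pages, NULL);

/* After: one explicit flags word; FOLL_WRITE replaces write=1 and
 * FOLL_FORCE would replace force=1.
 */
ret = get_user_pages(start, nr_pages, FOLL_WRITE, pages, NULL);
```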
@@ -440,33 +440,7 @@ struct zone {
        seqlock_t               span_seqlock;
 #endif
 
-       /*
-        * wait_table           -- the array holding the hash table
-        * wait_table_hash_nr_entries  -- the size of the hash table array
-        * wait_table_bits      -- wait_table_size == (1 << wait_table_bits)
-        *
-        * The purpose of all these is to keep track of the people
-        * waiting for a page to become available and make them
-        * runnable again when possible. The trouble is that this
-        * consumes a lot of space, especially when so few things
-        * wait on pages at a given time. So instead of using
-        * per-page waitqueues, we use a waitqueue hash table.
-        *
-        * The bucket discipline is to sleep on the same queue when
-        * colliding and wake all in that wait queue when removing.
-        * When something wakes, it must check to be sure its page is
-        * truly available, a la thundering herd. The cost of a
-        * collision is great, but given the expected load of the
-        * table, they should be so rare as to be outweighed by the
-        * benefits from the saved space.
-        *
-        * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
-        * primary users of these fields, and in mm/page_alloc.c
-        * free_area_init_core() performs the initialization of them.
-        */
-       wait_queue_head_t       *wait_table;
-       unsigned long           wait_table_hash_nr_entries;
-       unsigned long           wait_table_bits;
+       int initialized;
 
        /* Write-intensive fields used from the page allocator */
        ZONE_PADDING(_pad1_)
@@ -546,7 +520,7 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
 
 static inline bool zone_is_initialized(struct zone *zone)
 {
-       return !!zone->wait_table;
+       return zone->initialized;
 }
 
 static inline bool zone_is_empty(struct zone *zone)
@@ -2167,7 +2167,10 @@ struct napi_gro_cb {
        /* Used to determine if flush_id can be ignored */
        u8      is_atomic:1;
 
-       /* 5 bit hole */
+       /* Number of gro_receive callbacks this packet already went through */
+       u8      recursion_counter:4;
+
+       /* 1 bit hole */
 
        /* used to support CHECKSUM_COMPLETE for tunneling protocols */
        __wsum  csum;
@@ -2178,6 +2181,40 @@ struct napi_gro_cb {
 
 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
 
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+       return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+                                               struct sk_buff **head,
+                                               struct sk_buff *skb)
+{
+       if (unlikely(gro_recursion_inc_test(skb))) {
+               NAPI_GRO_CB(skb)->flush |= 1;
+               return NULL;
+       }
+
+       return cb(head, skb);
+}
+
+typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
+                                            struct sk_buff *);
+static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
+                                                  struct sock *sk,
+                                                  struct sk_buff **head,
+                                                  struct sk_buff *skb)
+{
+       if (unlikely(gro_recursion_inc_test(skb))) {
+               NAPI_GRO_CB(skb)->flush |= 1;
+               return NULL;
+       }
+
+       return cb(sk, head, skb);
+}
+
 struct packet_type {
        __be16                  type;   /* This is really htons(ether_type). */
        struct net_device       *dev;   /* NULL is wildcarded here */
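Tunnel protocols whose gro_receive can nest (for example VXLAN over UDP over IP) are expected to go through call_gro_receive() instead of invoking the lower handler directly, so the per-packet counter can cap nesting at GRO_RECURSION_LIMIT and flush instead of recursing further. A hedged sketch of a tunnel handler using the wrapper (names hypothetical, not from the commit):

```c
/* Schematic tunnel GRO receive. */
static struct sk_buff **mytun_gro_receive(struct sk_buff **head,
                                          struct sk_buff *skb)
{
        /* ... locate the inner protocol's handler (hypothetical ops table) ... */
        gro_receive_t inner = rcu_dereference(mytun_inner_ops)->gro_receive;

        /* Bounded recursion: after GRO_RECURSION_LIMIT nested calls the
         * wrapper marks the packet for flush and returns NULL instead of
         * descending further.
         */
        return call_gro_receive(inner, head, skb);
}
```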
@@ -16,7 +16,6 @@
 #define _LINUX_NVME_H
 
 #include <linux/types.h>
-#include <linux/uuid.h>
 
 /* NQN names in commands fields specified one size */
 #define NVMF_NQN_FIELD_LEN     256
@@ -182,7 +181,7 @@ struct nvme_id_ctrl {
        char                    fr[8];
        __u8                    rab;
        __u8                    ieee[3];
-       __u8                    mic;
+       __u8                    cmic;
        __u8                    mdts;
        __le16                  cntlid;
        __le32                  ver;
@@ -202,7 +201,13 @@ struct nvme_id_ctrl {
        __u8                    apsta;
        __le16                  wctemp;
        __le16                  cctemp;
-       __u8                    rsvd270[50];
+       __le16                  mtfa;
+       __le32                  hmpre;
+       __le32                  hmmin;
+       __u8                    tnvmcap[16];
+       __u8                    unvmcap[16];
+       __le32                  rpmbs;
+       __u8                    rsvd316[4];
        __le16                  kas;
        __u8                    rsvd322[190];
        __u8                    sqes;
@@ -267,7 +272,7 @@ struct nvme_id_ns {
        __le16                  nabo;
        __le16                  nabspf;
        __u16                   rsvd46;
-       __le64                  nvmcap[2];
+       __u8                    nvmcap[16];
        __u8                    rsvd64[40];
        __u8                    nguid[16];
        __u8                    eui64[8];
@@ -276,6 +281,16 @@ struct nvme_id_ns {
        __u8                    vs[3712];
 };
 
+enum {
+       NVME_ID_CNS_NS                  = 0x00,
+       NVME_ID_CNS_CTRL                = 0x01,
+       NVME_ID_CNS_NS_ACTIVE_LIST      = 0x02,
+       NVME_ID_CNS_NS_PRESENT_LIST     = 0x10,
+       NVME_ID_CNS_NS_PRESENT          = 0x11,
+       NVME_ID_CNS_CTRL_NS_LIST        = 0x12,
+       NVME_ID_CNS_CTRL_LIST           = 0x13,
+};
+
 enum {
        NVME_NS_FEAT_THIN       = 1 << 0,
        NVME_NS_FLBAS_LBA_MASK  = 0xf,
@@ -556,8 +571,10 @@ enum nvme_admin_opcode {
        nvme_admin_set_features         = 0x09,
        nvme_admin_get_features         = 0x0a,
        nvme_admin_async_event          = 0x0c,
+       nvme_admin_ns_mgmt              = 0x0d,
        nvme_admin_activate_fw          = 0x10,
        nvme_admin_download_fw          = 0x11,
+       nvme_admin_ns_attach            = 0x15,
        nvme_admin_keep_alive           = 0x18,
        nvme_admin_format_nvm           = 0x80,
        nvme_admin_security_send        = 0x81,
@@ -583,6 +600,7 @@ enum {
        NVME_FEAT_WRITE_ATOMIC  = 0x0a,
        NVME_FEAT_ASYNC_EVENT   = 0x0b,
        NVME_FEAT_AUTO_PST      = 0x0c,
+       NVME_FEAT_HOST_MEM_BUF  = 0x0d,
        NVME_FEAT_KATO          = 0x0f,
        NVME_FEAT_SW_PROGRESS   = 0x80,
        NVME_FEAT_HOST_ID       = 0x81,
@@ -745,7 +763,7 @@ struct nvmf_common_command {
 struct nvmf_disc_rsp_page_entry {
        __u8            trtype;
        __u8            adrfam;
-       __u8            nqntype;
+       __u8            subtype;
        __u8            treq;
        __le16          portid;
        __le16          cntlid;
@@ -794,7 +812,7 @@ struct nvmf_connect_command {
 };
 
 struct nvmf_connect_data {
-       uuid_be         hostid;
+       __u8            hostid[16];
        __le16          cntlid;
        char            resv4[238];
        char            subsysnqn[NVMF_NQN_FIELD_LEN];
@@ -905,12 +923,23 @@ enum {
        NVME_SC_INVALID_VECTOR          = 0x108,
        NVME_SC_INVALID_LOG_PAGE        = 0x109,
        NVME_SC_INVALID_FORMAT          = 0x10a,
-       NVME_SC_FIRMWARE_NEEDS_RESET    = 0x10b,
+       NVME_SC_FW_NEEDS_CONV_RESET     = 0x10b,
        NVME_SC_INVALID_QUEUE           = 0x10c,
        NVME_SC_FEATURE_NOT_SAVEABLE    = 0x10d,
        NVME_SC_FEATURE_NOT_CHANGEABLE  = 0x10e,
        NVME_SC_FEATURE_NOT_PER_NS      = 0x10f,
-       NVME_SC_FW_NEEDS_RESET_SUBSYS   = 0x110,
+       NVME_SC_FW_NEEDS_SUBSYS_RESET   = 0x110,
+       NVME_SC_FW_NEEDS_RESET          = 0x111,
+       NVME_SC_FW_NEEDS_MAX_TIME       = 0x112,
+       NVME_SC_FW_ACIVATE_PROHIBITED   = 0x113,
+       NVME_SC_OVERLAPPING_RANGE       = 0x114,
+       NVME_SC_NS_INSUFFICENT_CAP      = 0x115,
+       NVME_SC_NS_ID_UNAVAILABLE       = 0x116,
+       NVME_SC_NS_ALREADY_ATTACHED     = 0x118,
+       NVME_SC_NS_IS_PRIVATE           = 0x119,
+       NVME_SC_NS_NOT_ATTACHED         = 0x11a,
+       NVME_SC_THIN_PROV_NOT_SUPP      = 0x11b,
+       NVME_SC_CTRL_LIST_INVALID       = 0x11c,
 
        /*
         * I/O Command Set Specific - NVM commands:
@@ -941,6 +970,7 @@ enum {
        NVME_SC_REFTAG_CHECK            = 0x284,
        NVME_SC_COMPARE_FAILED          = 0x285,
        NVME_SC_ACCESS_DENIED           = 0x286,
+       NVME_SC_UNWRITTEN_BLOCK         = 0x287,
 
        NVME_SC_DNR                     = 0x4000,
 };
@@ -960,6 +990,7 @@ struct nvme_completion {
        __le16  status;         /* did the command fail, and if so, why? */
 };
 
-#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8))
+#define NVME_VS(major, minor, tertiary) \
+       (((major) << 16) | ((minor) << 8) | (tertiary))
 
 #endif /* _LINUX_NVME_H */
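NVME_VS gains a third argument because NVMe 1.2.1 introduced a tertiary version number in the version register (major in bits 31:16, minor in 15:8, tertiary in 7:0). A runnable check of the encoding:

```c
#include <stdio.h>

#define NVME_VS(major, minor, tertiary) \
        (((major) << 16) | ((minor) << 8) | (tertiary))

int main(void)
{
        /* NVMe 1.2.1 -> 0x10201; the old two-arg macro could only say 1.2 */
        printf("1.2.1 -> %#x\n", NVME_VS(1, 2, 1));
        printf("1.3.0 -> %#x\n", NVME_VS(1, 3, 0));
        return 0;
}
```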
@@ -1257,6 +1257,7 @@ extern u64 perf_swevent_set_period(struct perf_event *event);
 extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_disable_local(struct perf_event *event);
+extern void perf_event_disable_inatomic(struct perf_event *event);
 extern void perf_event_task_tick(void);
 #else /* !CONFIG_PERF_EVENTS: */
 static inline void *
@@ -25,7 +25,6 @@ static inline int mm_pkey_alloc(struct mm_struct *mm)
 
 static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
 {
-       WARN_ONCE(1, "free of protection key when disabled");
        return -EINVAL;
 }
 
@@ -146,6 +146,7 @@ enum qed_led_mode {
 #define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
 
 #define QED_COALESCE_MAX 0xFF
+#define QED_DEFAULT_RX_USECS 12
 
 /* forward */
 struct qed_dev;
@@ -68,7 +68,7 @@ void qede_roce_unregister_driver(struct qedr_driver *drv);
 
 bool qede_roce_supported(struct qede_dev *dev);
 
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+#if IS_ENABLED(CONFIG_QED_RDMA)
 int qede_roce_dev_add(struct qede_dev *dev);
 void qede_roce_dev_event_open(struct qede_dev *dev);
 void qede_roce_dev_event_close(struct qede_dev *dev);
@@ -18,9 +18,20 @@ struct random_ready_callback {
 };
 
 extern void add_device_randomness(const void *, unsigned int);
+
+#if defined(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) && !defined(__CHECKER__)
+static inline void add_latent_entropy(void)
+{
+       add_device_randomness((const void *)&latent_entropy,
+                             sizeof(latent_entropy));
+}
+#else
+static inline void add_latent_entropy(void) {}
+#endif
+
 extern void add_input_randomness(unsigned int type, unsigned int code,
-                                unsigned int value);
-extern void add_interrupt_randomness(int irq, int irq_flags);
+                                unsigned int value) __latent_entropy;
+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
 
 extern void get_random_bytes(void *buf, int nbytes);
 extern int add_random_ready_callback(struct random_ready_callback *rdy);
@@ -936,6 +936,7 @@ struct sk_buff_fclones {
 
 /**
  *     skb_fclone_busy - check if fclone is busy
+ *     @sk: socket
  *     @skb: buffer
  *
  *     Returns true if skb is a fast clone, and its clone is not freed.
@@ -902,8 +902,5 @@ asmlinkage long sys_pkey_mprotect(unsigned long start, size_t len,
                                  unsigned long prot, int pkey);
 asmlinkage long sys_pkey_alloc(unsigned long flags, unsigned long init_val);
 asmlinkage long sys_pkey_free(int pkey);
-//asmlinkage long sys_pkey_get(int pkey, unsigned long flags);
-//asmlinkage long sys_pkey_set(int pkey, unsigned long access_rights,
-//             unsigned long flags);
 
 #endif
@@ -13,17 +13,6 @@
 struct timespec;
 struct compat_timespec;
 
-#ifdef CONFIG_THREAD_INFO_IN_TASK
-struct thread_info {
-       unsigned long           flags;          /* low level flags */
-};
-
-#define INIT_THREAD_INFO(tsk)                  \
-{                                              \
-       .flags          = 0,                    \
-}
-#endif
-
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 #define current_thread_info() ((struct thread_info *)current)
 #endif