Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

We got slightly different patches removing a double word
in a comment in net/ipv4/raw.c - picked the version from net.

Simple conflict in drivers/net/ethernet/ibm/ibmvnic.c. Use cached
values instead of VNIC login response buffer (following what
commit 507ebe6444 ("ibmvnic: Fix use-after-free of VNIC login
response buffer") did).

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Author: Jakub Kicinski
Date:   2020-09-04 21:18:58 -07:00

 1669 files changed, 8287 insertions(+), 5744 deletions(-)
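
The ibmvnic resolution follows the same pattern as commit 507ebe6444 itself:
copy the fields you need out of the login response buffer while it is still
valid, then work only from the cached copy. A minimal user-space sketch of
that use-after-free avoidance pattern (names are hypothetical, not the actual
ibmvnic code):

#include <stdlib.h>

struct login_rsp { int num_queues; };	/* hypothetical response layout */
struct adapter { int num_queues; };	/* cached values live here */

static void handle_login(struct adapter *a, struct login_rsp *rsp)
{
	/* Cache what we need while the response buffer is still live... */
	a->num_queues = rsp->num_queues;
	free(rsp);	/* ...because the buffer does not outlive the handler. */
	/* From here on, read a->num_queues, never rsp->num_queues. */
}

int main(void)
{
	struct adapter a;
	struct login_rsp *rsp = malloc(sizeof(*rsp));

	if (!rsp)
		return 1;
	rsp->num_queues = 4;
	handle_login(&a, rsp);
	return a.num_queues != 4;
}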


@@ -117,11 +117,18 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv,
 	return true;
 }
 
+static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter)
+{
+	iter->bi_bvec_done = 0;
+	iter->bi_idx++;
+}
+
 #define for_each_bvec(bvl, bio_vec, iter, start)			\
 	for (iter = (start);						\
 	     (iter).bi_size &&						\
 		((bvl = bvec_iter_bvec((bio_vec), (iter))), 1);		\
-	     bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+	     (bvl).bv_len ? (void)bvec_iter_advance((bio_vec), &(iter),	\
+		     (bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter)))
 
 /* for iterating one bio from start to end */
 #define BVEC_ITER_ALL_INIT	(struct bvec_iter)			\
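
Why the advance can no longer be unconditional: bvec_iter_advance() moves the
iterator forward by bv_len bytes, so a zero-length bvec would make no progress
and the loop would spin forever; the new helper steps past it by bumping the
index directly. A user-space model of the fixed loop (simplified stand-ins,
not the kernel types):

#include <stdio.h>

struct bio_vec { unsigned int bv_len; };
struct bvec_iter { unsigned int bi_idx; unsigned int bi_size; };

int main(void)
{
	struct bio_vec vecs[] = { { 512 }, { 0 }, { 256 } };
	struct bvec_iter iter = { 0, 768 };

	while (iter.bi_size) {
		struct bio_vec bv = vecs[iter.bi_idx];

		if (bv.bv_len) {
			printf("segment %u: %u bytes\n", iter.bi_idx, bv.bv_len);
			iter.bi_size -= bv.bv_len;	/* bvec_iter_advance(), simplified */
			iter.bi_idx++;
		} else {
			iter.bi_idx++;			/* bvec_iter_skip_zero_bvec() */
		}
	}
	return 0;
}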


@@ -11,14 +11,14 @@
 #define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // CEPH_FEATURE_SERVER_JEWEL
 
 #define DEFINE_CEPH_FEATURE(bit, incarnation, name)			\
-	static const uint64_t CEPH_FEATURE_##name = (1ULL<<bit);	\
-	static const uint64_t CEPH_FEATUREMASK_##name =			\
+	static const uint64_t __maybe_unused CEPH_FEATURE_##name = (1ULL<<bit); \
+	static const uint64_t __maybe_unused CEPH_FEATUREMASK_##name =	\
 		(1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation);
 
 /* this bit is ignored but still advertised by release *when* */
 #define DEFINE_CEPH_FEATURE_DEPRECATED(bit, incarnation, name, when)	\
-	static const uint64_t DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \
-	static const uint64_t DEPRECATED_CEPH_FEATUREMASK_##name =	\
+	static const uint64_t __maybe_unused DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \
+	static const uint64_t __maybe_unused DEPRECATED_CEPH_FEATUREMASK_##name = \
 		(1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation);
 
 /*


@@ -429,11 +429,11 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
 	compat_sigset_t v;
 	switch (_NSIG_WORDS) {
 	case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
-		/* fall through */
+		fallthrough;
 	case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
-		/* fall through */
+		fallthrough;
 	case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
-		/* fall through */
+		fallthrough;
 	case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
 	}
 	return copy_to_user(compat, &v, size) ? -EFAULT : 0;
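
Many hunks in this merge replace /* fall through */ comments with the
fallthrough pseudo-keyword. include/linux/compiler_attributes.h defines it
roughly as below: a compiler that knows the attribute can then warn about
unannotated fallthrough, while older compilers see a harmless no-op.

#if defined(__has_attribute) && __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif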


@@ -956,8 +956,8 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 	case CPUFREQ_RELATION_C:
 		return cpufreq_table_find_index_c(policy, target_freq);
 	default:
-		pr_err("%s: Invalid relation: %d\n", __func__, relation);
-		return -EINVAL;
+		WARN_ON_ONCE(1);
+		return 0;
 	}
 }


@@ -75,12 +75,13 @@ struct cpuidle_state {
 };
 
 /* Idle State Flags */
-#define CPUIDLE_FLAG_NONE	(0x00)
-#define CPUIDLE_FLAG_POLLING	BIT(0) /* polling state */
-#define CPUIDLE_FLAG_COUPLED	BIT(1) /* state applies to multiple cpus */
-#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
-#define CPUIDLE_FLAG_UNUSABLE	BIT(3) /* avoid using this state */
-#define CPUIDLE_FLAG_OFF	BIT(4) /* disable this state by default */
+#define CPUIDLE_FLAG_NONE		(0x00)
+#define CPUIDLE_FLAG_POLLING		BIT(0) /* polling state */
+#define CPUIDLE_FLAG_COUPLED		BIT(1) /* state applies to multiple cpus */
+#define CPUIDLE_FLAG_TIMER_STOP 	BIT(2) /* timer is stopped on this state */
+#define CPUIDLE_FLAG_UNUSABLE		BIT(3) /* avoid using this state */
+#define CPUIDLE_FLAG_OFF		BIT(4) /* disable this state by default */
+#define CPUIDLE_FLAG_TLB_FLUSHED	BIT(5) /* idle-state flushes TLBs */
 
 struct cpuidle_device_kobj;
 struct cpuidle_state_kobj;


@@ -1200,7 +1200,7 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
 			BPF_ANCILLARY(RANDOM);
 			BPF_ANCILLARY(VLAN_TPID);
 		}
-		/* Fallthrough. */
+		fallthrough;
 	default:
 		return ftest->code;
 	}


@@ -2132,6 +2132,10 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
  *
  * I_DONTCACHE		Evict inode as soon as it is not used anymore.
  *
+ * I_SYNC_QUEUED	Inode is queued in b_io or b_more_io writeback lists.
+ *			Used to detect that mark_inode_dirty() should not move
+ *			inode between dirty lists.
+ *
  * Q: What is the difference between I_WILL_FREE and I_FREEING?
  */
 #define I_DIRTY_SYNC		(1 << 0)
@@ -2149,12 +2153,11 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
 #define I_DIO_WAKEUP		(1 << __I_DIO_WAKEUP)
 #define I_LINKABLE		(1 << 10)
 #define I_DIRTY_TIME		(1 << 11)
-#define __I_DIRTY_TIME_EXPIRED	12
-#define I_DIRTY_TIME_EXPIRED	(1 << __I_DIRTY_TIME_EXPIRED)
 #define I_WB_SWITCH		(1 << 13)
 #define I_OVL_INUSE		(1 << 14)
 #define I_CREATING		(1 << 15)
 #define I_DONTCACHE		(1 << 16)
+#define I_SYNC_QUEUED		(1 << 17)
 
 #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
 #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)


@@ -959,34 +959,49 @@ static inline void hid_device_io_stop(struct hid_device *hid) {
  * @max: maximal valid usage->code to consider later (out parameter)
  * @type: input event type (EV_KEY, EV_REL, ...)
  * @c: code which corresponds to this usage and type
+ *
+ * The value pointed to by @bit will be set to NULL if either @type is
+ * an unhandled event type, or if @c is out of range for @type. This
+ * can be used as an error condition.
  */
 static inline void hid_map_usage(struct hid_input *hidinput,
 		struct hid_usage *usage, unsigned long **bit, int *max,
-		__u8 type, __u16 c)
+		__u8 type, unsigned int c)
 {
 	struct input_dev *input = hidinput->input;
-
-	usage->type = type;
-	usage->code = c;
+	unsigned long *bmap = NULL;
+	unsigned int limit = 0;
 
 	switch (type) {
 	case EV_ABS:
-		*bit = input->absbit;
-		*max = ABS_MAX;
+		bmap = input->absbit;
+		limit = ABS_MAX;
 		break;
 	case EV_REL:
-		*bit = input->relbit;
-		*max = REL_MAX;
+		bmap = input->relbit;
+		limit = REL_MAX;
 		break;
 	case EV_KEY:
-		*bit = input->keybit;
-		*max = KEY_MAX;
+		bmap = input->keybit;
+		limit = KEY_MAX;
 		break;
 	case EV_LED:
-		*bit = input->ledbit;
-		*max = LED_MAX;
+		bmap = input->ledbit;
+		limit = LED_MAX;
 		break;
 	}
+
+	if (unlikely(c > limit || !bmap)) {
+		pr_warn_ratelimited("%s: Invalid code %d type %d\n",
+				    input->name, c, type);
+		*bit = NULL;
+		return;
+	}
+
+	usage->type = type;
+	usage->code = c;
+	*max = limit;
+	*bit = bmap;
 }
 
 /**
@@ -1000,7 +1015,8 @@ static inline void hid_map_usage_clear(struct hid_input *hidinput,
 			__u8 type, __u16 c)
 {
 	hid_map_usage(hidinput, usage, bit, max, type, c);
-	clear_bit(c, *bit);
+	if (*bit)
+		clear_bit(usage->code, *bit);
 }
 
 /**
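
The rewritten hid_map_usage() is a textbook validate-then-commit conversion:
compute candidate results into locals, range-check them, and only then write
through the caller's out-parameters, so a failed mapping leaves *bit NULL
instead of a half-updated usage. A hedged user-space sketch of the same shape
(all names hypothetical):

#include <stdio.h>
#include <stddef.h>

static void map_code(int type, unsigned int code,
		     const unsigned int **table_out, unsigned int *max_out)
{
	static const unsigned int key_table[16], rel_table[4];
	const unsigned int *table = NULL;
	unsigned int limit = 0;

	switch (type) {
	case 1: table = key_table; limit = 15; break;
	case 2: table = rel_table; limit = 3; break;
	}

	if (!table || code > limit) {	/* reject before touching outputs */
		*table_out = NULL;
		return;
	}
	*table_out = table;		/* commit only after validation */
	*max_out = limit;
}

int main(void)
{
	const unsigned int *t;
	unsigned int max = 0;

	map_code(2, 9, &t, &max);	/* code 9 > limit 3: t becomes NULL */
	printf("%s\n", t ? "mapped" : "rejected");
	return 0;
}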


@@ -49,17 +49,18 @@ struct irqtrace_events {
 DECLARE_PER_CPU(int, hardirqs_enabled);
 DECLARE_PER_CPU(int, hardirq_context);
 
-extern void trace_hardirqs_on_prepare(void);
-extern void trace_hardirqs_off_finish(void);
-extern void trace_hardirqs_on(void);
-extern void trace_hardirqs_off(void);
-# define lockdep_hardirq_context()	(this_cpu_read(hardirq_context))
+  extern void trace_hardirqs_on_prepare(void);
+  extern void trace_hardirqs_off_finish(void);
+  extern void trace_hardirqs_on(void);
+  extern void trace_hardirqs_off(void);
+# define lockdep_hardirq_context()	(raw_cpu_read(hardirq_context))
 # define lockdep_softirq_context(p)	((p)->softirq_context)
 # define lockdep_hardirqs_enabled()	(this_cpu_read(hardirqs_enabled))
 # define lockdep_softirqs_enabled(p)	((p)->softirqs_enabled)
 # define lockdep_hardirq_enter()			\
 do {							\
-	if (this_cpu_inc_return(hardirq_context) == 1)	\
+	if (__this_cpu_inc_return(hardirq_context) == 1)\
 		current->hardirq_threaded = 0;		\
 } while (0)
 # define lockdep_hardirq_threaded()		\
@@ -68,7 +69,7 @@ do { \
 } while (0)
 # define lockdep_hardirq_exit()			\
 do {						\
-	this_cpu_dec(hardirq_context);		\
+	__this_cpu_dec(hardirq_context);	\
 } while (0)
 # define lockdep_softirq_enter()		\
 do {						\
@@ -120,17 +121,17 @@ do { \
 #else
 # define trace_hardirqs_on_prepare()		do { } while (0)
 # define trace_hardirqs_off_finish()		do { } while (0)
-# define trace_hardirqs_on()		do { } while (0)
-# define trace_hardirqs_off()		do { } while (0)
-# define lockdep_hardirq_context()	0
-# define lockdep_softirq_context(p)	0
-# define lockdep_hardirqs_enabled()	0
-# define lockdep_softirqs_enabled(p)	0
-# define lockdep_hardirq_enter()	do { } while (0)
-# define lockdep_hardirq_threaded()	do { } while (0)
-# define lockdep_hardirq_exit()		do { } while (0)
-# define lockdep_softirq_enter()	do { } while (0)
-# define lockdep_softirq_exit()		do { } while (0)
+# define trace_hardirqs_on()			do { } while (0)
+# define trace_hardirqs_off()			do { } while (0)
+# define lockdep_hardirq_context()		0
+# define lockdep_softirq_context(p)		0
+# define lockdep_hardirqs_enabled()		0
+# define lockdep_softirqs_enabled(p)		0
+# define lockdep_hardirq_enter()		do { } while (0)
+# define lockdep_hardirq_threaded()		do { } while (0)
+# define lockdep_hardirq_exit()			do { } while (0)
+# define lockdep_softirq_enter()		do { } while (0)
+# define lockdep_softirq_exit()			do { } while (0)
 # define lockdep_hrtimer_enter(__hrtimer)	false
 # define lockdep_hrtimer_exit(__context)	do { } while (0)
 # define lockdep_posixtimer_enter()		do { } while (0)
@@ -181,26 +182,33 @@ do { \
  * if !TRACE_IRQFLAGS.
  */
 #ifdef CONFIG_TRACE_IRQFLAGS
-#define local_irq_enable() \
-	do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
-#define local_irq_disable() \
-	do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
+#define local_irq_enable()				\
+	do {						\
+		trace_hardirqs_on();			\
+		raw_local_irq_enable();			\
+	} while (0)
+
+#define local_irq_disable()				\
+	do {						\
+		bool was_disabled = raw_irqs_disabled();\
+		raw_local_irq_disable();		\
+		if (!was_disabled)			\
+			trace_hardirqs_off();		\
+	} while (0)
 
 #define local_irq_save(flags)				\
 	do {						\
 		raw_local_irq_save(flags);		\
-		trace_hardirqs_off();			\
+		if (!raw_irqs_disabled_flags(flags))	\
+			trace_hardirqs_off();		\
 	} while (0)
 
 #define local_irq_restore(flags)			\
 	do {						\
-		if (raw_irqs_disabled_flags(flags)) {	\
-			raw_local_irq_restore(flags);	\
-			trace_hardirqs_off();		\
-		} else {				\
+		if (!raw_irqs_disabled_flags(flags))	\
 			trace_hardirqs_on();		\
-			raw_local_irq_restore(flags);	\
-		}					\
+		raw_local_irq_restore(flags);		\
 	} while (0)
 
 #define safe_halt()					\
@@ -214,10 +222,7 @@ do { \
 #define local_irq_enable()	do { raw_local_irq_enable(); } while (0)
 #define local_irq_disable()	do { raw_local_irq_disable(); } while (0)
-#define local_irq_save(flags)				\
-	do {						\
-		raw_local_irq_save(flags);		\
-	} while (0)
+#define local_irq_save(flags)	do { raw_local_irq_save(flags); } while (0)
 #define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0)
 #define safe_halt()		do { raw_safe_halt(); } while (0)
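
The local_irq_*() rewrites make the tracing edge-triggered: disable only
traces an "off" event when interrupts were previously enabled, and restore
traces the "on" edge before actually re-enabling. A small user-space model of
edge-only state tracking (hypothetical code, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

static bool irqs_off;			/* models raw_irqs_disabled() */

static void trace(const char *edge) { printf("trace: hardirqs %s\n", edge); }

static void irq_disable(void)
{
	bool was_off = irqs_off;	/* sample the state first */

	irqs_off = true;
	if (!was_off)			/* only the enabled -> disabled edge */
		trace("off");
}

static void irq_restore(bool saved_off)
{
	if (!saved_off)			/* trace the edge before re-enabling */
		trace("on");
	irqs_off = saved_off;
}

int main(void)
{
	irq_disable();			/* traces "off" */
	irq_disable();			/* nested: state unchanged, no trace */
	irq_restore(true);		/* still disabled: no trace */
	irq_restore(false);		/* traces "on" */
	return 0;
}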


@@ -86,17 +86,17 @@ static inline u32 jhash(const void *key, u32 length, u32 initval)
 	}
 	/* Last block: affect all 32 bits of (c) */
 	switch (length) {
-	case 12: c += (u32)k[11]<<24;	/* fall through */
-	case 11: c += (u32)k[10]<<16;	/* fall through */
-	case 10: c += (u32)k[9]<<8;	/* fall through */
-	case 9:  c += k[8];		/* fall through */
-	case 8:  b += (u32)k[7]<<24;	/* fall through */
-	case 7:  b += (u32)k[6]<<16;	/* fall through */
-	case 6:  b += (u32)k[5]<<8;	/* fall through */
-	case 5:  b += k[4];		/* fall through */
-	case 4:  a += (u32)k[3]<<24;	/* fall through */
-	case 3:  a += (u32)k[2]<<16;	/* fall through */
-	case 2:  a += (u32)k[1]<<8;	/* fall through */
+	case 12: c += (u32)k[11]<<24;	fallthrough;
+	case 11: c += (u32)k[10]<<16;	fallthrough;
+	case 10: c += (u32)k[9]<<8;	fallthrough;
+	case 9:  c += k[8];		fallthrough;
+	case 8:  b += (u32)k[7]<<24;	fallthrough;
+	case 7:  b += (u32)k[6]<<16;	fallthrough;
+	case 6:  b += (u32)k[5]<<8;	fallthrough;
+	case 5:  b += k[4];		fallthrough;
+	case 4:  a += (u32)k[3]<<24;	fallthrough;
+	case 3:  a += (u32)k[2]<<16;	fallthrough;
+	case 2:  a += (u32)k[1]<<8;	fallthrough;
 	case 1:  a += k[0];
 		 __jhash_final(a, b, c);
 	case 0: /* Nothing left to add */
@@ -132,8 +132,8 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
 
 	/* Handle the last 3 u32's */
 	switch (length) {
-	case 3: c += k[2];	/* fall through */
-	case 2: b += k[1];	/* fall through */
+	case 3: c += k[2];	fallthrough;
+	case 2: b += k[1];	fallthrough;
 	case 1: a += k[0];
 		__jhash_final(a, b, c);
 	case 0: /* Nothing left to add */
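
The deliberate fallthrough is what makes jhash's tail handling work: entering
the switch at case N folds in trailing byte N-1 and then falls through every
smaller case, accumulating all remaining bytes without a loop. A compact
illustration of the idiom:

#include <stdio.h>

/* Fold up to three trailing bytes into an accumulator, jhash-style. */
static unsigned int fold_tail(const unsigned char *k, int len)
{
	unsigned int a = 0;

	switch (len) {
	case 3: a += (unsigned int)k[2] << 16;	/* fall through */
	case 2: a += (unsigned int)k[1] << 8;	/* fall through */
	case 1: a += k[0];
	}
	return a;
}

int main(void)
{
	const unsigned char tail[] = { 0x11, 0x22, 0x33 };

	printf("0x%x\n", fold_tail(tail, 3));	/* prints 0x332211 */
	return 0;
}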


@@ -186,7 +186,7 @@
  * lower_32_bits - return bits 0-31 of a number
  * @n: the number we're accessing
  */
-#define lower_32_bits(n) ((u32)(n))
+#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
 
 struct completion;
 struct pt_regs;
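
lower_32_bits() pairs with upper_32_bits() when a 64-bit value has to be split
across two 32-bit registers; the added mask makes the truncation explicit
rather than relying on the cast alone. A worked example (macros copied here so
the snippet stands alone):

#include <stdio.h>
#include <stdint.h>

#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffff))
#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))	/* two shifts avoid UB on 32-bit types */

int main(void)
{
	uint64_t addr = 0x1122334455667788ULL;

	/* prints lo=0x55667788 hi=0x11223344 */
	printf("lo=0x%08x hi=0x%08x\n", lower_32_bits(addr), upper_32_bits(addr));
	return 0;
}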


@@ -53,8 +53,6 @@ struct page *ksm_might_need_to_copy(struct page *page,
 			struct vm_area_struct *vma, unsigned long address);
 
 void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
 void ksm_migrate_page(struct page *newpage, struct page *oldpage);
-bool reuse_ksm_page(struct page *page,
-		    struct vm_area_struct *vma, unsigned long address);
 
 #else  /* !CONFIG_KSM */
@@ -88,11 +86,6 @@ static inline void rmap_walk_ksm(struct page *page,
 static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 {
 }
 
-static inline bool reuse_ksm_page(struct page *page,
-			struct vm_area_struct *vma, unsigned long address)
-{
-	return false;
-}
 #endif /* CONFIG_MMU */
 #endif /* !CONFIG_KSM */


@@ -421,6 +421,7 @@ enum {
 	ATA_HORKAGE_NO_DMA_LOG	= (1 << 23),	/* don't use DMA for log read */
 	ATA_HORKAGE_NOTRIM	= (1 << 24),	/* don't use TRIM */
 	ATA_HORKAGE_MAX_SEC_1024 = (1 << 25),	/* Limit max sects to 1024 */
+	ATA_HORKAGE_MAX_TRIM_128M = (1 << 26),	/* Limit max trim size to 128M */
 
 	/* DMA mask for user DMA control: User visible values; DO NOT
 	   renumber */


@@ -535,19 +535,27 @@ do { \
 DECLARE_PER_CPU(int, hardirqs_enabled);
 DECLARE_PER_CPU(int, hardirq_context);
 
+/*
+ * The below lockdep_assert_*() macros use raw_cpu_read() to access the above
+ * per-cpu variables. This is required because this_cpu_read() will potentially
+ * call into preempt/irq-disable and that obviously isn't right. This is also
+ * correct because when IRQs are enabled, it doesn't matter if we accidentally
+ * read the value from our previous CPU.
+ */
+
 #define lockdep_assert_irqs_enabled()					\
 do {									\
-	WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirqs_enabled));	\
+	WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirqs_enabled));	\
 } while (0)
 
 #define lockdep_assert_irqs_disabled()					\
 do {									\
-	WARN_ON_ONCE(debug_locks && this_cpu_read(hardirqs_enabled));	\
+	WARN_ON_ONCE(debug_locks && raw_cpu_read(hardirqs_enabled));	\
 } while (0)
 
 #define lockdep_assert_in_irq()						\
 do {									\
-	WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirq_context));	\
+	WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirq_context));	\
 } while (0)
 
 #define lockdep_assert_preemption_enabled()				\
@@ -555,7 +563,7 @@ do { \
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
 		     debug_locks			&&		\
 		     (preempt_count() != 0		||		\
-		      !this_cpu_read(hardirqs_enabled)));		\
+		      !raw_cpu_read(hardirqs_enabled)));		\
 } while (0)
 
 #define lockdep_assert_preemption_disabled()				\
@@ -563,7 +571,7 @@ do { \
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
 		     debug_locks			&&		\
 		     (preempt_count() == 0		&&		\
-		      this_cpu_read(hardirqs_enabled)));		\
+		      raw_cpu_read(hardirqs_enabled)));			\
 } while (0)
 
 #else


@@ -157,11 +157,14 @@ static inline void __mm_zero_struct_page(struct page *page)
 	switch (sizeof(struct page)) {
 	case 80:
-		_pp[9] = 0;	/* fallthrough */
+		_pp[9] = 0;
+		fallthrough;
 	case 72:
-		_pp[8] = 0;	/* fallthrough */
+		_pp[8] = 0;
+		fallthrough;
 	case 64:
-		_pp[7] = 0;	/* fallthrough */
+		_pp[7] = 0;
+		fallthrough;
 	case 56:
 		_pp[6] = 0;
 		_pp[5] = 0;
@@ -321,6 +324,8 @@ extern unsigned int kobjsize(const void *objp);
 #if defined(CONFIG_X86)
 # define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
 #elif defined(CONFIG_PPC)
+# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
 #elif defined(CONFIG_PARISC)
 # define VM_GROWSUP	VM_ARCH_1
 #elif defined(CONFIG_IA64)


@@ -3,10 +3,15 @@
 #define _LINUX_MMU_CONTEXT_H
 
 #include <asm/mmu_context.h>
+#include <asm/mmu.h>
 
 /* Architectures that care about IRQ state in switch_mm can override this. */
 #ifndef switch_mm_irqs_off
 # define switch_mm_irqs_off switch_mm
 #endif
 
+#ifndef leave_mm
+static inline void leave_mm(int cpu) { }
+#endif
+
 #endif
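
Both blocks use the kernel's override-with-default idiom: an architecture that
defines the symbol before this header is included wins, and everyone else gets
the generic fallback. The shape of the pattern, with a hypothetical symbol:

/* hypothetical arch header, included first, provides an optimized version */
#define flush_widgets	my_arch_flush_widgets

/* generic header: define the fallback only if the arch did not */
#ifndef flush_widgets
# define flush_widgets	generic_flush_widgets
#endif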


@@ -9,6 +9,8 @@ struct ip_ct_sctp {
 	enum sctp_conntrack state;
 
 	__be32 vtag[IP_CT_DIR_MAX];
+	u8 last_dir;
+	u8 flags;
 };
 
 #endif /* _NF_CONNTRACK_SCTP_H */


@@ -43,8 +43,7 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group);
 int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
 		   unsigned int group, int echo, gfp_t flags);
 int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
-int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
-		      int flags);
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid);
 
 static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type)
 {


@@ -1666,7 +1666,7 @@ extern struct task_struct *idle_task(int cpu);
  *
  * Return: 1 if @p is an idle task. 0 otherwise.
  */
-static inline bool is_idle_task(const struct task_struct *p)
+static __always_inline bool is_idle_task(const struct task_struct *p)
 {
 	return !!(p->flags & PF_IDLE);
 }


@@ -137,11 +137,11 @@ static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \
 		b3 = b->sig[3]; b2 = b->sig[2];				\
 		r->sig[3] = op(a3, b3);					\
 		r->sig[2] = op(a2, b2);					\
-		/* fall through */					\
+		fallthrough;						\
 	case 2:								\
 		a1 = a->sig[1]; b1 = b->sig[1];				\
 		r->sig[1] = op(a1, b1);					\
-		/* fall through */					\
+		fallthrough;						\
 	case 1:								\
 		a0 = a->sig[0]; b0 = b->sig[0];				\
 		r->sig[0] = op(a0, b0);					\
@@ -171,9 +171,9 @@ static inline void name(sigset_t *set) \
 	switch (_NSIG_WORDS) {						\
 	case 4: set->sig[3] = op(set->sig[3]);				\
 		set->sig[2] = op(set->sig[2]);				\
-		/* fall through */					\
+		fallthrough;						\
 	case 2: set->sig[1] = op(set->sig[1]);				\
-		/* fall through */					\
+		fallthrough;						\
 	case 1: set->sig[0] = op(set->sig[0]);				\
 		break;							\
 	default:							\
@@ -194,7 +194,7 @@ static inline void sigemptyset(sigset_t *set)
 		memset(set, 0, sizeof(sigset_t));
 		break;
 	case 2: set->sig[1] = 0;
-		/* fall through */
+		fallthrough;
 	case 1:	set->sig[0] = 0;
 		break;
 	}
@@ -207,7 +207,7 @@ static inline void sigfillset(sigset_t *set)
 		memset(set, -1, sizeof(sigset_t));
 		break;
 	case 2: set->sig[1] = -1;
-		/* fall through */
+		fallthrough;
 	case 1:	set->sig[0] = -1;
 		break;
 	}


@@ -71,7 +71,7 @@
  *	NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
  *			  TCP or UDP packets over IPv6. These are specifically
  *			  unencapsulated packets of the form IPv6|TCP or
- *			  IPv4|UDP where the Next Header field in the IPv6
+ *			  IPv6|UDP where the Next Header field in the IPv6
  *			  header is either TCP or UDP. IPv6 extension headers
  *			  are not supported with this feature. This feature
  *			  cannot be set in features for a device with
@@ -1056,7 +1056,16 @@ void kfree_skb(struct sk_buff *skb);
 void kfree_skb_list(struct sk_buff *segs);
 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
 void skb_tx_error(struct sk_buff *skb);
+
+#ifdef CONFIG_TRACEPOINTS
 void consume_skb(struct sk_buff *skb);
+#else
+static inline void consume_skb(struct sk_buff *skb)
+{
+	return kfree_skb(skb);
+}
+#endif
+
 void __consume_stateless_skb(struct sk_buff *skb);
 void __kfree_skb(struct sk_buff *skb);
 extern struct kmem_cache *skbuff_head_cache;
@@ -2658,7 +2667,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
  *
  * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
  * to reduce average number of cache lines per packet.
- * get_rps_cpus() for example only access one 64 bytes aligned block :
+ * get_rps_cpu() for example only access one 64 bytes aligned block :
  * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
  */
 #ifndef NET_SKB_PAD
@@ -3745,19 +3754,19 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
 #define __it(x, op) (x -= sizeof(u##op))
 #define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
 	case 32: diffs |= __it_diff(a, b, 64);
-		/* fall through */
+		fallthrough;
 	case 24: diffs |= __it_diff(a, b, 64);
-		/* fall through */
+		fallthrough;
 	case 16: diffs |= __it_diff(a, b, 64);
-		/* fall through */
+		fallthrough;
 	case  8: diffs |= __it_diff(a, b, 64);
 		break;
 	case 28: diffs |= __it_diff(a, b, 64);
-		/* fall through */
+		fallthrough;
 	case 20: diffs |= __it_diff(a, b, 64);
-		/* fall through */
+		fallthrough;
 	case 12: diffs |= __it_diff(a, b, 64);
-		/* fall through */
+		fallthrough;
 	case  4: diffs |= __it_diff(a, b, 32);
 		break;
 	}
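
The consume_skb() hunk is a common configuration pattern: when tracepoints are
compiled out, the out-of-line function, whose only extra job is emitting the
trace event, collapses into a static inline alias for the plain free. The
shape of the pattern, with a hypothetical pair of functions:

struct obj;
void free_obj(struct obj *o);

#ifdef CONFIG_FOO_TRACING
void release_obj(struct obj *o);	/* out-of-line: emits a trace event, then frees */
#else
static inline void release_obj(struct obj *o)
{
	free_obj(o);			/* tracing off: nothing left but the free */
}
#endif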


@@ -220,6 +220,9 @@ struct ti_sci_rm_core_ops {
 			     u16 *range_start, u16 *range_num);
 };
 
+#define TI_SCI_RESASG_SUBTYPE_IR_OUTPUT		0
+#define TI_SCI_RESASG_SUBTYPE_IA_VINT		0xa
+#define TI_SCI_RESASG_SUBTYPE_GLOBAL_EVENT_SEVT	0xd
 /**
  * struct ti_sci_rm_irq_ops: IRQ management operations
  * @set_irq:		Set an IRQ route between the requested source
@@ -556,6 +559,9 @@ u32 ti_sci_get_num_resources(struct ti_sci_resource *res);
 
 struct ti_sci_resource *
 devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
			    struct device *dev, u32 dev_id, char *of_prop);
+struct ti_sci_resource *
+devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
+			 u32 dev_id, u32 sub_type);
 
 #else	/* CONFIG_TI_SCI_PROTOCOL */
@@ -609,6 +615,13 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
 {
 	return ERR_PTR(-EINVAL);
 }
 
+static inline struct ti_sci_resource *
+devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
+			 u32 dev_id, u32 sub_type)
+{
+	return ERR_PTR(-EINVAL);
+}
+
 #endif	/* CONFIG_TI_SCI_PROTOCOL */
 #endif	/* __TISCI_PROTOCOL_H */
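
The !CONFIG_TI_SCI_PROTOCOL stubs return ERR_PTR(-EINVAL) so callers can use
one IS_ERR() check whether or not the driver is built in. A user-space model
of the ERR_PTR encoding (simplified from include/linux/err.h, which reserves
the top 4095 pointer values for errnos):

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *res = ERR_PTR(-22);	/* the stub path: -EINVAL */

	if (IS_ERR(res))
		printf("error: %ld\n", PTR_ERR(res));	/* error: -22 */
	return 0;
}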


@@ -30,6 +30,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		PGFAULT, PGMAJFAULT,
 		PGLAZYFREED,
 		PGREFILL,
+		PGREUSE,
 		PGSTEAL_KSWAPD,
 		PGSTEAL_DIRECT,
 		PGSCAN_KSWAPD,