Merge v5.4-rc4 into drm-next
Thierry needs fd70c7755b
("drm/bridge: tc358767: fix max_tu_symbol
value") to be able to merge his dp_link patch series.
Some adjacent changes conflict, plus there are some clashes in i915
due to cherry-picking and git trying to be helpful by leaving both
versions in.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
include/linux/bitmap.h

@@ -326,10 +326,11 @@ static inline int bitmap_equal(const unsigned long *src1,
 }
 
 /**
- * bitmap_or_equal - Check whether the or of two bitnaps is equal to a third
+ * bitmap_or_equal - Check whether the or of two bitmaps is equal to a third
  * @src1: Pointer to bitmap 1
  * @src2: Pointer to bitmap 2 will be or'ed with bitmap 1
  * @src3: Pointer to bitmap 3. Compare to the result of *@src1 | *@src2
+ * @nbits: number of bits in each of these bitmaps
  *
  * Returns: True if (*@src1 | *@src2) == *@src3, false otherwise
  */

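For reference, a minimal sketch of a call site for the helper documented
above (the function and the NR_LINES constant are hypothetical, not part
of this merge):

    #include <linux/bitmap.h>

    #define NR_LINES 64

    /* True when every pending line is either masked or already handled. */
    static bool lines_covered(const unsigned long *pending,
                              const unsigned long *masked,
                              const unsigned long *handled)
    {
            return bitmap_or_equal(pending, masked, handled, NR_LINES);
    }
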
include/linux/compiler_attributes.h

@@ -40,6 +40,7 @@
 # define __GCC4_has_attribute___noclone__ 1
 # define __GCC4_has_attribute___nonstring__ 0
 # define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
+# define __GCC4_has_attribute___fallthrough__ 0
 #endif
 
 /*
@@ -185,6 +186,22 @@
 # define __noclone
 #endif
 
+/*
+ * Add the pseudo keyword 'fallthrough' so case statement blocks
+ * must end with any of these keywords:
+ *   break;
+ *   fallthrough;
+ *   goto <label>;
+ *   return [expression];
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Statement-Attributes.html#Statement-Attributes
+ */
+#if __has_attribute(__fallthrough__)
+# define fallthrough __attribute__((__fallthrough__))
+#else
+# define fallthrough do {} while (0) /* fallthrough */
+#endif
+
 /*
  * Note the missing underscores.
  *

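A short sketch of the new pseudo keyword in use, replacing the old
/* fall through */ comment convention (the switch and its states are
made up for illustration):

    switch (state) {
    case STATE_INIT:
            init_hw();
            fallthrough;    /* deliberate: INIT also does STATE_RUN's work */
    case STATE_RUN:
            run_hw();
            break;
    default:
            return -EINVAL;
    }

Unlike the comment, the attribute form is visible to the compiler, so
-Wimplicit-fallthrough can flag the cases that lack it.
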
include/linux/export.h

@@ -52,10 +52,10 @@ extern struct module __this_module;
 	__ADDRESSABLE(sym) \
 	asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \
 	    " .balign 4 \n" \
-	    "__ksymtab_" #sym NS_SEPARATOR #ns ": \n" \
+	    "__ksymtab_" #ns NS_SEPARATOR #sym ": \n" \
 	    " .long " #sym "- . \n" \
 	    " .long __kstrtab_" #sym "- . \n" \
-	    " .long __kstrtab_ns_" #sym "- . \n" \
+	    " .long __kstrtabns_" #sym "- . \n" \
 	    " .previous \n")
 
 #define __KSYMTAB_ENTRY(sym, sec) \
@@ -76,10 +76,10 @@ struct kernel_symbol {
 #else
 #define __KSYMTAB_ENTRY_NS(sym, sec, ns) \
 	static const struct kernel_symbol __ksymtab_##sym##__##ns \
-	asm("__ksymtab_" #sym NS_SEPARATOR #ns) \
+	asm("__ksymtab_" #ns NS_SEPARATOR #sym) \
 	__attribute__((section("___ksymtab" sec "+" #sym), used)) \
 	__aligned(sizeof(void *)) \
-	= { (unsigned long)&sym, __kstrtab_##sym, __kstrtab_ns_##sym }
+	= { (unsigned long)&sym, __kstrtab_##sym, __kstrtabns_##sym }
 
 #define __KSYMTAB_ENTRY(sym, sec) \
 	static const struct kernel_symbol __ksymtab_##sym \
@@ -112,7 +112,7 @@ struct kernel_symbol {
 /* For every exported symbol, place a struct in the __ksymtab section */
 #define ___EXPORT_SYMBOL_NS(sym, sec, ns) \
 	___export_symbol_common(sym, sec); \
-	static const char __kstrtab_ns_##sym[] \
+	static const char __kstrtabns_##sym[] \
 	__attribute__((section("__ksymtab_strings"), used, aligned(1))) \
 	= #ns; \
 	__KSYMTAB_ENTRY_NS(sym, sec, ns)

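For context, roughly how the namespaced-export machinery touched above is
used by modules; the exported symbol name here is hypothetical, while the
macros and the USB_STORAGE namespace are the ones from this series:

    /* Exporting module: put the symbol in the USB_STORAGE namespace. */
    int usb_stor_do_thing(void);
    EXPORT_SYMBOL_NS(usb_stor_do_thing, USB_STORAGE);

    /* Importing module: must declare the namespace it pulls symbols
     * from, otherwise modpost warns at build time.
     */
    MODULE_IMPORT_NS(USB_STORAGE);
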
include/linux/gpio/driver.h

@@ -201,6 +201,14 @@ struct gpio_irq_chip {
 	 */
 	bool threaded;
 
+	/**
+	 * @init_hw: optional routine to initialize hardware before
+	 * an IRQ chip will be added. This is quite useful when
+	 * a particular driver wants to clear IRQ related registers
+	 * in order to avoid undesired events.
+	 */
+	int (*init_hw)(struct gpio_chip *chip);
+
 	/**
 	 * @init_valid_mask: optional routine to initialize @valid_mask, to be
 	 * used if not all GPIO lines are valid interrupts. Sometimes some

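A sketch of a driver wiring up the new @init_hw hook; the foo_* names and
register offsets are made up, the callback shape matches the field above:

    static int foo_gpio_irq_init_hw(struct gpio_chip *gc)
    {
            struct foo_gpio *foo = gpiochip_get_data(gc);

            /* Mask and ack everything before the irqchip is registered. */
            writel(0, foo->base + FOO_IRQ_ENABLE);
            writel(~0U, foo->base + FOO_IRQ_STATUS);
            return 0;
    }

    /* ...in probe(): */
    chip->irq.init_hw = foo_gpio_irq_init_hw;
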
include/linux/hwmon.h

@@ -235,7 +235,7 @@ enum hwmon_power_attributes {
 #define HWMON_P_LABEL BIT(hwmon_power_label)
 #define HWMON_P_ALARM BIT(hwmon_power_alarm)
 #define HWMON_P_CAP_ALARM BIT(hwmon_power_cap_alarm)
-#define HWMON_P_MIN_ALARM BIT(hwmon_power_max_alarm)
+#define HWMON_P_MIN_ALARM BIT(hwmon_power_min_alarm)
 #define HWMON_P_MAX_ALARM BIT(hwmon_power_max_alarm)
 #define HWMON_P_LCRIT_ALARM BIT(hwmon_power_lcrit_alarm)
 #define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm)

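The one-character fix matters to any driver that declares both alarm bits;
before it, HWMON_P_MIN_ALARM silently aliased the max alarm. A hypothetical
channel declaration relying on the two being distinct:

    static const struct hwmon_channel_info *foo_info[] = {
            HWMON_CHANNEL_INFO(power,
                               HWMON_P_INPUT | HWMON_P_MIN_ALARM |
                               HWMON_P_MAX_ALARM),
            NULL
    };
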
include/linux/leds.h

@@ -247,7 +247,7 @@ extern void led_set_brightness(struct led_classdev *led_cdev,
 /**
  * led_set_brightness_sync - set LED brightness synchronously
  * @led_cdev: the LED to set
- * @brightness: the brightness to set it to
+ * @value: the brightness to set it to
  *
  * Set an LED's brightness immediately. This function will block
  * the caller for the time required for accessing device registers,
@@ -301,8 +301,7 @@ extern void led_sysfs_enable(struct led_classdev *led_cdev);
 /**
  * led_compose_name - compose LED class device name
  * @dev: LED controller device object
- * @child: child fwnode_handle describing a LED or a group of synchronized LEDs;
- *	it must be provided only for fwnode based LEDs
+ * @init_data: the LED class device initialization data
  * @led_classdev_name: composed LED class device name
  *
  * Create LED class device name basing on the provided init_data argument.

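A sketch of the call the corrected kernel-doc describes; the buffer size
and the fwnode variable are assumptions here:

    struct led_init_data init_data = { .fwnode = fwnode };
    char name[64];
    int ret;

    ret = led_compose_name(dev, &init_data, name);
    if (ret)
            return ret;
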
include/linux/memcontrol.h

@@ -356,6 +356,19 @@ static inline bool mem_cgroup_disabled(void)
 	return !cgroup_subsys_enabled(memory_cgrp_subsys);
 }
 
+static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
+						  bool in_low_reclaim)
+{
+	if (mem_cgroup_disabled())
+		return 0;
+
+	if (in_low_reclaim)
+		return READ_ONCE(memcg->memory.emin);
+
+	return max(READ_ONCE(memcg->memory.emin),
+		   READ_ONCE(memcg->memory.elow));
+}
+
 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
 						struct mem_cgroup *memcg);
 
@@ -537,6 +550,8 @@ void mem_cgroup_handle_over_high(void);
 
 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
 
+unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
+
 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
 				  struct task_struct *p);
 
@@ -829,6 +844,12 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
 {
 }
 
+static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
+						  bool in_low_reclaim)
+{
+	return 0;
+}
+
 static inline enum mem_cgroup_protection mem_cgroup_protected(
 	struct mem_cgroup *root, struct mem_cgroup *memcg)
 {
@@ -968,6 +989,11 @@ static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
 	return 0;
 }
 
+static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
+{
+	return 0;
+}
+
 static inline void
 mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
 {
@@ -1264,6 +1290,9 @@ void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
 static inline void mem_cgroup_track_foreign_dirty(struct page *page,
 						  struct bdi_writeback *wb)
 {
+	if (mem_cgroup_disabled())
+		return;
+
 	if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
 		mem_cgroup_track_foreign_dirty_slowpath(page, wb);
 }

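Roughly how the reclaim path consumes the two helpers added above; this is
a paraphrased sketch of the vmscan logic, with lruvec_size assumed to have
been computed earlier:

    unsigned long scan, lruvec_size, protection, cgroup_size;

    cgroup_size = mem_cgroup_size(memcg);
    protection = mem_cgroup_protection(memcg, in_low_reclaim);

    if (protection) {
            /* Scale scan pressure by how far usage exceeds protection. */
            scan = lruvec_size - lruvec_size * protection / cgroup_size;
            scan = max(scan, SWAP_CLUSTER_MAX);
    }
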
include/linux/micrel_phy.h

@@ -31,7 +31,7 @@
 #define PHY_ID_KSZ886X 0x00221430
 #define PHY_ID_KSZ8863 0x00221435
 
-#define PHY_ID_KSZ8795 0x00221550
+#define PHY_ID_KSZ87XX 0x00221550
 
 #define PHY_ID_KSZ9477 0x00221631
 

include/linux/page_ext.h

@@ -18,7 +18,7 @@ struct page_ext_operations {
 
 enum page_ext_flags {
 	PAGE_EXT_OWNER,
-	PAGE_EXT_OWNER_ACTIVE,
+	PAGE_EXT_OWNER_ALLOCATED,
 #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
 	PAGE_EXT_YOUNG,
 	PAGE_EXT_IDLE,
@@ -36,6 +36,7 @@ struct page_ext {
 	unsigned long flags;
 };
 
+extern unsigned long page_ext_size;
 extern void pgdat_page_ext_init(struct pglist_data *pgdat);
 
 #ifdef CONFIG_SPARSEMEM
@@ -52,6 +53,13 @@ static inline void page_ext_init(void)
 
 struct page_ext *lookup_page_ext(const struct page *page);
 
+static inline struct page_ext *page_ext_next(struct page_ext *curr)
+{
+	void *next = curr;
+	next += page_ext_size;
+	return next;
+}
+
 #else /* !CONFIG_PAGE_EXTENSION */
 struct page_ext;
 

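A sketch of the iteration pattern page_ext_next() enables, modelled on how
page_owner walks the page_ext entries of a high-order allocation (the loop
below is illustrative, not the verbatim code):

    struct page_ext *page_ext = lookup_page_ext(page);
    int i;

    for (i = 0; i < (1 << order); i++) {
            __set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
            /* entries are page_ext_size bytes apart, not sizeof(*page_ext) */
            page_ext = page_ext_next(page_ext);
    }
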
include/linux/platform_device.h

@@ -64,6 +64,8 @@ extern struct resource *platform_get_resource_byname(struct platform_device *,
 						     unsigned int,
 						     const char *);
 extern int platform_get_irq_byname(struct platform_device *, const char *);
+extern int platform_get_irq_byname_optional(struct platform_device *dev,
+					    const char *name);
 extern int platform_add_devices(struct platform_device **, int);
 
 struct platform_device_info {

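The _optional variant behaves like platform_get_irq_byname() but stays
silent when the IRQ is absent, for devices where the interrupt is genuinely
optional. A sketch, with a hypothetical "wakeup" resource name:

    int irq;

    /* Absence of the optional IRQ is a supported configuration. */
    irq = platform_get_irq_byname_optional(pdev, "wakeup");
    if (irq > 0)
            foo->wakeup_irq = irq;
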
include/linux/sched.h

@@ -223,6 +223,7 @@ extern long schedule_timeout_uninterruptible(long timeout);
 extern long schedule_timeout_idle(long timeout);
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
+asmlinkage void preempt_schedule_irq(void);
 
 extern int __must_check io_schedule_prepare(void);
 extern void io_schedule_finish(int token);

include/linux/skbuff.h

@@ -3510,8 +3510,9 @@ int skb_ensure_writable(struct sk_buff *skb, int write_len);
 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
 int skb_vlan_pop(struct sk_buff *skb);
 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
-int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto);
-int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto);
+int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
+		  int mac_len);
+int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len);
 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
 int skb_mpls_dec_ttl(struct sk_buff *skb);
 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,

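Callers now pass the L2 header length explicitly so the helpers move the
right number of bytes when rebuilding the header. A sketch of the updated
calls, error handling elided:

    int err;

    /* Push an MPLS label stack entry after the current L2 header. */
    err = skb_mpls_push(skb, mpls_lse, htons(ETH_P_MPLS_UC), skb->mac_len);

    /* Pop it again, restoring the inner protocol. */
    err = skb_mpls_pop(skb, htons(ETH_P_IP), skb->mac_len);
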
include/linux/slab.h

@@ -493,6 +493,10 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
  * kmalloc is the normal method of allocating memory
  * for objects smaller than page size in the kernel.
  *
+ * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
+ * bytes. For @size of power of two bytes, the alignment is also guaranteed
+ * to be at least to the size.
+ *
  * The @flags argument may be one of the GFP flags defined at
  * include/linux/gfp.h and described at
  * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`

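A sketch of what the newly documented guarantee means in practice:

    /* A power-of-two size is now guaranteed to be naturally aligned: */
    void *buf = kmalloc(4096, GFP_KERNEL);

    if (buf)
            WARN_ON((unsigned long)buf & 4095);     /* never fires */
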
include/linux/string.h

@@ -227,7 +227,26 @@ static inline bool strstarts(const char *str, const char *prefix)
 }
 
 size_t memweight(const void *ptr, size_t bytes);
-void memzero_explicit(void *s, size_t count);
+
+/**
+ * memzero_explicit - Fill a region of memory (e.g. sensitive
+ *		      keying data) with 0s.
+ * @s: Pointer to the start of the area.
+ * @count: The size of the area.
+ *
+ * Note: usually using memset() is just fine (!), but in cases
+ * where clearing out _local_ data at the end of a scope is
+ * necessary, memzero_explicit() should be used instead in
+ * order to prevent the compiler from optimising away zeroing.
+ *
+ * memzero_explicit() doesn't need an arch-specific version as
+ * it just invokes the one of memset() implicitly.
+ */
+static inline void memzero_explicit(void *s, size_t count)
+{
+	memset(s, 0, count);
+	barrier_data(s);
+}
 
 /**
  * kbasename - return the last part of a pathname.

include/linux/sunrpc/xprtsock.h

@@ -61,6 +61,7 @@ struct sock_xprt {
 	struct mutex recv_mutex;
 	struct sockaddr_storage srcaddr;
 	unsigned short srcport;
+	int xprt_err;
 
 	/*
 	 * UDP socket buffer size parameters

include/linux/tcp.h

@@ -393,7 +393,7 @@ struct tcp_sock {
 	/* fastopen_rsk points to request_sock that resulted in this big
 	 * socket. Used to retransmit SYNACKs etc.
 	 */
-	struct request_sock *fastopen_rsk;
+	struct request_sock __rcu *fastopen_rsk;
 	u32 *saved_syn;
 };
 
@@ -447,8 +447,8 @@ static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
 
 static inline bool tcp_passive_fastopen(const struct sock *sk)
 {
-	return (sk->sk_state == TCP_SYN_RECV &&
-		tcp_sk(sk)->fastopen_rsk != NULL);
+	return sk->sk_state == TCP_SYN_RECV &&
+	       rcu_access_pointer(tcp_sk(sk)->fastopen_rsk) != NULL;
 }
 
 static inline void fastopen_queue_tune(struct sock *sk, int backlog)

include/linux/tpm_eventlog.h

@@ -152,7 +152,7 @@ struct tcg_algorithm_info {
  * total. Once we've done this we know the offset of the data length field,
  * and can calculate the total size of the event.
  *
- * Return: size of the event on success, <0 on failure
+ * Return: size of the event on success, 0 on failure
  */
 
 static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
@@ -170,6 +170,7 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
 	u16 halg;
 	int i;
 	int j;
+	u32 count, event_type;
 
 	marker = event;
 	marker_start = marker;
@@ -190,16 +191,22 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
 	}
 
 	event = (struct tcg_pcr_event2_head *)mapping;
+	/*
+	 * The loop below will unmap these fields if the log is larger than
+	 * one page, so save them here for reference:
+	 */
+	count = READ_ONCE(event->count);
+	event_type = READ_ONCE(event->event_type);
 
 	efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
 
 	/* Check if event is malformed. */
-	if (event->count > efispecid->num_algs) {
+	if (count > efispecid->num_algs) {
 		size = 0;
 		goto out;
 	}
 
-	for (i = 0; i < event->count; i++) {
+	for (i = 0; i < count; i++) {
 		halg_size = sizeof(event->digests[i].alg_id);
 
 		/* Map the digest's algorithm identifier */
@@ -256,8 +263,9 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
 		  + event_field->event_size;
 	size = marker - marker_start;
 
-	if ((event->event_type == 0) && (event_field->event_size == 0))
+	if (event_type == 0 && event_field->event_size == 0)
 		size = 0;
+
 out:
 	if (do_mapping)
 		TPM_MEMUNMAP(mapping, mapping_size);

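Beyond keeping the values valid after the pages are unmapped, snapshotting
with READ_ONCE() also closes a double-fetch window: every later check and
use sees one consistent value. In miniature, with hypothetical names:

    /* racy: the two reads of hdr->count may observe different values */
    if (hdr->count > max)
            return 0;
    consume(hdr, hdr->count);

    /* snapshot once, then check and use the same value */
    u32 count = READ_ONCE(hdr->count);
    if (count > max)
            return 0;
    consume(hdr, count);
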
include/linux/uaccess.h

@@ -355,8 +355,10 @@ extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
 #ifndef user_access_begin
 #define user_access_begin(ptr,len) access_ok(ptr, len)
 #define user_access_end() do { } while (0)
-#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
-#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
+#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
+#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
+#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
+#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
 static inline unsigned long user_access_save(void) { return 0UL; }
 static inline void user_access_restore(unsigned long flags) { }
 #endif

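A sketch of the begin/unsafe/end pattern the new helper slots into,
modelled on the filldir conversion this series was made for (the function
itself is illustrative):

    static int copy_record_to_user(void __user *ubuf, const void *src,
                                   unsigned long len)
    {
            if (!user_access_begin(ubuf, len))
                    return -EFAULT;
            /* each unsafe_* op jumps to the label on fault */
            unsafe_copy_to_user(ubuf, src, len, efault);
            user_access_end();
            return 0;
    efault:
            user_access_end();
            return -EFAULT;
    }
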
include/linux/xarray.h

@@ -230,8 +230,8 @@ static inline int xa_err(void *entry)
  * This structure is used either directly or via the XA_LIMIT() macro
  * to communicate the range of IDs that are valid for allocation.
  * Two common ranges are predefined for you:
- * * xa_limit_32b - [0 - UINT_MAX]
- * * xa_limit_31b - [0 - INT_MAX]
+ * * xa_limit_32b - [0 - UINT_MAX]
+ * * xa_limit_31b - [0 - INT_MAX]
 */
 struct xa_limit {
 	u32 max;

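For context, how the predefined limits are used with the allocating XArray
API (the foo structure and function are hypothetical):

    static DEFINE_XARRAY_ALLOC(foo_ids);

    static int foo_assign_id(struct foo *foo)
    {
            u32 id;
            int err;

            /* allocate the lowest free index in [0, UINT_MAX] */
            err = xa_alloc(&foo_ids, &id, foo, xa_limit_32b, GFP_KERNEL);
            if (err)
                    return err;
            foo->id = id;
            return 0;
    }
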