Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux into drm-next
Backmerge Linus master to get the connector locking revert.

* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux: (645 commits)
  sysctl: fix proc_doulongvec_ms_jiffies_minmax()
  Revert "drm/probe-helpers: Drop locking from poll_enable"
  MAINTAINERS: add Dan Streetman to zbud maintainers
  MAINTAINERS: add Dan Streetman to zswap maintainers
  mm: do not export ioremap_page_range symbol for external module
  mn10300: fix build error of missing fpu_save()
  romfs: use different way to generate fsid for BLOCK or MTD
  frv: add missing atomic64 operations
  mm, page_alloc: fix premature OOM when racing with cpuset mems update
  mm, page_alloc: move cpuset seqcount checking to slowpath
  mm, page_alloc: fix fast-path race with cpuset update or removal
  mm, page_alloc: fix check for NULL preferred_zone
  kernel/panic.c: add missing \n
  fbdev: color map copying bounds checking
  frv: add atomic64_add_unless()
  mm/mempolicy.c: do not put mempolicy before using its nodemask
  radix-tree: fix private list warnings
  Documentation/filesystems/proc.txt: add VmPin
  mm, memcg: do not retry precharge charges
  proc: add a schedule point in proc_pid_readdir()
  ...
include/kvm/arm_arch_timer.h
@@ -76,4 +76,5 @@ void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
 void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
 
+void kvm_timer_init_vhe(void);
 #endif
include/linux/blkdev.h
@@ -739,7 +739,7 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
 	}
 }
 
-static inline unsigned int blk_queue_zone_size(struct request_queue *q)
+static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
 {
 	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
 }
@@ -1000,6 +1000,19 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 	return blk_rq_cur_bytes(rq) >> 9;
 }
 
+/*
+ * Some commands like WRITE SAME have a payload or data transfer size which
+ * is different from the size of the request. Any driver that supports such
+ * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
+ * calculate the data transfer size.
+ */
+static inline unsigned int blk_rq_payload_bytes(struct request *rq)
+{
+	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+		return rq->special_vec.bv_len;
+	return blk_rq_bytes(rq);
+}
+
 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
 						     int op)
 {
@@ -1536,12 +1549,12 @@ static inline bool bdev_is_zoned(struct block_device *bdev)
 	return false;
 }
 
-static inline unsigned int bdev_zone_size(struct block_device *bdev)
+static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 
 	if (q)
-		return blk_queue_zone_size(q);
+		return blk_queue_zone_sectors(q);
 
 	return 0;
 }
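For context, the new blk_rq_payload_bytes() helper is what a driver should use to size DMA transfers for RQF_SPECIAL_PAYLOAD requests instead of blk_rq_bytes(). A minimal sketch of a caller follows; the function name is hypothetical and this is not part of the merge itself:

	/* Illustrative sketch only. A driver that issues WRITE SAME (or
	 * similar) requests with RQF_SPECIAL_PAYLOAD must size its mapping
	 * from the payload, not the nominal request size. */
	static unsigned int my_driver_map_len(struct request *rq)
	{
		/* Returns special_vec.bv_len for RQF_SPECIAL_PAYLOAD
		 * requests, blk_rq_bytes(rq) otherwise. */
		return blk_rq_payload_bytes(rq);
	}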
include/linux/bpf.h
@@ -216,7 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 
 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
-int bpf_prog_calc_digest(struct bpf_prog *fp);
+int bpf_prog_calc_tag(struct bpf_prog *fp);
 
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
 
include/linux/coredump.h
@@ -14,6 +14,7 @@ struct coredump_params;
 extern int dump_skip(struct coredump_params *cprm, size_t nr);
 extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
 extern int dump_align(struct coredump_params *cprm, int align);
+extern void dump_truncate(struct coredump_params *cprm);
 #ifdef CONFIG_COREDUMP
 extern void do_coredump(const siginfo_t *siginfo);
 #else
include/linux/cpuhotplug.h
@@ -74,6 +74,8 @@ enum cpuhp_state {
 	CPUHP_ZCOMP_PREPARE,
 	CPUHP_TIMERS_DEAD,
 	CPUHP_MIPS_SOC_PREPARE,
+	CPUHP_BP_PREPARE_DYN,
+	CPUHP_BP_PREPARE_DYN_END	= CPUHP_BP_PREPARE_DYN + 20,
 	CPUHP_BRINGUP_CPU,
 	CPUHP_AP_IDLE_DEAD,
 	CPUHP_AP_OFFLINE,
include/linux/efi.h
@@ -103,6 +103,7 @@ typedef struct {
 
 #define EFI_PAGE_SHIFT		12
 #define EFI_PAGE_SIZE		(1UL << EFI_PAGE_SHIFT)
+#define EFI_PAGES_MAX		(U64_MAX >> EFI_PAGE_SHIFT)
 
 typedef struct {
 	u32 type;
@@ -950,6 +951,7 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
 #endif
 extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
 
+extern phys_addr_t __init efi_memmap_alloc(unsigned int num_entries);
 extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
 extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
 extern void __init efi_memmap_unmap(void);
include/linux/filter.h
@@ -57,6 +57,8 @@ struct bpf_prog_aux;
 /* BPF program can access up to 512 bytes of stack space. */
 #define MAX_BPF_STACK	512
 
+#define BPF_TAG_SIZE	8
+
 /* Helper macros for filter block array initializers. */
 
 /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
@@ -408,7 +410,7 @@ struct bpf_prog {
 	kmemcheck_bitfield_end(meta);
 	enum bpf_prog_type	type;		/* Type of BPF program */
 	u32			len;		/* Number of filter blocks */
-	u32			digest[SHA_DIGEST_WORDS]; /* Program digest */
+	u8			tag[BPF_TAG_SIZE];
 	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
 	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
 	unsigned int		(*bpf_func)(const void *ctx,
@@ -519,7 +521,7 @@ static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
 	return prog->len * sizeof(struct bpf_insn);
 }
 
-static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog)
+static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
 {
 	return round_up(bpf_prog_insn_size(prog) +
 			sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
include/linux/gfp.h
@@ -38,9 +38,8 @@ struct vm_area_struct;
 #define ___GFP_ACCOUNT		0x100000u
 #define ___GFP_NOTRACK		0x200000u
 #define ___GFP_DIRECT_RECLAIM	0x400000u
-#define ___GFP_OTHER_NODE	0x800000u
-#define ___GFP_WRITE		0x1000000u
-#define ___GFP_KSWAPD_RECLAIM	0x2000000u
+#define ___GFP_WRITE		0x800000u
+#define ___GFP_KSWAPD_RECLAIM	0x1000000u
 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
 
 /*
@@ -172,11 +171,6 @@ struct vm_area_struct;
  * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
  * distinguishing in the source between false positives and allocations that
  * cannot be supported (e.g. page tables).
- *
- * __GFP_OTHER_NODE is for allocations that are on a remote node but that
- * should not be accounted for as a remote allocation in vmstat. A
- * typical user would be khugepaged collapsing a huge page on a remote
- * node.
  */
 #define __GFP_COLD	((__force gfp_t)___GFP_COLD)
 #define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)
@@ -184,10 +178,9 @@ struct vm_area_struct;
 #define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)
 #define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)
 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
-#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
 
 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT 26
+#define __GFP_BITS_SHIFT 25
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /*
@@ -506,11 +499,10 @@ extern void free_hot_cold_page(struct page *page, bool cold);
 extern void free_hot_cold_page_list(struct list_head *list, bool cold);
 
 struct page_frag_cache;
-extern void __page_frag_drain(struct page *page, unsigned int order,
-			      unsigned int count);
-extern void *__alloc_page_frag(struct page_frag_cache *nc,
-			       unsigned int fragsz, gfp_t gfp_mask);
-extern void __free_page_frag(void *addr);
+extern void __page_frag_cache_drain(struct page *page, unsigned int count);
+extern void *page_frag_alloc(struct page_frag_cache *nc,
+			     unsigned int fragsz, gfp_t gfp_mask);
+extern void page_frag_free(void *addr);
 
 #define __free_page(page) __free_pages((page), 0)
 #define free_page(addr) free_pages((addr), 0)
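The page-fragment API is renamed here without changing semantics. For orientation, a hypothetical caller (the cache and helper names below are illustrative, not from this diff) would now read:

	/* Illustrative sketch: a driver-local fragment cache via the
	 * renamed API. */
	static struct page_frag_cache rx_cache;

	static void *rx_frag_get(unsigned int size)
	{
		/* was __alloc_page_frag() before this merge */
		return page_frag_alloc(&rx_cache, size, GFP_ATOMIC);
	}

	static void rx_frag_put(void *buf)
	{
		/* was __free_page_frag() before this merge */
		page_frag_free(buf);
	}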
include/linux/gpio/driver.h
@@ -274,37 +274,67 @@ void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
 				 struct irq_chip *irqchip,
 				 int parent_irq);
 
-int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
-			  struct irq_chip *irqchip,
-			  unsigned int first_irq,
-			  irq_flow_handler_t handler,
-			  unsigned int type,
-			  bool nested,
-			  struct lock_class_key *lock_key);
+int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
+			     struct irq_chip *irqchip,
+			     unsigned int first_irq,
+			     irq_flow_handler_t handler,
+			     unsigned int type,
+			     bool nested,
+			     struct lock_class_key *lock_key);
 
-/* FIXME: I assume threaded IRQchips do not have the lockdep problem */
-static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
-			  struct irq_chip *irqchip,
-			  unsigned int first_irq,
-			  irq_flow_handler_t handler,
-			  unsigned int type)
-{
-	return _gpiochip_irqchip_add(gpiochip, irqchip, first_irq,
-				     handler, type, true, NULL);
-}
-
 #ifdef CONFIG_LOCKDEP
-#define gpiochip_irqchip_add(...)				\
-(								\
-	({							\
-		static struct lock_class_key _key;		\
-		_gpiochip_irqchip_add(__VA_ARGS__, false, &_key); \
-	})							\
-)
+
+/*
+ * Lockdep requires that each irqchip instance be created with a
+ * unique key so as to avoid unnecessary warnings. This upfront
+ * boilerplate static inlines provides such a key for each
+ * unique instance.
+ */
+static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+				       struct irq_chip *irqchip,
+				       unsigned int first_irq,
+				       irq_flow_handler_t handler,
+				       unsigned int type)
+{
+	static struct lock_class_key key;
+
+	return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+					handler, type, false, &key);
+}
+
+/* FIXME: I assume threaded IRQchips do not have the lockdep problem */
+static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
+			  struct irq_chip *irqchip,
+			  unsigned int first_irq,
+			  irq_flow_handler_t handler,
+			  unsigned int type)
+{
+	static struct lock_class_key key;
+
+	return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+					handler, type, true, &key);
+}
 #else
-#define gpiochip_irqchip_add(...)				\
-	_gpiochip_irqchip_add(__VA_ARGS__, false, NULL)
-#endif
+static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+				       struct irq_chip *irqchip,
+				       unsigned int first_irq,
+				       irq_flow_handler_t handler,
+				       unsigned int type)
+{
+	return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+					handler, type, false, NULL);
+}
+
+static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
+			  struct irq_chip *irqchip,
+			  unsigned int first_irq,
+			  irq_flow_handler_t handler,
+			  unsigned int type)
+{
+	return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+					handler, type, true, NULL);
+}
+#endif /* CONFIG_LOCKDEP */
 
 #endif /* CONFIG_GPIOLIB_IRQCHIP */
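The point of the static-inline wrappers above: each call site that expands gpiochip_irqchip_add() instantiates its own static lock_class_key, so lockdep can distinguish irqchip instances, and callers keep the old five-argument signature. A hedged sketch of a caller (driver function name hypothetical):

	/* Illustrative sketch: under CONFIG_LOCKDEP, this expansion of the
	 * inline gets a lock_class_key distinct from every other driver's. */
	static int my_gpio_setup_irq(struct gpio_chip *chip,
				     struct irq_chip *irq_chip)
	{
		return gpiochip_irqchip_add(chip, irq_chip, 0,
					    handle_simple_irq, IRQ_TYPE_NONE);
	}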
include/linux/i2c.h
@@ -665,6 +665,7 @@ i2c_unlock_adapter(struct i2c_adapter *adapter)
 #define I2C_CLIENT_TEN		0x10	/* we have a ten bit chip address */
 					/* Must equal I2C_M_TEN below */
 #define I2C_CLIENT_SLAVE	0x20	/* we are the slave */
+#define I2C_CLIENT_HOST_NOTIFY	0x40	/* We want to use I2C host notify */
 #define I2C_CLIENT_WAKE		0x80	/* for board_info; true iff can wake */
 #define I2C_CLIENT_SCCB		0x9000	/* Use Omnivision SCCB protocol */
 					/* Must match I2C_M_STOP|IGNORE_NAK */
include/linux/jump_label_ratelimit.h
@@ -14,6 +14,7 @@ struct static_key_deferred {
 
 #ifdef HAVE_JUMP_LABEL
 extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
+extern void static_key_deferred_flush(struct static_key_deferred *key);
 extern void
 jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
 
@@ -26,6 +27,10 @@ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
 	STATIC_KEY_CHECK_USE();
 	static_key_slow_dec(&key->key);
 }
+static inline void static_key_deferred_flush(struct static_key_deferred *key)
+{
+	STATIC_KEY_CHECK_USE();
+}
 static inline void
 jump_label_rate_limit(struct static_key_deferred *key,
 		unsigned long rl)
include/linux/kernel.h
@@ -514,8 +514,8 @@ extern enum system_states {
 #define TAINT_FLAGS_COUNT		16
 
 struct taint_flag {
-	char true;	/* character printed when tainted */
-	char false;	/* character printed when not tainted */
+	char c_true;	/* character printed when tainted */
+	char c_false;	/* character printed when not tainted */
 	bool module;	/* also show as a per-module taint flag */
 };
 
include/linux/memcontrol.h
@@ -120,7 +120,7 @@ struct mem_cgroup_reclaim_iter {
  */
 struct mem_cgroup_per_node {
 	struct lruvec		lruvec;
-	unsigned long		lru_size[NR_LRU_LISTS];
+	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
 
 	struct mem_cgroup_reclaim_iter	iter[DEF_PRIORITY + 1];
 
@@ -432,7 +432,7 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
 
 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
-		int nr_pages);
+		int zid, int nr_pages);
 
 unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
 					   int nid, unsigned int lru_mask);
@@ -441,9 +441,23 @@ static inline
 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
 	struct mem_cgroup_per_node *mz;
+	unsigned long nr_pages = 0;
+	int zid;
 
 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-	return mz->lru_size[lru];
+	for (zid = 0; zid < MAX_NR_ZONES; zid++)
+		nr_pages += mz->lru_zone_size[zid][lru];
+	return nr_pages;
+}
+
+static inline
+unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
+		enum lru_list lru, int zone_idx)
+{
+	struct mem_cgroup_per_node *mz;
+
+	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+	return mz->lru_zone_size[zone_idx][lru];
 }
 
 void mem_cgroup_handle_over_high(void);
@@ -671,6 +685,12 @@ mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
 	return 0;
 }
+static inline
+unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
+		enum lru_list lru, int zone_idx)
+{
+	return 0;
+}
 
 static inline unsigned long
 mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
include/linux/memory_hotplug.h
@@ -284,7 +284,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
 		unsigned long map_offset);
 extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
 					  unsigned long pnum);
-extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
-			  enum zone_type target);
+extern bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+			   enum zone_type target, int *zone_shift);
 
 #endif /* __LINUX_MEMORY_HOTPLUG_H */
include/linux/mm.h
@@ -1210,8 +1210,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows);
-int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
-	       spinlock_t **ptlp);
+int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+		   pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
 	unsigned long *pfn);
 int follow_phys(struct vm_area_struct *vma, unsigned long address,
include/linux/mm_inline.h
@@ -39,7 +39,7 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
 {
 	__update_lru_size(lruvec, lru, zid, nr_pages);
 #ifdef CONFIG_MEMCG
-	mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
+	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
 #endif
 }
 
include/linux/mmzone.h
@@ -972,12 +972,16 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
  * @zonelist - The zonelist to search for a suitable zone
  * @highest_zoneidx - The zone index of the highest zone to return
  * @nodes - An optional nodemask to filter the zonelist with
- * @zone - The first suitable zone found is returned via this parameter
+ * @return - Zoneref pointer for the first suitable zone found (see below)
  *
  * This function returns the first zone at or below a given zone index that is
  * within the allowed nodemask. The zoneref returned is a cursor that can be
  * used to iterate the zonelist with next_zones_zonelist by advancing it by
  * one before calling.
+ *
+ * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
+ * never NULL). This may happen either genuinely, or due to concurrent nodemask
+ * update due to cpuset modification.
  */
 static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
 					enum zone_type highest_zoneidx,
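The cursor convention documented above ("advancing it by one before calling") corresponds to the usual iteration pattern, sketched here for reference; this mirrors the in-tree for_each_zone_zonelist_nodemask() idiom and is not part of the diff itself:

	/* Illustrative sketch of walking a zonelist with the zoneref cursor. */
	struct zoneref *z;
	struct zone *zone;

	for (z = first_zones_zonelist(zonelist, highest_zoneidx, nodemask);
	     (zone = zonelist_zone(z)) != NULL;
	     z = next_zones_zonelist(++z, highest_zoneidx, nodemask)) {
		/* use zone; the loop ends when zoneref->zone is NULL,
		 * which the new comment says can also happen under a
		 * concurrent cpuset nodemask update */
	}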
include/linux/netdevice.h
@@ -2477,14 +2477,19 @@ static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
 	return NAPI_GRO_CB(skb)->frag0_len < hlen;
 }
 
+static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
+{
+	NAPI_GRO_CB(skb)->frag0 = NULL;
+	NAPI_GRO_CB(skb)->frag0_len = 0;
+}
+
 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
 					unsigned int offset)
 {
 	if (!pskb_may_pull(skb, hlen))
 		return NULL;
 
-	NAPI_GRO_CB(skb)->frag0 = NULL;
-	NAPI_GRO_CB(skb)->frag0_len = 0;
+	skb_gro_frag0_invalidate(skb);
 	return skb->data + offset;
 }
 
include/linux/nmi.h
@@ -110,6 +110,7 @@ extern int watchdog_user_enabled;
 extern int watchdog_thresh;
 extern unsigned long watchdog_enabled;
 extern unsigned long *watchdog_cpumask_bits;
+extern atomic_t watchdog_park_in_progress;
 #ifdef CONFIG_SMP
 extern int sysctl_softlockup_all_cpu_backtrace;
 extern int sysctl_hardlockup_all_cpu_backtrace;
include/linux/perf_event.h
@@ -1259,6 +1259,7 @@ extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_disable_local(struct perf_event *event);
 extern void perf_event_disable_inatomic(struct perf_event *event);
 extern void perf_event_task_tick(void);
+extern int perf_event_account_interrupt(struct perf_event *event);
 #else /* !CONFIG_PERF_EVENTS: */
 static inline void *
 perf_aux_output_begin(struct perf_output_handle *handle,
include/linux/rcupdate.h
@@ -444,6 +444,10 @@ bool __rcu_is_watching(void);
 #error "Unknown RCU implementation specified to kernel configuration"
 #endif
 
+#define RCU_SCHEDULER_INACTIVE	0
+#define RCU_SCHEDULER_INIT	1
+#define RCU_SCHEDULER_RUNNING	2
+
 /*
  * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
  * initialization and destruction of rcu_head on the stack. rcu_head structures
include/linux/remoteproc.h
@@ -408,7 +408,8 @@ enum rproc_crash_type {
  * @crash_comp: completion used to sync crash handler and the rproc reload
  * @recovery_disabled: flag that state if recovery was disabled
  * @max_notifyid: largest allocated notify id.
- * @table_ptr: our copy of the resource table
+ * @table_ptr: pointer to the resource table in effect
+ * @cached_table: copy of the resource table
  * @has_iommu: flag to indicate if remote processor is behind an MMU
  */
 struct rproc {
@@ -440,6 +441,7 @@ struct rproc {
 	bool recovery_disabled;
 	int max_notifyid;
 	struct resource_table *table_ptr;
+	struct resource_table *cached_table;
 	bool has_iommu;
 	bool auto_boot;
 };
include/linux/sched.h
@@ -854,6 +854,16 @@ struct signal_struct {
 
 #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
 
+#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
+			  SIGNAL_STOP_CONTINUED)
+
+static inline void signal_set_stop_flags(struct signal_struct *sig,
+					 unsigned int flags)
+{
+	WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
+	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
+}
+
 /* If true, all threads except ->group_exit_task have pending SIGKILL */
 static inline int signal_group_exit(const struct signal_struct *sig)
 {
include/linux/skbuff.h
@@ -2480,7 +2480,7 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
 
 static inline void skb_free_frag(void *addr)
 {
-	__free_page_frag(addr);
+	page_frag_free(addr);
 }
 
 void *napi_alloc_frag(unsigned int fragsz);
include/linux/slab.h
@@ -226,7 +226,7 @@ static inline const char *__check_heap_object(const void *ptr,
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	3
 #endif
@@ -239,7 +239,7 @@ static inline const char *__check_heap_object(const void *ptr,
 * be allocated from the same page.
 */
 #define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX	30
+#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	3
 #endif
include/linux/sunrpc/svc_xprt.h
@@ -66,6 +66,7 @@ struct svc_xprt {
 #define XPT_LISTENER	10		/* listening endpoint */
 #define XPT_CACHE_AUTH	11		/* cache auth info */
 #define XPT_LOCAL	12		/* connection from loopback interface */
+#define XPT_KILL_TEMP	13		/* call xpo_kill_temp_xprt before closing */
 
 	struct svc_serv		*xpt_server;	/* service for transport */
 	atomic_t		xpt_reserved;	/* space on outq that is rsvd */
include/linux/swap.h
@@ -150,8 +150,9 @@ enum {
 	SWP_FILE	= (1 << 7),	/* set after swap_activate success */
 	SWP_AREA_DISCARD = (1 << 8),	/* single-time swap area discards */
 	SWP_PAGE_DISCARD = (1 << 9),	/* freed swap page-cluster discards */
+	SWP_STABLE_WRITES = (1 << 10),	/* no overwrite PG_writeback pages */
 					/* add others here before... */
-	SWP_SCANNING	= (1 << 10),	/* refcount in scan_swap_map */
+	SWP_SCANNING	= (1 << 11),	/* refcount in scan_swap_map */
 };
 
 #define SWAP_CLUSTER_MAX 32UL
include/linux/tcp.h
@@ -62,8 +62,13 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
 
 /* TCP Fast Open Cookie as stored in memory */
 struct tcp_fastopen_cookie {
+	union {
+		u8	val[TCP_FASTOPEN_COOKIE_MAX];
+#if IS_ENABLED(CONFIG_IPV6)
+		struct in6_addr addr;
+#endif
+	};
 	s8	len;
-	u8	val[TCP_FASTOPEN_COOKIE_MAX];
 	bool	exp;	/* In RFC6994 experimental option format */
 };
 
include/linux/timerfd.h
@@ -8,23 +8,7 @@
 #ifndef _LINUX_TIMERFD_H
 #define _LINUX_TIMERFD_H
 
-/* For O_CLOEXEC and O_NONBLOCK */
-#include <linux/fcntl.h>
-
-/* For _IO helpers */
-#include <linux/ioctl.h>
-
-/*
- * CAREFUL: Check include/asm-generic/fcntl.h when defining
- * new flags, since they might collide with O_* ones. We want
- * to re-use O_* flags that couldn't possibly have a meaning
- * from eventfd, in order to leave a free define-space for
- * shared O_* flags.
- */
-#define TFD_TIMER_ABSTIME (1 << 0)
-#define TFD_TIMER_CANCEL_ON_SET (1 << 1)
-#define TFD_CLOEXEC O_CLOEXEC
-#define TFD_NONBLOCK O_NONBLOCK
+#include <uapi/linux/timerfd.h>
 
 #define TFD_SHARED_FCNTL_FLAGS (TFD_CLOEXEC | TFD_NONBLOCK)
 /* Flags for timerfd_create. */
@@ -32,6 +16,4 @@
 /* Flags for timerfd_settime. */
 #define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET)
 
-#define TFD_IOC_SET_TICKS _IOW('T', 0, u64)
-
 #endif /* _LINUX_TIMERFD_H */
include/scsi/libfc.h
@@ -809,11 +809,11 @@ static inline void fc_set_wwnn(struct fc_lport *lport, u64 wwnn)
 /**
  * fc_set_wwpn() - Set the World Wide Port Name of a local port
  * @lport: The local port whose WWPN is to be set
- * @wwnn: The new WWPN
+ * @wwpn: The new WWPN
  */
-static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwnn)
+static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwpn)
 {
-	lport->wwpn = wwnn;
+	lport->wwpn = wwpn;
 }
 
 /**
include/sound/hdmi-codec.h
@@ -36,10 +36,10 @@ struct hdmi_codec_daifmt {
 		HDMI_AC97,
 		HDMI_SPDIF,
 	} fmt;
-	int bit_clk_inv:1;
-	int frame_clk_inv:1;
-	int bit_clk_master:1;
-	int frame_clk_master:1;
+	unsigned int bit_clk_inv:1;
+	unsigned int frame_clk_inv:1;
+	unsigned int bit_clk_master:1;
+	unsigned int frame_clk_master:1;
 };
 
 /*
include/sound/soc.h
@@ -813,6 +813,7 @@ struct snd_soc_component {
 	unsigned int suspended:1; /* is in suspend PM state */
 
 	struct list_head list;
+	struct list_head card_aux_list; /* for auxiliary bound components */
 	struct list_head card_list;
 
 	struct snd_soc_dai_driver *dai_drv;
@@ -1152,6 +1153,7 @@ struct snd_soc_card {
 	 */
 	struct snd_soc_aux_dev *aux_dev;
 	int num_aux_devs;
+	struct list_head aux_comp_list;
 
 	const struct snd_kcontrol_new *controls;
 	int num_controls;
@@ -1547,6 +1549,7 @@ static inline void snd_soc_initialize_card_lists(struct snd_soc_card *card)
 	INIT_LIST_HEAD(&card->widgets);
 	INIT_LIST_HEAD(&card->paths);
 	INIT_LIST_HEAD(&card->dapm_list);
+	INIT_LIST_HEAD(&card->aux_comp_list);
 	INIT_LIST_HEAD(&card->component_dev_list);
 }
 
include/target/target_core_base.h
@@ -174,6 +174,10 @@ enum tcm_sense_reason_table {
 	TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED	= R(0x16),
 	TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED	= R(0x17),
 	TCM_COPY_TARGET_DEVICE_NOT_REACHABLE	= R(0x18),
+	TCM_TOO_MANY_TARGET_DESCS		= R(0x19),
+	TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE	= R(0x1a),
+	TCM_TOO_MANY_SEGMENT_DESCS		= R(0x1b),
+	TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE	= R(0x1c),
 #undef R
 };
 
include/trace/events/btrfs.h
@@ -130,8 +130,8 @@ DECLARE_EVENT_CLASS(btrfs__inode,
 				BTRFS_I(inode)->root->root_key.objectid;
 	),
 
-	TP_printk_btrfs("root = %llu(%s), gen = %llu, ino = %lu, blocks = %llu, "
-		  "disk_i_size = %llu, last_trans = %llu, logged_trans = %llu",
+	TP_printk_btrfs("root=%llu(%s) gen=%llu ino=%lu blocks=%llu "
+		  "disk_i_size=%llu last_trans=%llu logged_trans=%llu",
 		  show_root_type(__entry->root_objectid),
 		  (unsigned long long)__entry->generation,
 		  (unsigned long)__entry->ino,
@@ -184,14 +184,16 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
 
 TRACE_EVENT_CONDITION(btrfs_get_extent,
 
-	TP_PROTO(struct btrfs_root *root, struct extent_map *map),
+	TP_PROTO(struct btrfs_root *root, struct inode *inode,
+		 struct extent_map *map),
 
-	TP_ARGS(root, map),
+	TP_ARGS(root, inode, map),
 
 	TP_CONDITION(map),
 
 	TP_STRUCT__entry_btrfs(
 		__field(	u64,  root_objectid	)
+		__field(	u64,  ino		)
 		__field(	u64,  start		)
 		__field(	u64,  len		)
 		__field(	u64,  orig_start	)
@@ -204,7 +206,8 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
 
 	TP_fast_assign_btrfs(root->fs_info,
 		__entry->root_objectid	= root->root_key.objectid;
-		__entry->start		= map->start;
+		__entry->ino		= btrfs_ino(inode);
+		__entry->start		= map->start;
 		__entry->len		= map->len;
 		__entry->orig_start	= map->orig_start;
 		__entry->block_start	= map->block_start;
@@ -214,11 +217,12 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
 		__entry->compress_type	= map->compress_type;
 	),
 
-	TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu, "
-		  "orig_start = %llu, block_start = %llu(%s), "
-		  "block_len = %llu, flags = %s, refs = %u, "
-		  "compress_type = %u",
+	TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu "
+		  "orig_start=%llu block_start=%llu(%s) "
+		  "block_len=%llu flags=%s refs=%u "
+		  "compress_type=%u",
 		  show_root_type(__entry->root_objectid),
+		  (unsigned long long)__entry->ino,
 		  (unsigned long long)__entry->start,
 		  (unsigned long long)__entry->len,
 		  (unsigned long long)__entry->orig_start,
@@ -259,6 +263,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
 		__field(	int,  compress_type	)
 		__field(	int,  refs		)
 		__field(	u64,  root_objectid	)
+		__field(	u64,  truncated_len	)
 	),
 
 	TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
@@ -273,18 +278,21 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
 		__entry->refs		= atomic_read(&ordered->refs);
 		__entry->root_objectid	=
 				BTRFS_I(inode)->root->root_key.objectid;
+		__entry->truncated_len	= ordered->truncated_len;
 	),
 
-	TP_printk_btrfs("root = %llu(%s), ino = %llu, file_offset = %llu, "
-		  "start = %llu, len = %llu, disk_len = %llu, "
-		  "bytes_left = %llu, flags = %s, compress_type = %d, "
-		  "refs = %d",
+	TP_printk_btrfs("root=%llu(%s) ino=%llu file_offset=%llu "
+		  "start=%llu len=%llu disk_len=%llu "
+		  "truncated_len=%llu "
+		  "bytes_left=%llu flags=%s compress_type=%d "
+		  "refs=%d",
 		  show_root_type(__entry->root_objectid),
 		  (unsigned long long)__entry->ino,
 		  (unsigned long long)__entry->file_offset,
 		  (unsigned long long)__entry->start,
 		  (unsigned long long)__entry->len,
 		  (unsigned long long)__entry->disk_len,
+		  (unsigned long long)__entry->truncated_len,
 		  (unsigned long long)__entry->bytes_left,
 		  show_ordered_flags(__entry->flags),
 		  __entry->compress_type, __entry->refs)
@@ -354,10 +362,10 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
 				 BTRFS_I(inode)->root->root_key.objectid;
 	),
 
-	TP_printk_btrfs("root = %llu(%s), ino = %lu, page_index = %lu, "
-		  "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
-		  "range_end = %llu, for_kupdate = %d, "
-		  "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
+	TP_printk_btrfs("root=%llu(%s) ino=%lu page_index=%lu "
+		  "nr_to_write=%ld pages_skipped=%ld range_start=%llu "
+		  "range_end=%llu for_kupdate=%d "
+		  "for_reclaim=%d range_cyclic=%d writeback_index=%lu",
 		  show_root_type(__entry->root_objectid),
 		  (unsigned long)__entry->ino, __entry->index,
 		  __entry->nr_to_write, __entry->pages_skipped,
@@ -400,8 +408,8 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
 			 BTRFS_I(page->mapping->host)->root->root_key.objectid;
 	),
 
-	TP_printk_btrfs("root = %llu(%s), ino = %lu, page_index = %lu, start = %llu, "
-		  "end = %llu, uptodate = %d",
+	TP_printk_btrfs("root=%llu(%s) ino=%lu page_index=%lu start=%llu "
+		  "end=%llu uptodate=%d",
 		  show_root_type(__entry->root_objectid),
 		  (unsigned long)__entry->ino, (unsigned long)__entry->index,
 		  (unsigned long long)__entry->start,
@@ -433,7 +441,7 @@ TRACE_EVENT(btrfs_sync_file,
 				 BTRFS_I(inode)->root->root_key.objectid;
 	),
 
-	TP_printk_btrfs("root = %llu(%s), ino = %ld, parent = %ld, datasync = %d",
+	TP_printk_btrfs("root=%llu(%s) ino=%ld parent=%ld datasync=%d",
 		  show_root_type(__entry->root_objectid),
 		  (unsigned long)__entry->ino, (unsigned long)__entry->parent,
 		  __entry->datasync)
@@ -484,9 +492,9 @@ TRACE_EVENT(btrfs_add_block_group,
 		__entry->create		= create;
 	),
 
-	TP_printk("%pU: block_group offset = %llu, size = %llu, "
-		  "flags = %llu(%s), bytes_used = %llu, bytes_super = %llu, "
-		  "create = %d", __entry->fsid,
+	TP_printk("%pU: block_group offset=%llu size=%llu "
+		  "flags=%llu(%s) bytes_used=%llu bytes_super=%llu "
+		  "create=%d", __entry->fsid,
 		  (unsigned long long)__entry->offset,
 		  (unsigned long long)__entry->size,
 		  (unsigned long long)__entry->flags,
@@ -535,9 +543,9 @@ DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref,
 		__entry->seq		= ref->seq;
 	),
 
-	TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, "
-		  "parent = %llu(%s), ref_root = %llu(%s), level = %d, "
-		  "type = %s, seq = %llu",
+	TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s "
+		  "parent=%llu(%s) ref_root=%llu(%s) level=%d "
+		  "type=%s seq=%llu",
 		  (unsigned long long)__entry->bytenr,
 		  (unsigned long long)__entry->num_bytes,
 		  show_ref_action(__entry->action),
@@ -600,9 +608,9 @@ DECLARE_EVENT_CLASS(btrfs_delayed_data_ref,
 		__entry->seq		= ref->seq;
 	),
 
-	TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, "
-		  "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, "
-		  "offset = %llu, type = %s, seq = %llu",
+	TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s "
+		  "parent=%llu(%s) ref_root=%llu(%s) owner=%llu "
+		  "offset=%llu type=%s seq=%llu",
 		  (unsigned long long)__entry->bytenr,
 		  (unsigned long long)__entry->num_bytes,
 		  show_ref_action(__entry->action),
@@ -657,7 +665,7 @@ DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
 		__entry->is_data	= head_ref->is_data;
 	),
 
-	TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, is_data = %d",
+	TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s is_data=%d",
 		  (unsigned long long)__entry->bytenr,
 		  (unsigned long long)__entry->num_bytes,
 		  show_ref_action(__entry->action),
@@ -721,8 +729,8 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
 		__entry->root_objectid	= fs_info->chunk_root->root_key.objectid;
 	),
 
-	TP_printk_btrfs("root = %llu(%s), offset = %llu, size = %llu, "
-		  "num_stripes = %d, sub_stripes = %d, type = %s",
+	TP_printk_btrfs("root=%llu(%s) offset=%llu size=%llu "
+		  "num_stripes=%d sub_stripes=%d type=%s",
 		  show_root_type(__entry->root_objectid),
 		  (unsigned long long)__entry->offset,
 		  (unsigned long long)__entry->size,
@@ -771,8 +779,8 @@ TRACE_EVENT(btrfs_cow_block,
 		__entry->cow_level	= btrfs_header_level(cow);
 	),
 
-	TP_printk_btrfs("root = %llu(%s), refs = %d, orig_buf = %llu "
-		  "(orig_level = %d), cow_buf = %llu (cow_level = %d)",
+	TP_printk_btrfs("root=%llu(%s) refs=%d orig_buf=%llu "
+		  "(orig_level=%d) cow_buf=%llu (cow_level=%d)",
 		  show_root_type(__entry->root_objectid),
 		  __entry->refs,
 		  (unsigned long long)__entry->buf_start,
@@ -836,7 +844,7 @@ TRACE_EVENT(btrfs_trigger_flush,
 		__assign_str(reason, reason)
 	),
 
-	TP_printk("%pU: %s: flush = %d(%s), flags = %llu(%s), bytes = %llu",
+	TP_printk("%pU: %s: flush=%d(%s) flags=%llu(%s) bytes=%llu",
 		  __entry->fsid, __get_str(reason), __entry->flush,
 		  show_flush_action(__entry->flush),
 		  (unsigned long long)__entry->flags,
@@ -879,8 +887,8 @@ TRACE_EVENT(btrfs_flush_space,
 		__entry->ret		=	ret;
 	),
 
-	TP_printk("%pU: state = %d(%s), flags = %llu(%s), num_bytes = %llu, "
-		  "orig_bytes = %llu, ret = %d", __entry->fsid, __entry->state,
+	TP_printk("%pU: state=%d(%s) flags=%llu(%s) num_bytes=%llu "
+		  "orig_bytes=%llu ret=%d", __entry->fsid, __entry->state,
 		  show_flush_state(__entry->state),
 		  (unsigned long long)__entry->flags,
 		  __print_flags((unsigned long)__entry->flags, "|",
@@ -905,7 +913,7 @@ DECLARE_EVENT_CLASS(btrfs__reserved_extent,
 		__entry->len		= len;
 	),
 
-	TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu",
+	TP_printk_btrfs("root=%llu(%s) start=%llu len=%llu",
 		  show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
 		  (unsigned long long)__entry->start,
 		  (unsigned long long)__entry->len)
@@ -944,7 +952,7 @@ TRACE_EVENT(find_free_extent,
 		__entry->data		= data;
 	),
 
-	TP_printk_btrfs("root = %Lu(%s), len = %Lu, empty_size = %Lu, flags = %Lu(%s)",
+	TP_printk_btrfs("root=%Lu(%s) len=%Lu empty_size=%Lu flags=%Lu(%s)",
		  show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
 		  __entry->num_bytes, __entry->empty_size, __entry->data,
 		  __print_flags((unsigned long)__entry->data, "|",
@@ -973,8 +981,8 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
 		__entry->len		= len;
 	),
 
-	TP_printk_btrfs("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), "
-		  "start = %Lu, len = %Lu",
+	TP_printk_btrfs("root=%Lu(%s) block_group=%Lu flags=%Lu(%s) "
+		  "start=%Lu len=%Lu",
 		  show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
 		  __entry->bg_objectid,
 		  __entry->flags, __print_flags((unsigned long)__entry->flags,
@@ -1025,8 +1033,8 @@ TRACE_EVENT(btrfs_find_cluster,
 		__entry->min_bytes	= min_bytes;
 	),
 
-	TP_printk_btrfs("block_group = %Lu, flags = %Lu(%s), start = %Lu, len = %Lu,"
-		  " empty_size = %Lu, min_bytes = %Lu", __entry->bg_objectid,
+	TP_printk_btrfs("block_group=%Lu flags=%Lu(%s) start=%Lu len=%Lu "
+		  "empty_size=%Lu min_bytes=%Lu", __entry->bg_objectid,
 		  __entry->flags,
 		  __print_flags((unsigned long)__entry->flags, "|",
 				BTRFS_GROUP_FLAGS), __entry->start,
@@ -1047,7 +1055,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup,
 		__entry->bg_objectid	= block_group->key.objectid;
 	),
 
-	TP_printk_btrfs("block_group = %Lu", __entry->bg_objectid)
+	TP_printk_btrfs("block_group=%Lu", __entry->bg_objectid)
 );
 
 TRACE_EVENT(btrfs_setup_cluster,
@@ -1075,8 +1083,8 @@ TRACE_EVENT(btrfs_setup_cluster,
 		__entry->bitmap		= bitmap;
 	),
 
-	TP_printk_btrfs("block_group = %Lu, flags = %Lu(%s), window_start = %Lu, "
-		  "size = %Lu, max_size = %Lu, bitmap = %d",
+	TP_printk_btrfs("block_group=%Lu flags=%Lu(%s) window_start=%Lu "
+		  "size=%Lu max_size=%Lu bitmap=%d",
 		  __entry->bg_objectid,
 		  __entry->flags,
 		  __print_flags((unsigned long)__entry->flags, "|",
@@ -1103,7 +1111,7 @@ TRACE_EVENT(alloc_extent_state,
 		__entry->ip	= IP
 	),
 
-	TP_printk("state=%p; mask = %s; caller = %pS", __entry->state,
+	TP_printk("state=%p mask=%s caller=%pS", __entry->state,
 		  show_gfp_flags(__entry->mask), (void *)__entry->ip)
 );
 
@@ -1123,7 +1131,7 @@ TRACE_EVENT(free_extent_state,
 		__entry->ip = IP
 	),
 
-	TP_printk(" state=%p; caller = %pS", __entry->state,
+	TP_printk("state=%p caller=%pS", __entry->state,
 		  (void *)__entry->ip)
 );
 
@@ -1151,28 +1159,32 @@ DECLARE_EVENT_CLASS(btrfs__work,
 		__entry->normal_work	= &work->normal_work;
 	),
 
-	TP_printk_btrfs("work=%p (normal_work=%p), wq=%p, func=%pf, ordered_func=%p,"
-		  " ordered_free=%p",
+	TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%pf ordered_func=%p "
+		  "ordered_free=%p",
 		  __entry->work, __entry->normal_work, __entry->wq,
 		   __entry->func, __entry->ordered_func, __entry->ordered_free)
 );
 
-/* For situiations that the work is freed */
+/*
+ * For situiations when the work is freed, we pass fs_info and a tag that that
+ * matches address of the work structure so it can be paired with the
+ * scheduling event.
+ */
 DECLARE_EVENT_CLASS(btrfs__work__done,
 
-	TP_PROTO(struct btrfs_work *work),
+	TP_PROTO(struct btrfs_fs_info *fs_info, void *wtag),
 
-	TP_ARGS(work),
+	TP_ARGS(fs_info, wtag),
 
 	TP_STRUCT__entry_btrfs(
-		__field(	void *,	work			)
+		__field(	void *,	wtag			)
 	),
 
-	TP_fast_assign_btrfs(btrfs_work_owner(work),
-		__entry->work		= work;
+	TP_fast_assign_btrfs(fs_info,
+		__entry->wtag		= wtag;
 	),
 
-	TP_printk_btrfs("work->%p", __entry->work)
+	TP_printk_btrfs("work->%p", __entry->wtag)
 );
 
 DEFINE_EVENT(btrfs__work, btrfs_work_queued,
@@ -1191,9 +1203,9 @@ DEFINE_EVENT(btrfs__work, btrfs_work_sched,
 
 DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done,
 
-	TP_PROTO(struct btrfs_work *work),
+	TP_PROTO(struct btrfs_fs_info *fs_info, void *wtag),
 
-	TP_ARGS(work)
+	TP_ARGS(fs_info, wtag)
 );
 
 DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,
@@ -1221,7 +1233,7 @@ DECLARE_EVENT_CLASS(btrfs__workqueue,
 		__entry->high	= high;
 	),
 
-	TP_printk_btrfs("name=%s%s, wq=%p", __get_str(name),
+	TP_printk_btrfs("name=%s%s wq=%p", __get_str(name),
 		  __print_flags(__entry->high, "",
 				{(WQ_HIGHPRI),	"-high"}),
 		  __entry->wq)
@@ -1276,7 +1288,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_data_map,
 		__entry->free_reserved	=	free_reserved;
 	),
 
-	TP_printk_btrfs("rootid=%llu, ino=%lu, free_reserved=%llu",
+	TP_printk_btrfs("rootid=%llu ino=%lu free_reserved=%llu",
 		  __entry->rootid, __entry->ino, __entry->free_reserved)
 );
 
@@ -1323,7 +1335,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data,
 		__entry->op		= op;
 	),
 
-	TP_printk_btrfs("root=%llu, ino=%lu, start=%llu, len=%llu, reserved=%llu, op=%s",
+	TP_printk_btrfs("root=%llu ino=%lu start=%llu len=%llu reserved=%llu op=%s",
 		  __entry->rootid, __entry->ino, __entry->start, __entry->len,
 		  __entry->reserved,
 		  __print_flags((unsigned long)__entry->op, "",
@@ -1361,7 +1373,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_delayed_ref,
 		__entry->reserved	= reserved;
 	),
 
-	TP_printk_btrfs("root=%llu, reserved=%llu, op=free",
+	TP_printk_btrfs("root=%llu reserved=%llu op=free",
 		  __entry->ref_root, __entry->reserved)
 );
 
@@ -1388,7 +1400,7 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
 		__entry->num_bytes	= rec->num_bytes;
 	),
 
-	TP_printk_btrfs("bytenr = %llu, num_bytes = %llu",
+	TP_printk_btrfs("bytenr=%llu num_bytes=%llu",
 		  (unsigned long long)__entry->bytenr,
 		  (unsigned long long)__entry->num_bytes)
 );
@@ -1430,8 +1442,8 @@ TRACE_EVENT(btrfs_qgroup_account_extent,
 		__entry->nr_new_roots	= nr_new_roots;
 	),
 
-	TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, nr_old_roots = %llu, "
-		  "nr_new_roots = %llu",
+	TP_printk_btrfs("bytenr=%llu num_bytes=%llu nr_old_roots=%llu "
+		  "nr_new_roots=%llu",
 		  __entry->bytenr,
 		  __entry->num_bytes,
 		  __entry->nr_old_roots,
@@ -1457,7 +1469,7 @@ TRACE_EVENT(qgroup_update_counters,
 		__entry->cur_new_count	= cur_new_count;
 	),
 
-	TP_printk_btrfs("qgid = %llu, cur_old_count = %llu, cur_new_count = %llu",
+	TP_printk_btrfs("qgid=%llu cur_old_count=%llu cur_new_count=%llu",
 		  __entry->qgid,
 		  __entry->cur_old_count,
 		  __entry->cur_new_count)
include/trace/events/mmflags.h
@@ -47,8 +47,7 @@
 	{(unsigned long)__GFP_WRITE,		"__GFP_WRITE"},		\
 	{(unsigned long)__GFP_RECLAIM,		"__GFP_RECLAIM"},	\
 	{(unsigned long)__GFP_DIRECT_RECLAIM,	"__GFP_DIRECT_RECLAIM"},\
-	{(unsigned long)__GFP_KSWAPD_RECLAIM,	"__GFP_KSWAPD_RECLAIM"},\
-	{(unsigned long)__GFP_OTHER_NODE,	"__GFP_OTHER_NODE"}	\
+	{(unsigned long)__GFP_KSWAPD_RECLAIM,	"__GFP_KSWAPD_RECLAIM"}\
 
 #define show_gfp_flags(flags)						\
 	(flags) ? __print_flags(flags, "|",				\
include/uapi/linux/Kbuild
@@ -414,6 +414,7 @@ header-y += telephony.h
 header-y += termios.h
 header-y += thermal.h
 header-y += time.h
+header-y += timerfd.h
 header-y += times.h
 header-y += timex.h
 header-y += tiocl.h
include/uapi/linux/nl80211.h
@@ -1772,7 +1772,9 @@ enum nl80211_commands {
 *
 * @NL80211_ATTR_OPMODE_NOTIF: Operating mode field from Operating Mode
 *	Notification Element based on association request when used with
- *	%NL80211_CMD_NEW_STATION; u8 attribute.
+ *	%NL80211_CMD_NEW_STATION or %NL80211_CMD_SET_STATION (only when
+ *	%NL80211_FEATURE_FULL_AP_CLIENT_STATE is supported, or with TDLS);
+ *	u8 attribute.
 *
 * @NL80211_ATTR_VENDOR_ID: The vendor ID, either a 24-bit OUI or, if
 *	%NL80211_VENDOR_ID_IS_LINUX is set, a special Linux ID (not used yet)
include/uapi/linux/pkt_cls.h
@@ -397,7 +397,7 @@ enum {
 	TCA_BPF_NAME,
 	TCA_BPF_FLAGS,
 	TCA_BPF_FLAGS_GEN,
-	TCA_BPF_DIGEST,
+	TCA_BPF_TAG,
 	__TCA_BPF_MAX,
 };
 
include/uapi/linux/tc_act/tc_bpf.h
@@ -27,7 +27,7 @@ enum {
 	TCA_ACT_BPF_FD,
 	TCA_ACT_BPF_NAME,
 	TCA_ACT_BPF_PAD,
-	TCA_ACT_BPF_DIGEST,
+	TCA_ACT_BPF_TAG,
 	__TCA_ACT_BPF_MAX,
 };
 #define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
include/uapi/linux/timerfd.h (new file, 36 lines)
@@ -0,0 +1,36 @@
+/*
+ *  include/linux/timerfd.h
+ *
+ *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#ifndef _UAPI_LINUX_TIMERFD_H
+#define _UAPI_LINUX_TIMERFD_H
+
+#include <linux/types.h>
+
+/* For O_CLOEXEC and O_NONBLOCK */
+#include <linux/fcntl.h>
+
+/* For _IO helpers */
+#include <linux/ioctl.h>
+
+/*
+ * CAREFUL: Check include/asm-generic/fcntl.h when defining
+ * new flags, since they might collide with O_* ones. We want
+ * to re-use O_* flags that couldn't possibly have a meaning
+ * from eventfd, in order to leave a free define-space for
+ * shared O_* flags.
+ *
+ * Also make sure to update the masks in include/linux/timerfd.h
+ * when adding new flags.
+ */
+#define TFD_TIMER_ABSTIME (1 << 0)
+#define TFD_TIMER_CANCEL_ON_SET (1 << 1)
+#define TFD_CLOEXEC O_CLOEXEC
+#define TFD_NONBLOCK O_NONBLOCK
+
+#define TFD_IOC_SET_TICKS _IOW('T', 0, __u64)
+
+#endif /* _UAPI_LINUX_TIMERFD_H */
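For reference, the TFD_* flags exported by this new UAPI header are the ones userspace passes to the timerfd syscalls. A minimal userspace sketch (standard glibc wrappers, nothing from this merge):

	/* Minimal sketch: arm a 1-second relative timer and wait for it. */
	#include <sys/timerfd.h>
	#include <stdint.h>
	#include <unistd.h>

	int main(void)
	{
		struct itimerspec its = { .it_value = { .tv_sec = 1 } };
		uint64_t expirations;
		int fd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);

		if (fd < 0)
			return 1;
		/* 0 = relative; pass TFD_TIMER_ABSTIME for an absolute deadline.
		 * TFD_NONBLOCK would make the read below fail with EAGAIN
		 * instead of blocking. */
		timerfd_settime(fd, 0, &its, NULL);
		/* read() delivers the number of expirations since the last read */
		read(fd, &expirations, sizeof(expirations));
		close(fd);
		return 0;
	}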