Merge tag 'v4.13-rc7' into for-4.14/block-postmerge
Linux 4.13-rc7

Signed-off-by: Jens Axboe <axboe@kernel.dk>
@@ -1209,6 +1209,7 @@ static inline bool acpi_has_watchdog(void) { return false; }
#endif

#ifdef CONFIG_ACPI_SPCR_TABLE
extern bool qdf2400_e44_present;
int parse_spcr(bool earlycon);
#else
static inline int parse_spcr(bool earlycon) { return 0; }
@@ -568,7 +568,6 @@ struct request_queue {

#if defined(CONFIG_BLK_DEV_BSG)
bsg_job_fn *bsg_job_fn;
int bsg_job_size;
struct bsg_class_device bsg_dev;
#endif

@@ -24,6 +24,7 @@
#define _BLK_BSG_

#include <linux/blkdev.h>
#include <scsi/scsi_request.h>

struct request;
struct device;
@@ -37,6 +38,7 @@ struct bsg_buffer {
};

struct bsg_job {
struct scsi_request sreq;
struct device *dev;
struct request *req;

@@ -148,6 +148,7 @@ struct ceph_osd_request_target {
int size;
int min_size;
bool sort_bitwise;
bool recovery_deletes;

unsigned int flags; /* CEPH_OSD_FLAG_* */
bool paused;
@@ -272,6 +272,8 @@ bool ceph_is_new_interval(const struct ceph_osds *old_acting,
u32 new_pg_num,
bool old_sort_bitwise,
bool new_sort_bitwise,
bool old_recovery_deletes,
bool new_recovery_deletes,
const struct ceph_pg *pgid);
bool ceph_osds_changed(const struct ceph_osds *old_acting,
const struct ceph_osds *new_acting,
@@ -158,6 +158,10 @@ extern const char *ceph_osd_state_name(int s);
#define CEPH_OSDMAP_NOTIERAGENT (1<<13) /* disable tiering agent */
#define CEPH_OSDMAP_NOREBALANCE (1<<14) /* block osd backfill unless pg is degraded */
#define CEPH_OSDMAP_SORTBITWISE (1<<15) /* use bitwise hobject_t sort */
#define CEPH_OSDMAP_REQUIRE_JEWEL (1<<16) /* require jewel for booting osds */
#define CEPH_OSDMAP_REQUIRE_KRAKEN (1<<17) /* require kraken for booting osds */
#define CEPH_OSDMAP_REQUIRE_LUMINOUS (1<<18) /* require l for booting osds */
#define CEPH_OSDMAP_RECOVERY_DELETES (1<<19) /* deletes performed during recovery instead of peering */

/*
* The error code to return when an OSD can't handle a write
@@ -39,8 +39,6 @@ enum cpuhp_state {
CPUHP_PCI_XGENE_DEAD,
CPUHP_IOMMU_INTEL_DEAD,
CPUHP_LUSTRE_CFS_DEAD,
CPUHP_SCSI_BNX2FC_DEAD,
CPUHP_SCSI_BNX2I_DEAD,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
@@ -18,6 +18,19 @@

#ifdef CONFIG_CPUSETS

/*
* Static branch rewrites can happen in an arbitrary order for a given
* key. In code paths where we need to loop with read_mems_allowed_begin() and
* read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
* to ensure that begin() always gets rewritten before retry() in the
* disabled -> enabled transition. If not, then if local irqs are disabled
* around the loop, we can deadlock since retry() would always be
* comparing the latest value of the mems_allowed seqcount against 0 as
* begin() still would see cpusets_enabled() as false. The enabled -> disabled
* transition should happen in reverse order for the same reasons (want to stop
* looking at real value of mems_allowed.sequence in retry() first).
*/
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
@@ -32,12 +45,14 @@ static inline int nr_cpusets(void)

static inline void cpuset_inc(void)
{
static_branch_inc(&cpusets_pre_enable_key);
static_branch_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
static_branch_dec(&cpusets_enabled_key);
static_branch_dec(&cpusets_pre_enable_key);
}

extern int cpuset_init(void);
@@ -115,7 +130,7 @@ extern void cpuset_print_current_mems_allowed(void);
*/
static inline unsigned int read_mems_allowed_begin(void)
{
if (!cpusets_enabled())
if (!static_branch_unlikely(&cpusets_pre_enable_key))
return 0;

return read_seqcount_begin(&current->mems_allowed_seq);
@@ -129,7 +144,7 @@ static inline unsigned int read_mems_allowed_begin(void)
*/
static inline bool read_mems_allowed_retry(unsigned int seq)
{
if (!cpusets_enabled())
if (!static_branch_unlikely(&cpusets_enabled_key))
return false;

return read_seqcount_retry(&current->mems_allowed_seq, seq);
@@ -193,7 +193,7 @@ struct crush_choose_arg {
struct crush_choose_arg_map {
#ifdef __KERNEL__
struct rb_node node;
u64 choose_args_index;
s64 choose_args_index;
#endif
struct crush_choose_arg *args; /*!< replacement for each bucket
in the crushmap */
@@ -843,7 +843,7 @@ struct dev_links_info {
* hibernation, system resume and during runtime PM transitions
* along with subsystem-level and driver-level callbacks.
* @pins: For device pin management.
* See Documentation/pinctrl.txt for details.
* See Documentation/driver-api/pinctl.rst for details.
* @msi_list: Hosts MSI descriptors
* @msi_domain: The generic MSI domain this device is using.
* @numa_node: NUMA node this device is close to.
@@ -19,6 +19,7 @@

struct pts_fs_info;

struct vfsmount *devpts_mntget(struct file *, struct pts_fs_info *);
struct pts_fs_info *devpts_acquire(struct file *);
void devpts_release(struct pts_fs_info *);

@@ -32,6 +33,15 @@ void *devpts_get_priv(struct dentry *);
/* unlink */
void devpts_pty_kill(struct dentry *);

/* in pty.c */
int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags);

#else
static inline int
ptm_open_peer(struct file *master, struct tty_struct *tty, int flags)
{
return -EIO;
}
#endif

@@ -908,9 +908,9 @@ static inline struct file *get_file(struct file *f)
/* Page cache limit. The filesystems should put that into their s_maxbytes
limits, otherwise bad things can happen in VM. */
#if BITS_PER_LONG==32
#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT)
#elif BITS_PER_LONG==64
#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL)
#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
#endif

#define FL_POSIX 1
@@ -689,7 +689,8 @@ i2c_unlock_adapter(struct i2c_adapter *adapter)
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
#define I2C_CLASS_SPD (1<<7) /* Memory modules */
#define I2C_CLASS_DEPRECATED (1<<8) /* Warn users that adapter will stop using classes */
/* Warn users that the adapter doesn't support classes anymore */
#define I2C_CLASS_DEPRECATED (1<<8)

/* Internal numbers to terminate lists */
#define I2C_CLIENT_END 0xfffeU
@@ -105,6 +105,11 @@ struct st_sensor_fullscale {
struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX];
};

struct st_sensor_sim {
u8 addr;
u8 value;
};

/**
* struct st_sensor_bdu - ST sensor device block data update
* @addr: address of the register.
@@ -197,6 +202,7 @@ struct st_sensor_transfer_function {
* @bdu: Block data update register.
* @das: Data Alignment Selection register.
* @drdy_irq: Data ready register of the sensor.
* @sim: SPI serial interface mode register of the sensor.
* @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.
* @bootime: samples to discard when sensor passing from power-down to power-up.
*/
@@ -213,6 +219,7 @@ struct st_sensor_settings {
struct st_sensor_bdu bdu;
struct st_sensor_das das;
struct st_sensor_data_ready_irq drdy_irq;
struct st_sensor_sim sim;
bool multi_read_bit;
unsigned int bootime;
};
@@ -535,7 +535,7 @@ struct iio_buffer_setup_ops {
* @scan_timestamp: [INTERN] set if any buffers have requested timestamp
* @scan_index_timestamp:[INTERN] cache of the index to the timestamp
* @trig: [INTERN] current device trigger (buffer modes)
* @trig_readonly [INTERN] mark the current trigger immutable
* @trig_readonly: [INTERN] mark the current trigger immutable
* @pollfunc: [DRIVER] function run on trigger being received
* @pollfunc_event: [DRIVER] function run on events trigger being received
* @channels: [DRIVER] channel specification structure table
@@ -144,8 +144,8 @@ void devm_iio_trigger_unregister(struct device *dev,
/**
* iio_trigger_set_immutable() - set an immutable trigger on destination
*
* @indio_dev - IIO device structure containing the device
* @trig - trigger to assign to device
* @indio_dev: IIO device structure containing the device
* @trig: trigger to assign to device
*
**/
int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig);
@@ -240,7 +240,7 @@ struct iommu_device {
struct list_head list;
const struct iommu_ops *ops;
struct fwnode_handle *fwnode;
struct device dev;
struct device *dev;
};

int iommu_device_register(struct iommu_device *iommu);
@@ -265,6 +265,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
iommu->fwnode = fwnode;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
return (struct iommu_device *)dev_get_drvdata(dev);
}

#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
@@ -589,6 +594,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
{
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
return NULL;
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}
@@ -128,6 +128,7 @@ struct inet6_skb_parm {
#define IP6SKB_FRAGMENTED 16
#define IP6SKB_HOPBYHOP 32
#define IP6SKB_L3SLAVE 64
#define IP6SKB_JUMBOGRAM 128
};

#if defined(CONFIG_NET_L3_MASTER_DEV)
@@ -152,6 +153,11 @@ static inline int inet6_iif(const struct sk_buff *skb)
return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
}

static inline bool inet6_is_jumbogram(const struct sk_buff *skb)
{
return !!(IP6CB(skb)->flags & IP6SKB_JUMBOGRAM);
}

/* can not be used in TCP layer after tcp_v6_fill_cb */
static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
{
@@ -388,7 +388,12 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @irq_mask_ack: ack and mask an interrupt source
* @irq_unmask: unmask an interrupt source
* @irq_eoi: end of interrupt
* @irq_set_affinity: set the CPU affinity on SMP machines
* @irq_set_affinity: Set the CPU affinity on SMP machines. If the force
* argument is true, it tells the driver to
* unconditionally apply the affinity setting. Sanity
* checks against the supplied affinity mask are not
* required. This is used for CPU hotplug where the
* target CPU is not yet set in the cpu_online_mask.
* @irq_retrigger: resend an IRQ to the CPU
* @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
* @irq_set_wake: enable/disable power-management wake-on of an IRQ
@@ -15,7 +15,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
* @threadfn: the function to run in the thread
* @data: data pointer for @threadfn()
* @namefmt: printf-style format string for the thread name
* @...: arguments for @namefmt.
* @arg...: arguments for @namefmt.
*
* This macro will create a kthread on the current node, leaving it in
* the stopped state. This is just a helper for kthread_create_on_node();
@@ -477,7 +477,8 @@ struct kvm {
static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
lockdep_is_held(&kvm->slots_lock));
lockdep_is_held(&kvm->slots_lock) ||
!refcount_read(&kvm->users_count));
}

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
@@ -570,7 +571,8 @@ void kvm_put_kvm(struct kvm *kvm);
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
lockdep_is_held(&kvm->slots_lock));
lockdep_is_held(&kvm->slots_lock) ||
!refcount_read(&kvm->users_count));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
@@ -435,7 +435,7 @@ enum {
ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */
ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */
ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */

@@ -61,6 +61,7 @@ extern int memblock_debug;
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
@@ -74,8 +75,6 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
int nid, ulong flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
phys_addr_t size, phys_addr_t align);
phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
@@ -110,6 +109,9 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
phys_addr_t *out_end);

void __memblock_free_early(phys_addr_t base, phys_addr_t size);
void __memblock_free_late(phys_addr_t base, phys_addr_t size);

/**
* for_each_mem_range - iterate through memblock areas from type_a and not
* included in type_b. Or just type_a if type_b is NULL.
@@ -484,7 +484,8 @@ bool mem_cgroup_oom_synchronize(bool wait);
extern int do_swap_account;
#endif

void lock_page_memcg(struct page *page);
struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
@@ -809,7 +810,12 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void lock_page_memcg(struct page *page)
static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
return NULL;
}

static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}

@@ -620,6 +620,7 @@ struct mlx4_caps {
u32 dmfs_high_rate_qpn_base;
u32 dmfs_high_rate_qpn_range;
u32 vf_caps;
bool wol_port[MLX4_MAX_PORTS + 1];
struct mlx4_rate_limit_caps rl_caps;
};

@@ -7749,8 +7749,10 @@ struct mlx5_ifc_pcam_reg_bits {
};

struct mlx5_ifc_mcam_enhanced_features_bits {
u8 reserved_at_0[0x7f];
u8 reserved_at_0[0x7d];

u8 mtpps_enh_out_per_adj[0x1];
u8 mtpps_fs[0x1];
u8 pcie_performance_group[0x1];
};

@@ -8159,7 +8161,8 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 reserved_at_78[0x4];
u8 cap_pin_4_mode[0x4];

u8 reserved_at_80[0x80];
u8 field_select[0x20];
u8 reserved_at_a0[0x60];

u8 enable[0x1];
u8 reserved_at_101[0xb];
@@ -8174,8 +8177,9 @@ struct mlx5_ifc_mtpps_reg_bits {

u8 out_pulse_duration[0x10];
u8 out_periodic_adjustment[0x10];
u8 enhanced_out_periodic_adjustment[0x20];

u8 reserved_at_1a0[0x60];
u8 reserved_at_1c0[0x20];
};

struct mlx5_ifc_mtppse_reg_bits {
@@ -212,7 +212,6 @@ struct mlx5_wqe_ctrl_seg {
#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
#define MLX5_WQE_AV_EXT 0x80000000

enum {
MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
@@ -487,13 +487,15 @@ struct mm_struct {
/* numa_scan_seq prevents two threads setting pte_numa */
int numa_scan_seq;
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
/*
* An operation with batched TLB flushing is going on. Anything that
* can move process memory needs to flush the TLB when moving a
* PROT_NONE or PROT_NUMA mapped page.
*/
bool tlb_flush_pending;
atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/* See flush_tlb_batched_pending() */
bool tlb_flush_batched;
#endif
struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
@@ -518,46 +520,60 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
return mm->cpu_vm_mask_var;
}

#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
unsigned long start, unsigned long end);
extern void tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end);

/*
* Memory barriers to keep this state in sync are graciously provided by
* the page table locks, outside of which no page table modifications happen.
* The barriers below prevent the compiler from re-ordering the instructions
* around the memory barriers that are already present in the code.
* The barriers are used to ensure the order between tlb_flush_pending updates,
* which happen while the lock is not taken, and the PTE updates, which happen
* while the lock is taken, are serialized.
*/
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
barrier();
return mm->tlb_flush_pending;
return atomic_read(&mm->tlb_flush_pending) > 0;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)

/*
* Returns true if there are two above TLB batching threads in parallel.
*/
static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
mm->tlb_flush_pending = true;
return atomic_read(&mm->tlb_flush_pending) > 1;
}

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
atomic_inc(&mm->tlb_flush_pending);

/*
* Guarantee that the tlb_flush_pending store does not leak into the
* Guarantee that the tlb_flush_pending increase does not leak into the
* critical section updating the page tables
*/
smp_mb__before_spinlock();
}

/* Clearing is done after a TLB flush, which also provides a barrier. */
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
barrier();
mm->tlb_flush_pending = false;
/*
* Guarantee that the tlb_flush_pending does not not leak into the
* critical section, since we must order the PTE change and changes to
* the pending TLB flush indication. We could have relied on TLB flush
* as a memory barrier, but this behavior is not clearly documented.
*/
smp_mb__before_atomic();
atomic_dec(&mm->tlb_flush_pending);
}
#else
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
return false;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)
{
}
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
{
}
#endif

struct vm_fault;

@@ -681,10 +681,10 @@ struct nand_buffers {
* @tWW_min: WP# transition to WE# low
*/
struct nand_sdr_timings {
u32 tBERS_max;
u64 tBERS_max;
u32 tCCS_min;
u32 tPROG_max;
u32 tR_max;
u64 tPROG_max;
u64 tR_max;
u32 tALH_min;
u32 tADL_min;
u32 tALS_min;
@@ -37,7 +37,7 @@ struct net;

/* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located
* in sock->flags, but moved into sk->sk_wq->flags to be RCU protected.
* Eventually all flags will be in sk->sk_wq_flags.
* Eventually all flags will be in sk->sk_wq->flags.
*/
#define SOCKWQ_ASYNC_NOSPACE 0
#define SOCKWQ_ASYNC_WAITDATA 1
@@ -1235,7 +1235,7 @@ struct nfs41_state_protection {

struct nfs41_exchange_id_args {
struct nfs_client *client;
nfs4_verifier *verifier;
nfs4_verifier verifier;
u32 flags;
struct nfs41_state_protection state_protect;
};
@@ -168,6 +168,14 @@ extern int sysctl_hardlockup_all_cpu_backtrace;
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif

#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
defined(CONFIG_HARDLOCKUP_DETECTOR)
void watchdog_update_hrtimer_threshold(u64 period);
#else
static inline void watchdog_update_hrtimer_threshold(u64 period) { }
#endif

extern bool is_hardlockup(void);
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int ,
@@ -346,6 +346,11 @@ struct nvme_fc_remote_port {
* indicating an FC transport Aborted status.
* Entrypoint is Mandatory.
*
* @defer_rcv: Called by the transport to signal the LLLD that it has
* begun processing of a previously received NVME CMD IU. The LLDD
* is now free to re-use the rcv buffer associated with the
* nvmefc_tgt_fcp_req.
*
* @max_hw_queues: indicates the maximum number of hw queues the LLDD
* supports for cpu affinitization.
* Value is Mandatory. Must be at least 1.
@@ -846,6 +851,8 @@ struct nvmet_fc_target_template {
struct nvmefc_tgt_fcp_req *fcpreq);
void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq);
void (*defer_rcv)(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq);

u32 max_hw_queues;
u16 max_sgl_segments;
@@ -6,6 +6,8 @@
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>
#include <linux/sched/coredump.h> /* MMF_* */
#include <linux/mm.h> /* VM_FAULT* */

struct zonelist;
struct notifier_block;
@@ -63,6 +65,26 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
return tsk->signal->oom_mm;
}

/*
* Checks whether a page fault on the given mm is still reliable.
* This is no longer true if the oom reaper started to reap the
* address space which is reflected by MMF_UNSTABLE flag set in
* the mm. At that moment any !shared mapping would lose the content
* and could cause a memory corruption (zero pages instead of the
* original content).
*
* User should call this before establishing a page table entry for
* a !shared mapping and under the proper page table lock.
*
* Return 0 when the PF is safe VM_FAULT_SIGBUS otherwise.
*/
static inline int check_stable_address_space(struct mm_struct *mm)
{
if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
return VM_FAULT_SIGBUS;
return 0;
}

extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
unsigned long totalpages);
@@ -163,8 +163,6 @@ void release_pages(struct page **pages, int nr, bool cold);
*/
static inline int page_cache_get_speculative(struct page *page)
{
VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
VM_BUG_ON(!in_atomic() && !irqs_disabled());
@@ -188,6 +188,8 @@ enum pci_dev_flags {
* the direct_complete optimization.
*/
PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11),
/* Don't use Relaxed Ordering for TLPs directed at this device */
PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 12),
};

enum pci_irq_reroute_variant {
@@ -1067,6 +1069,7 @@ void pcie_flr(struct pci_dev *dev);
int __pci_reset_function(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
int pci_reset_function_locked(struct pci_dev *dev);
int pci_try_reset_function(struct pci_dev *dev);
int pci_probe_reset_slot(struct pci_slot *slot);
int pci_reset_slot(struct pci_slot *slot);
@@ -1125,6 +1128,7 @@ bool pci_check_pme_status(struct pci_dev *dev);
void pci_pme_wakeup_bus(struct pci_bus *bus);
void pci_d3cold_enable(struct pci_dev *dev);
void pci_d3cold_disable(struct pci_dev *dev);
bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);

/* PCI Virtual Channel */
int pci_save_vc_state(struct pci_dev *dev);
@@ -310,8 +310,8 @@ struct pmu {
* Notification that the event was mapped or unmapped. Called
* in the context of the mapping task.
*/
void (*event_mapped) (struct perf_event *event); /*optional*/
void (*event_unmapped) (struct perf_event *event); /*optional*/
void (*event_mapped) (struct perf_event *event, struct mm_struct *mm); /* optional */
void (*event_unmapped) (struct perf_event *event, struct mm_struct *mm); /* optional */

/*
* Flags for ->add()/->del()/ ->start()/->stop(). There are
@@ -830,7 +830,7 @@ static inline int phy_read_status(struct phy_device *phydev)
dev_err(&_phydev->mdio.dev, format, ##args)

#define phydev_dbg(_phydev, format, args...) \
dev_dbg(&_phydev->mdio.dev, format, ##args);
dev_dbg(&_phydev->mdio.dev, format, ##args)

static inline const char *phydev_name(const struct phy_device *phydev)
{
@@ -8,7 +8,9 @@ enum pid_type
PIDTYPE_PID,
PIDTYPE_PGID,
PIDTYPE_SID,
PIDTYPE_MAX
PIDTYPE_MAX,
/* only valid to __task_pid_nr_ns() */
__PIDTYPE_TGID
};

/*
@@ -81,8 +81,8 @@
* it.
* @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a
* value on the line. Use argument 1 to indicate high level, argument 0 to
* indicate low level. (Please see Documentation/pinctrl.txt, section
* "GPIO mode pitfalls" for a discussion around this parameter.)
* indicate low level. (Please see Documentation/driver-api/pinctl.rst,
* section "GPIO mode pitfalls" for a discussion around this parameter.)
* @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
* supplies, the argument to this parameter (on a custom format) tells
* the driver which alternative power source to use.
@@ -17,10 +17,12 @@
* Available only for accelerometer and pressure sensors.
* Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
* @open_drain: set the interrupt line to be open drain if possible.
* @spi_3wire: enable spi-3wire mode.
*/
struct st_sensors_platform_data {
u8 drdy_int_pin;
bool open_drain;
bool spi_3wire;
};

#endif /* ST_SENSORS_PDATA_H */
@@ -99,6 +99,11 @@ struct system_device_crosststamp;
* parameter func: the desired function to use.
* parameter chan: the function channel index to use.
*
* @do_work: Request driver to perform auxiliary (periodic) operations
* Driver should return delay of the next auxiliary work scheduling
* time (>=0) or negative value in case further scheduling
* is not required.
*
* Drivers should embed their ptp_clock_info within a private
* structure, obtaining a reference to it using container_of().
*
@@ -126,6 +131,7 @@ struct ptp_clock_info {
struct ptp_clock_request *request, int on);
int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan);
long (*do_aux_work)(struct ptp_clock_info *ptp);
};

struct ptp_clock;
@@ -211,6 +217,16 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan);

/**
* ptp_schedule_worker() - schedule ptp auxiliary work
*
* @ptp: The clock obtained from ptp_clock_register().
* @delay: number of jiffies to wait before queuing
* See kthread_queue_delayed_work() for more info.
*/

int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);

#else
static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
struct device *parent)
@@ -225,6 +241,10 @@ static inline int ptp_clock_index(struct ptp_clock *ptp)
static inline int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan)
{ return -1; }
static inline int ptp_schedule_worker(struct ptp_clock *ptp,
unsigned long delay)
{ return -EOPNOTSUPP; }

#endif

#endif
@@ -436,9 +436,9 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
__PTR_RING_PEEK_CALL_v; \
})

static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
{
return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
return kcalloc(size, sizeof(void *), gfp);
}

static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
@@ -582,7 +582,8 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
* In particular if you consume ring in interrupt or BH context, you must
* disable interrupts/BH when doing so.
*/
static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
unsigned int nrings,
int size,
gfp_t gfp, void (*destroy)(void *))
{
@@ -590,7 +591,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
void ***queues;
int i;

queues = kmalloc(nrings * sizeof *queues, gfp);
queues = kmalloc_array(nrings, sizeof(*queues), gfp);
if (!queues)
goto noqueues;

@@ -1163,13 +1163,6 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk)
return tsk->tgid;
}

extern pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
return pid_vnr(task_tgid(tsk));
}

/**
* pid_alive - check that a task structure is not stale
* @p: Task structure to be checked.
@@ -1185,23 +1178,6 @@ static inline int pid_alive(const struct task_struct *p)
return p->pids[PIDTYPE_PID].pid != NULL;
}

static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
pid_t pid = 0;

rcu_read_lock();
if (pid_alive(tsk))
pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
rcu_read_unlock();

return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
return task_ppid_nr_ns(tsk, &init_pid_ns);
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
@@ -1223,6 +1199,33 @@ static inline pid_t task_session_vnr(struct task_struct *tsk)
return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
}

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
}

static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
pid_t pid = 0;

rcu_read_lock();
if (pid_alive(tsk))
pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
rcu_read_unlock();

return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
return task_ppid_nr_ns(tsk, &init_pid_ns);
}

/* Obsolete, do not use: */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
@@ -193,7 +193,8 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
}

static inline int skb_array_resize_multiple(struct skb_array **rings,
int nrings, int size, gfp_t gfp)
int nrings, unsigned int size,
gfp_t gfp)
{
BUILD_BUG_ON(offsetof(struct skb_array, ring));
return ptr_ring_resize_multiple((struct ptr_ring **)rings,
@@ -43,12 +43,13 @@ struct sync_file {
#endif

wait_queue_head_t wq;
unsigned long flags;

struct dma_fence *fence;
struct dma_fence_cb cb;
};

#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS
#define POLL_ENABLED 0

struct sync_file *sync_file_create(struct dma_fence *fence);
struct dma_fence *sync_file_get_fence(int fd);
@@ -152,7 +152,7 @@ extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr,
size_t *data_size);

struct pci_dev;
#ifdef CONFIG_EEH
#if IS_ENABLED(CONFIG_VFIO_SPAPR_EEH)
extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev);
extern void vfio_spapr_pci_eeh_release(struct pci_dev *pdev);
extern long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
@@ -173,7 +173,7 @@ static inline long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
{
return -ENOTTY;
}
#endif /* CONFIG_EEH */
#endif /* CONFIG_VFIO_SPAPR_EEH */

/*
* IRQfd - generic
@@ -529,13 +529,13 @@ do { \

/**
* wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
* @wq_head: the waitqueue to wait on
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @timeout: timeout, as a ktime_t
*
* The process is put to sleep (TASK_INTERRUPTIBLE) until the
* @condition evaluates to true or a signal is received.
* The @condition is checked each time the waitqueue @wq_head is woken up.
* The @condition is checked each time the waitqueue @wq is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
@@ -735,12 +735,12 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);

/**
* wait_event_killable - sleep until a condition gets true
* @wq: the waitqueue to wait on
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
* The process is put to sleep (TASK_KILLABLE) until the
* @condition evaluates to true or a signal is received.
* The @condition is checked each time the waitqueue @wq is woken up.
* The @condition is checked each time the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
@@ -757,6 +757,43 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
__ret; \
})

#define __wait_event_killable_timeout(wq_head, condition, timeout) \
___wait_event(wq_head, ___wait_cond_timeout(condition), \
TASK_KILLABLE, 0, timeout, \
__ret = schedule_timeout(__ret))

/**
* wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
* @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @timeout: timeout, in jiffies
*
* The process is put to sleep (TASK_KILLABLE) until the
* @condition evaluates to true or a kill signal is received.
* The @condition is checked each time the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
* Returns:
* 0 if the @condition evaluated to %false after the @timeout elapsed,
* 1 if the @condition evaluated to %true after the @timeout elapsed,
* the remaining jiffies (at least 1) if the @condition evaluated
* to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
* interrupted by a kill signal.
*
* Only kill signals interrupt this process.
*/
#define wait_event_killable_timeout(wq_head, condition, timeout) \
({ \
long __ret = timeout; \
might_sleep(); \
if (!___wait_cond_timeout(condition)) \
__ret = __wait_event_killable_timeout(wq_head, \
condition, timeout); \
__ret; \
})


#define __wait_event_lock_irq(wq_head, condition, lock, cmd) \
(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
@@ -323,6 +323,7 @@ enum {

__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
__WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
__WQ_ORDERED_EXPLICIT = 1 << 18, /* internal: alloc_ordered_workqueue() */
__WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */

WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
@@ -422,7 +423,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
#define alloc_ordered_workqueue(fmt, flags, args...) \
alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \
__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)

#define create_workqueue(name) \
alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))