Merge back earlier cpufreq material for v4.10.
include/linux/acpi.h
@@ -85,6 +85,8 @@ static inline const char *acpi_dev_name(struct acpi_device *adev)
        return dev_name(&adev->dev);
}

struct device *acpi_get_first_physical_node(struct acpi_device *adev);

enum acpi_irq_model_id {
        ACPI_IRQ_MODEL_PIC = 0,
        ACPI_IRQ_MODEL_IOAPIC,
@@ -267,12 +269,18 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
        return phys_id == PHYS_CPUID_INVALID;
}

/* Validate the processor object's proc_id */
bool acpi_processor_validate_proc_id(int proc_id);

#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu);
int acpi_unmap_cpu(int cpu);
int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */

void acpi_set_processor_mapping(void);

#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
#endif
@@ -634,6 +642,11 @@ static inline const char *acpi_dev_name(struct acpi_device *adev)
        return NULL;
}

static inline struct device *acpi_get_first_physical_node(struct acpi_device *adev)
{
        return NULL;
}

static inline void acpi_early_init(void) { }
static inline void acpi_subsystem_init(void) { }

@@ -751,6 +764,12 @@ static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb)

#endif /* !CONFIG_ACPI */

#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
int acpi_ioapic_add(acpi_handle root);
#else
static inline int acpi_ioapic_add(acpi_handle root) { return 0; }
#endif

#ifdef CONFIG_ACPI
void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
                               u32 pm1a_ctrl, u32 pm1b_ctrl));
@@ -927,9 +946,17 @@ struct acpi_reference_args {
#ifdef CONFIG_ACPI
int acpi_dev_get_property(struct acpi_device *adev, const char *name,
                          acpi_object_type type, const union acpi_object **obj);
int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
                                     const char *name, size_t index,
                                     struct acpi_reference_args *args);
int __acpi_node_get_property_reference(struct fwnode_handle *fwnode,
                const char *name, size_t index, size_t num_args,
                struct acpi_reference_args *args);

static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
                const char *name, size_t index,
                struct acpi_reference_args *args)
{
        return __acpi_node_get_property_reference(fwnode, name, index,
                MAX_ACPI_REFERENCE_ARGS, args);
}

int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname,
                       void **valptr);
@@ -1005,6 +1032,14 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
        return -ENXIO;
}

static inline int
__acpi_node_get_property_reference(struct fwnode_handle *fwnode,
                const char *name, size_t index, size_t num_args,
                struct acpi_reference_args *args)
{
        return -ENXIO;
}

static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
                const char *name, size_t index,
                struct acpi_reference_args *args)
@@ -1074,4 +1109,16 @@ void acpi_table_upgrade(void);
static inline void acpi_table_upgrade(void) { }
#endif

#if defined(CONFIG_ACPI) && defined(CONFIG_ACPI_WATCHDOG)
extern bool acpi_has_watchdog(void);
#else
static inline bool acpi_has_watchdog(void) { return false; }
#endif

#ifdef CONFIG_ACPI_SPCR_TABLE
int parse_spcr(bool earlycon);
#else
static inline int parse_spcr(bool earlycon) { return 0; }
#endif

#endif /*_LINUX_ACPI_H*/

include/linux/acpi_iort.h (new file, +42 lines)
@@ -0,0 +1,42 @@
/*
 * Copyright (C) 2016, Semihalf
 *      Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#ifndef __ACPI_IORT_H__
#define __ACPI_IORT_H__

#include <linux/acpi.h>
#include <linux/fwnode.h>
#include <linux/irqdomain.h>

int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node);
void iort_deregister_domain_token(int trans_id);
struct fwnode_handle *iort_find_domain_token(int trans_id);
#ifdef CONFIG_ACPI_IORT
void acpi_iort_init(void);
u32 iort_msi_map_rid(struct device *dev, u32 req_id);
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
#else
static inline void acpi_iort_init(void) { }
static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
{ return req_id; }
static inline struct irq_domain *iort_get_device_domain(struct device *dev,
                                                        u32 req_id)
{ return NULL; }
#endif

#endif /* __ACPI_IORT_H__ */

include/linux/aer.h
@@ -63,7 +63,7 @@ static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
}
#endif

void cper_print_aer(struct pci_dev *dev, int cper_severity,
void cper_print_aer(struct pci_dev *dev, int aer_severity,
                    struct aer_capability_regs *aer);
int cper_severity_to_aer(int cper_severity);
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,

include/linux/amba/bus.h
@@ -53,8 +53,14 @@ enum amba_vendor {
        AMBA_VENDOR_ST = 0x80,
        AMBA_VENDOR_QCOM = 0x51,
        AMBA_VENDOR_LSI = 0xb6,
        AMBA_VENDOR_LINUX = 0xfe,       /* This value is not official */
};

/* This is used to generate pseudo-ID for AMBA device */
#define AMBA_LINUX_ID(conf, rev, part) \
        (((conf) & 0xff) << 24 | ((rev) & 0xf) << 20 | \
        AMBA_VENDOR_LINUX << 12 | ((part) & 0xfff))

extern struct bus_type amba_bustype;

#define to_amba_device(d)       container_of(d, struct amba_device, dev)

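A quick worked example of the new AMBA_LINUX_ID() macro, with purely illustrative values: conf = 0, rev = 1, part = 0x041 gives (1 << 20) | (0xfe << 12) | 0x041 = 0x001fe041, i.e. the unofficial Linux vendor code 0xfe always occupies bits 19:12 of the pseudo-ID.

/* hypothetical pseudo-ID for a board device that has no real AMBA ID */
#define MY_FAKE_CLCD_ID AMBA_LINUX_ID(0, 1, 0x041)      /* == 0x001fe041 */
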
include/linux/amba/clcd.h
@@ -67,6 +67,17 @@
#define CNTL_LDMAFIFOTIME       (1 << 15)
#define CNTL_WATERMARK          (1 << 16)

/* ST Microelectronics variant bits */
#define CNTL_ST_1XBPP_444       0x0
#define CNTL_ST_1XBPP_5551      (1 << 17)
#define CNTL_ST_1XBPP_565       (1 << 18)
#define CNTL_ST_CDWID_12        0x0
#define CNTL_ST_CDWID_16        (1 << 19)
#define CNTL_ST_CDWID_18        (1 << 20)
#define CNTL_ST_CDWID_24        ((1 << 19)|(1 << 20))
#define CNTL_ST_CEAEN           (1 << 21)
#define CNTL_ST_LCDBPP24_PACKED (6 << 1)

enum {
        /* individual formats */
        CLCD_CAP_RGB444         = (1 << 0),
@@ -93,6 +104,8 @@ enum {
        CLCD_CAP_ALL            = CLCD_CAP_BGR | CLCD_CAP_RGB,
};

struct backlight_device;

struct clcd_panel {
        struct fb_videomode     mode;
        signed short            width;  /* width in mm */
@@ -105,6 +118,13 @@ struct clcd_panel {
                                fixedtimings:1,
                                grayscale:1;
        unsigned int            connector;
        struct backlight_device *backlight;
        /*
         * If the B/R lines are switched between the CLCD
         * and the panel we need to know this and not try to
         * compensate with the BGR bit in the control register.
         */
        bool                    bgr_connection;
};

struct clcd_regs {
@@ -170,11 +190,38 @@ struct clcd_board {
struct amba_device;
struct clk;

/**
 * struct clcd_vendor_data - holds hardware (IP-block) vendor-specific
 * variant information
 *
 * @clock_timregs: the CLCD needs to be clocked when accessing the
 * timer registers, or the hardware will hang.
 * @packed_24_bit_pixels: this variant supports 24bit packed pixel data,
 * so that RGB accesses 3 bytes at a time, not just on even 32bit
 * boundaries, packing the pixel data in memory. ST Microelectronics
 * have this.
 * @st_bitmux_control: ST Microelectronics have implemented output
 * bit line multiplexing into the CLCD control register. This indicates
 * that we need to use this.
 * @init_board: custom board init function for this variant
 * @init_panel: custom panel init function for this variant
 */
struct clcd_vendor_data {
        bool    clock_timregs;
        bool    packed_24_bit_pixels;
        bool    st_bitmux_control;
        int     (*init_board)(struct amba_device *adev,
                              struct clcd_board *board);
        int     (*init_panel)(struct clcd_fb *fb,
                              struct device_node *panel);
};

/* this data structure describes each frame buffer device we find */
struct clcd_fb {
        struct fb_info          fb;
        struct amba_device      *dev;
        struct clk              *clk;
        struct clcd_vendor_data *vendor;
        struct clcd_panel       *panel;
        struct clcd_board       *board;
        void                    *board_data;
@@ -231,16 +278,22 @@ static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
        if (var->grayscale)
                val |= CNTL_LCDBW;

        if (fb->panel->caps && fb->board->caps &&
            var->bits_per_pixel >= 16) {
        if (fb->panel->caps && fb->board->caps && var->bits_per_pixel >= 16) {
                /*
                 * if board and panel supply capabilities, we can support
                 * changing BGR/RGB depending on supplied parameters
                 * changing BGR/RGB depending on supplied parameters. Here
                 * we switch to what the framebuffer is providing if need
                 * be, so if the framebuffer is BGR but the display connection
                 * is RGB (first case) we switch it around. Vice versa mutatis
                 * mutandis if the framebuffer is RGB but the display connection
                 * is BGR, we flip it around.
                 */
                if (var->red.offset == 0)
                        val &= ~CNTL_BGR;
                else
                        val |= CNTL_BGR;
                if (fb->panel->bgr_connection)
                        val ^= CNTL_BGR;
        }

        switch (var->bits_per_pixel) {
@@ -270,6 +323,10 @@ static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
                else
                        val |= CNTL_LCDBPP16_444;
                break;
        case 24:
                /* Modified variant supporting 24 bit packed pixels */
                val |= CNTL_ST_LCDBPP24_PACKED;
                break;
        case 32:
                val |= CNTL_LCDBPP24;
                break;

include/linux/amba/serial.h
@@ -104,6 +104,15 @@
#define UART01x_FR_CTS          0x001
#define UART01x_FR_TMSK         (UART01x_FR_TXFF + UART01x_FR_BUSY)

/*
 * Some bits of Flag Register on ZTE device have different position from
 * standard ones.
 */
#define ZX_UART01x_FR_BUSY      0x100
#define ZX_UART01x_FR_DSR       0x008
#define ZX_UART01x_FR_CTS       0x002
#define ZX_UART011_FR_RI        0x001

#define UART011_CR_CTSEN        0x8000  /* CTS hardware flow control */
#define UART011_CR_RTSEN        0x4000  /* RTS hardware flow control */
#define UART011_CR_OUT2         0x2000  /* OUT2 */

include/linux/amd-iommu.h
@@ -22,6 +22,20 @@

#include <linux/types.h>

/*
 * This is mainly used to communicate information back-and-forth
 * between SVM and IOMMU for setting up and tearing down posted
 * interrupt
 */
struct amd_iommu_pi_data {
        u32 ga_tag;
        u32 prev_ga_tag;
        u64 base;
        bool is_guest_mode;
        struct vcpu_data *vcpu_data;
        void *ir_data;
};

#ifdef CONFIG_AMD_IOMMU

struct task_struct;
@@ -168,11 +182,34 @@ typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, int pasid);

extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
                                           amd_iommu_invalidate_ctx cb);

#else
#else /* CONFIG_AMD_IOMMU */

static inline int amd_iommu_detect(void) { return -ENODEV; }

#endif
#endif /* CONFIG_AMD_IOMMU */

#if defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP)

/* IOMMU AVIC Function */
extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32));

extern int
amd_iommu_update_ga(int cpu, bool is_run, void *data);

#else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */

static inline int
amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
{
        return 0;
}

static inline int
amd_iommu_update_ga(int cpu, bool is_run, void *data)
{
        return 0;
}

#endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */

#endif /* _ASM_X86_AMD_IOMMU_H */

include/linux/ata.h
@@ -105,6 +105,7 @@ enum {
        ATA_ID_CFA_KEY_MGMT     = 162,
        ATA_ID_CFA_MODES        = 163,
        ATA_ID_DATA_SET_MGMT    = 169,
        ATA_ID_SCT_CMD_XPORT    = 206,
        ATA_ID_ROT_SPEED        = 217,
        ATA_ID_PIO4             = (1 << 1),

@@ -788,6 +789,48 @@ static inline bool ata_id_sense_reporting_enabled(const u16 *id)
        return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
}

/**
 *
 * Word: 206 - SCT Command Transport
 *    15:12 - Vendor Specific
 *     11:6 - Reserved
 *        5 - SCT Command Transport Data Tables supported
 *        4 - SCT Command Transport Features Control supported
 *        3 - SCT Command Transport Error Recovery Control supported
 *        2 - SCT Command Transport Write Same supported
 *        1 - SCT Command Transport Long Sector Access supported
 *        0 - SCT Command Transport supported
 */
static inline bool ata_id_sct_data_tables(const u16 *id)
{
        return id[ATA_ID_SCT_CMD_XPORT] & (1 << 5) ? true : false;
}

static inline bool ata_id_sct_features_ctrl(const u16 *id)
{
        return id[ATA_ID_SCT_CMD_XPORT] & (1 << 4) ? true : false;
}

static inline bool ata_id_sct_error_recovery_ctrl(const u16 *id)
{
        return id[ATA_ID_SCT_CMD_XPORT] & (1 << 3) ? true : false;
}

static inline bool ata_id_sct_write_same(const u16 *id)
{
        return id[ATA_ID_SCT_CMD_XPORT] & (1 << 2) ? true : false;
}

static inline bool ata_id_sct_long_sector_access(const u16 *id)
{
        return id[ATA_ID_SCT_CMD_XPORT] & (1 << 1) ? true : false;
}

static inline bool ata_id_sct_supported(const u16 *id)
{
        return id[ATA_ID_SCT_CMD_XPORT] & (1 << 0) ? true : false;
}

/**
 *      ata_id_major_version    -       get ATA level of drive
 *      @id: Identify data
@@ -1071,32 +1114,6 @@ static inline void ata_id_to_hd_driveid(u16 *id)
#endif
}

/*
 * Write LBA Range Entries to the buffer that will cover the extent from
 * sector to sector + count.  This is used for TRIM and for ADD LBA(S)
 * TO NV CACHE PINNED SET.
 */
static inline unsigned ata_set_lba_range_entries(void *_buffer,
                unsigned num, u64 sector, unsigned long count)
{
        __le64 *buffer = _buffer;
        unsigned i = 0, used_bytes;

        while (i < num) {
                u64 entry = sector |
                        ((u64)(count > 0xffff ? 0xffff : count) << 48);
                buffer[i++] = __cpu_to_le64(entry);
                if (count <= 0xffff)
                        break;
                count -= 0xffff;
                sector += 0xffff;
        }

        used_bytes = ALIGN(i * 8, 512);
        memset(buffer + i, 0, used_bytes - i * 8);
        return used_bytes;
}

static inline bool ata_ok(u8 status)
{
        return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))

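A minimal sketch of how a driver could gate an SCT-based feature on the new word-206 helpers; the wrapper function is hypothetical, only the ata_id_sct_*() accessors come from this hunk:

static bool my_sct_write_same_usable(const u16 *id)
{
        /* bit 0 of word 206: SCT supported at all; bit 2: SCT Write Same */
        return ata_id_sct_supported(id) && ata_id_sct_write_same(id);
}
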
include/linux/atmel_serial.h
@@ -118,6 +118,8 @@

#define ATMEL_US_BRGR           0x20    /* Baud Rate Generator Register */
#define ATMEL_US_CD             GENMASK(15, 0)  /* Clock Divider */
#define ATMEL_US_FP_OFFSET      16      /* Fractional Part */
#define ATMEL_US_FP_MASK        0x7

#define ATMEL_US_RTOR           0x24    /* Receiver Time-out Register for USART */
#define ATMEL_UA_RTOR           0x28    /* Receiver Time-out Register for UART */

include/linux/auto_dev-ioctl.h
@@ -10,214 +10,5 @@
#ifndef _LINUX_AUTO_DEV_IOCTL_H
#define _LINUX_AUTO_DEV_IOCTL_H

#include <linux/auto_fs.h>
#include <linux/string.h>

#define AUTOFS_DEVICE_NAME              "autofs"

#define AUTOFS_DEV_IOCTL_VERSION_MAJOR  1
#define AUTOFS_DEV_IOCTL_VERSION_MINOR  0

#define AUTOFS_DEVID_LEN                16

#define AUTOFS_DEV_IOCTL_SIZE           sizeof(struct autofs_dev_ioctl)

/*
 * An ioctl interface for autofs mount point control.
 */

struct args_protover {
        __u32   version;
};

struct args_protosubver {
        __u32   sub_version;
};

struct args_openmount {
        __u32   devid;
};

struct args_ready {
        __u32   token;
};

struct args_fail {
        __u32   token;
        __s32   status;
};

struct args_setpipefd {
        __s32   pipefd;
};

struct args_timeout {
        __u64   timeout;
};

struct args_requester {
        __u32   uid;
        __u32   gid;
};

struct args_expire {
        __u32   how;
};

struct args_askumount {
        __u32   may_umount;
};

struct args_ismountpoint {
        union {
                struct args_in {
                        __u32   type;
                } in;
                struct args_out {
                        __u32   devid;
                        __u32   magic;
                } out;
        };
};

/*
 * All the ioctls use this structure.
 * When sending a path size must account for the total length
 * of the chunk of memory otherwise it is the size of the
 * structure.
 */

struct autofs_dev_ioctl {
        __u32 ver_major;
        __u32 ver_minor;
        __u32 size;             /* total size of data passed in
                                 * including this struct */
        __s32 ioctlfd;          /* automount command fd */

        /* Command parameters */

        union {
                struct args_protover            protover;
                struct args_protosubver         protosubver;
                struct args_openmount           openmount;
                struct args_ready               ready;
                struct args_fail                fail;
                struct args_setpipefd           setpipefd;
                struct args_timeout             timeout;
                struct args_requester           requester;
                struct args_expire              expire;
                struct args_askumount           askumount;
                struct args_ismountpoint        ismountpoint;
        };

        char path[0];
};

static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)
{
        memset(in, 0, sizeof(struct autofs_dev_ioctl));
        in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
        in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
        in->size = sizeof(struct autofs_dev_ioctl);
        in->ioctlfd = -1;
}

/*
 * If you change this make sure you make the corresponding change
 * to autofs-dev-ioctl.c:lookup_ioctl()
 */
enum {
        /* Get various version info */
        AUTOFS_DEV_IOCTL_VERSION_CMD = 0x71,
        AUTOFS_DEV_IOCTL_PROTOVER_CMD,
        AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD,

        /* Open mount ioctl fd */
        AUTOFS_DEV_IOCTL_OPENMOUNT_CMD,

        /* Close mount ioctl fd */
        AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD,

        /* Mount/expire status returns */
        AUTOFS_DEV_IOCTL_READY_CMD,
        AUTOFS_DEV_IOCTL_FAIL_CMD,

        /* Activate/deactivate autofs mount */
        AUTOFS_DEV_IOCTL_SETPIPEFD_CMD,
        AUTOFS_DEV_IOCTL_CATATONIC_CMD,

        /* Expiry timeout */
        AUTOFS_DEV_IOCTL_TIMEOUT_CMD,

        /* Get mount last requesting uid and gid */
        AUTOFS_DEV_IOCTL_REQUESTER_CMD,

        /* Check for eligible expire candidates */
        AUTOFS_DEV_IOCTL_EXPIRE_CMD,

        /* Request busy status */
        AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD,

        /* Check if path is a mountpoint */
        AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD,
};

#define AUTOFS_IOCTL 0x93

#define AUTOFS_DEV_IOCTL_VERSION \
        _IOWR(AUTOFS_IOCTL, \
              AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl)

#define AUTOFS_DEV_IOCTL_PROTOVER \
        _IOWR(AUTOFS_IOCTL, \
              AUTOFS_DEV_IOCTL_PROTOVER_CMD, struct autofs_dev_ioctl)

#define AUTOFS_DEV_IOCTL_PROTOSUBVER \
        _IOWR(AUTOFS_IOCTL, \
              AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, struct autofs_dev_ioctl)

#define AUTOFS_DEV_IOCTL_OPENMOUNT \
        _IOWR(AUTOFS_IOCTL, \
              AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, struct autofs_dev_ioctl)

#define AUTOFS_DEV_IOCTL_CLOSEMOUNT \
        _IOWR(AUTOFS_IOCTL, \
              AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, struct autofs_dev_ioctl)

#define AUTOFS_DEV_IOCTL_READY \
        _IOWR(AUTOFS_IOCTL, \
              AUTOFS_DEV_IOCTL_READY_CMD, struct autofs_dev_ioctl)

#define AUTOFS_DEV_IOCTL_FAIL \
        _IOWR(AUTOFS_IOCTL, \
              AUTOFS_DEV_IOCTL_FAIL_CMD, struct autofs_dev_ioctl)

#define AUTOFS_DEV_IOCTL_SETPIPEFD \
        _IOWR(AUTOFS_IOCTL, \
              AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, struct autofs_dev_ioctl)

#define AUTOFS_DEV_IOCTL_CATATONIC \
        _IOWR(AUTOFS_IOCTL, \
              AUTOFS_DEV_IOCTL_CATATONIC_CMD, struct autofs_dev_ioctl)

#define AUTOFS_DEV_IOCTL_TIMEOUT \
        _IOWR(AUTOFS_IOCTL, \
              AUTOFS_DEV_IOCTL_TIMEOUT_CMD, struct autofs_dev_ioctl)

#define AUTOFS_DEV_IOCTL_REQUESTER \
        _IOWR(AUTOFS_IOCTL, \
              AUTOFS_DEV_IOCTL_REQUESTER_CMD, struct autofs_dev_ioctl)

#define AUTOFS_DEV_IOCTL_EXPIRE \
        _IOWR(AUTOFS_IOCTL, \
              AUTOFS_DEV_IOCTL_EXPIRE_CMD, struct autofs_dev_ioctl)

#define AUTOFS_DEV_IOCTL_ASKUMOUNT \
        _IOWR(AUTOFS_IOCTL, \
              AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, struct autofs_dev_ioctl)

#define AUTOFS_DEV_IOCTL_ISMOUNTPOINT \
        _IOWR(AUTOFS_IOCTL, \
              AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, struct autofs_dev_ioctl)

#include <uapi/linux/auto_dev-ioctl.h>
#endif  /* _LINUX_AUTO_DEV_IOCTL_H */

include/linux/auto_fs.h
@@ -10,7 +10,6 @@
#define _LINUX_AUTO_FS_H

#include <linux/fs.h>
#include <linux/limits.h>
#include <linux/ioctl.h>
#include <uapi/linux/auto_fs.h>
#endif /* _LINUX_AUTO_FS_H */

include/linux/bcma/bcma.h
@@ -205,6 +205,9 @@ struct bcma_host_ops {
#define BCMA_PKG_ID_BCM4709     0
#define BCMA_CHIP_ID_BCM47094   53030
#define BCMA_CHIP_ID_BCM53018   53018
#define BCMA_CHIP_ID_BCM53573   53573
#define BCMA_PKG_ID_BCM53573    0
#define BCMA_PKG_ID_BCM47189    1

/* Board types (on PCI usually equals to the subsystem dev id) */
/* BCM4313 */

include/linux/bcma/bcma_regs.h
@@ -10,6 +10,7 @@
#define  BCMA_CLKCTLST_HAVEALPREQ       0x00000008 /* ALP available request */
#define  BCMA_CLKCTLST_HAVEHTREQ        0x00000010 /* HT available request */
#define  BCMA_CLKCTLST_HWCROFF          0x00000020 /* Force HW clock request off */
#define  BCMA_CLKCTLST_HQCLKREQ         0x00000040 /* HQ Clock */
#define  BCMA_CLKCTLST_EXTRESREQ        0x00000700 /* Mask of external resource requests */
#define  BCMA_CLKCTLST_EXTRESREQ_SHIFT  8
#define  BCMA_CLKCTLST_HAVEALP          0x00010000 /* ALP available */
@@ -23,6 +24,7 @@
#define  BCMA_CLKCTLST_4328A0_HAVEALP   0x00020000 /* 4328a0 has reversed bits */

/* Agent registers (common for every core) */
#define BCMA_OOB_SEL_OUT_A30            0x0100
#define BCMA_IOCTL                      0x0408 /* IO control */
#define  BCMA_IOCTL_CLK                 0x0001
#define  BCMA_IOCTL_FGC                 0x0002

include/linux/bio.h
@@ -1,6 +1,4 @@
/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
@@ -461,6 +459,7 @@ static inline void bio_flush_dcache_pages(struct bio *bi)

extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
extern void bio_free_pages(struct bio *bio);

extern struct bio *bio_copy_user_iov(struct request_queue *,
                                     struct rq_map_data *,

include/linux/bitfield.h (new file, +93 lines)
@@ -0,0 +1,93 @@
/*
 * Copyright (C) 2014 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_BITFIELD_H
#define _LINUX_BITFIELD_H

#include <linux/bug.h>

/*
 * Bitfield access macros
 *
 * FIELD_{GET,PREP} macros take as first parameter shifted mask
 * from which they extract the base mask and shift amount.
 * Mask must be a compilation time constant.
 *
 * Example:
 *
 *  #define REG_FIELD_A  GENMASK(6, 0)
 *  #define REG_FIELD_B  BIT(7)
 *  #define REG_FIELD_C  GENMASK(15, 8)
 *  #define REG_FIELD_D  GENMASK(31, 16)
 *
 * Get:
 *  a = FIELD_GET(REG_FIELD_A, reg);
 *  b = FIELD_GET(REG_FIELD_B, reg);
 *
 * Set:
 *  reg = FIELD_PREP(REG_FIELD_A, 1) |
 *        FIELD_PREP(REG_FIELD_B, 0) |
 *        FIELD_PREP(REG_FIELD_C, c) |
 *        FIELD_PREP(REG_FIELD_D, 0x40);
 *
 * Modify:
 *  reg &= ~REG_FIELD_C;
 *  reg |= FIELD_PREP(REG_FIELD_C, c);
 */

#define __bf_shf(x) (__builtin_ffsll(x) - 1)

#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx)                       \
        ({                                                              \
                BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask),          \
                                 _pfx "mask is not constant");          \
                BUILD_BUG_ON_MSG(!(_mask), _pfx "mask is zero");        \
                BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ?           \
                                 ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
                                 _pfx "value too large for the field"); \
                BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull,         \
                                 _pfx "type of reg too small for mask"); \
                __BUILD_BUG_ON_NOT_POWER_OF_2((_mask) +                 \
                                              (1ULL << __bf_shf(_mask))); \
        })

/**
 * FIELD_PREP() - prepare a bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_val:  value to put in the field
 *
 * FIELD_PREP() masks and shifts up the value.  The result should
 * be combined with other fields of the bitfield using logical OR.
 */
#define FIELD_PREP(_mask, _val)                                         \
        ({                                                              \
                __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: ");    \
                ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask);   \
        })

/**
 * FIELD_GET() - extract a bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_reg:  32bit value of entire bitfield
 *
 * FIELD_GET() extracts the field specified by @_mask from the
 * bitfield passed in as @_reg by masking and shifting it down.
 */
#define FIELD_GET(_mask, _reg)                                          \
        ({                                                              \
                __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: ");       \
                (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
        })

#endif

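A short usage sketch of the new macros against a hypothetical 32-bit register layout (the STAT_* fields are made up; GENMASK() and BIT() come from <linux/bitops.h>):

#include <linux/bitfield.h>
#include <linux/bitops.h>       /* GENMASK(), BIT() */

#define STAT_READY      BIT(0)          /* hypothetical 1-bit flag */
#define STAT_SPEED      GENMASK(7, 4)   /* hypothetical 4-bit field */

static u32 stat_encode(u32 speed)
{
        /* FIELD_PREP() shifts the value into bits 7:4 and masks it */
        return FIELD_PREP(STAT_SPEED, speed) | FIELD_PREP(STAT_READY, 1);
}

static u32 stat_decode_speed(u32 reg)
{
        /* FIELD_GET() masks bits 7:4 and shifts them back down */
        return FIELD_GET(STAT_SPEED, reg);
}
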
include/linux/bitmap.h
@@ -339,6 +339,24 @@ static inline int bitmap_parse(const char *buf, unsigned int buflen,
        return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
}

/*
 * bitmap_from_u64 - Check and swap words within u64.
 *  @mask: source bitmap
 *  @dst:  destination bitmap
 *
 * In 32-bit Big Endian kernel, when using (u32 *)(&val)[*]
 * to read u64 mask, we will get the wrong word.
 * That is "(u32 *)(&val)[0]" gets the upper 32 bits,
 * but we expect the lower 32-bits of u64.
 */
static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
{
        dst[0] = mask & ULONG_MAX;

        if (sizeof(mask) > sizeof(unsigned long))
                dst[1] = mask >> 32;
}

#endif /* __ASSEMBLY__ */

#endif /* __LINUX_BITMAP_H */

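A concrete illustration of what the helper guarantees (the mask value is made up): reading a u64 as two u32 halves picks the wrong word on 32-bit big-endian, so the helper splits the value explicitly.

DECLARE_BITMAP(dst, 64);

bitmap_from_u64(dst, 0x00000001deadbeefULL);
/* 32-bit kernel: dst[0] == 0xdeadbeef, dst[1] == 0x1
 * 64-bit kernel: dst[0] == 0x00000001deadbeefUL */
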
include/linux/bitops.h
@@ -65,16 +65,6 @@ static inline int get_bitmask_order(unsigned int count)
        return order;   /* We could be slightly more clever with -1 here... */
}

static inline int get_count_order(unsigned int count)
{
        int order;

        order = fls(count) - 1;
        if (count & (count - 1))
                order++;
        return order;
}

static __always_inline unsigned long hweight_long(unsigned long w)
{
        return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
@@ -191,6 +181,32 @@ static inline unsigned fls_long(unsigned long l)
        return fls64(l);
}

static inline int get_count_order(unsigned int count)
{
        int order;

        order = fls(count) - 1;
        if (count & (count - 1))
                order++;
        return order;
}

/**
 * get_count_order_long - get order after rounding @l up to power of 2
 * @l: parameter
 *
 * it is same as get_count_order() but with long type parameter
 */
static inline int get_count_order_long(unsigned long l)
{
        if (l == 0UL)
                return -1;
        else if (l & (l - 1UL))
                return (int)fls_long(l);
        else
                return (int)fls_long(l) - 1;
}

/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word

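Worked values for the new helper, straight from the definition above: get_count_order_long(0) returns -1; get_count_order_long(16) takes the exact power-of-two branch and returns fls_long(16) - 1 = 4; get_count_order_long(17) returns fls_long(17) = 5, i.e. the order of the next power of two (32).
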
include/linux/blk-cgroup.h
@@ -45,7 +45,7 @@ struct blkcg {
        spinlock_t                      lock;

        struct radix_tree_root          blkg_tree;
        struct blkcg_gq                 *blkg_hint;
        struct blkcg_gq __rcu           *blkg_hint;
        struct hlist_head               blkg_list;

        struct blkcg_policy_data        *cpd[BLKCG_MAX_POLS];
@@ -343,16 +343,7 @@ static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
        char *p;

        p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
        if (!p) {
                strncpy(buf, "<unavailable>", buflen);
                return -ENAMETOOLONG;
        }

        memmove(buf, p, buf + buflen - p);
        return 0;
        return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**

include/linux/blk-mq-pci.h (new file, +9 lines)
@@ -0,0 +1,9 @@
#ifndef _LINUX_BLK_MQ_PCI_H
#define _LINUX_BLK_MQ_PCI_H

struct blk_mq_tag_set;
struct pci_dev;

int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev);

#endif /* _LINUX_BLK_MQ_PCI_H */

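A sketch of the intended use, wiring the helper into the map_queues callback that this commit adds to struct blk_mq_ops further down; the driver structure and names are hypothetical:

struct mydrv {
        struct pci_dev *pdev;
        struct blk_mq_tag_set tag_set;
};

static int mydrv_map_queues(struct blk_mq_tag_set *set)
{
        struct mydrv *drv = container_of(set, struct mydrv, tag_set);

        /* derive the ctx -> hctx mapping from the device's MSI-X affinity */
        return blk_mq_pci_map_queues(set, drv->pdev);
}

/* in the driver's blk_mq_ops (other callbacks elided):
 *      .map_queues = mydrv_map_queues,
 */
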
include/linux/blk-mq.h
@@ -2,31 +2,19 @@
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>

struct blk_mq_tags;
struct blk_flush_queue;

struct blk_mq_cpu_notifier {
        struct list_head list;
        void *data;
        int (*notify)(void *data, unsigned long action, unsigned int cpu);
};

struct blk_mq_ctxmap {
        unsigned int size;
        unsigned int bits_per_word;
        struct blk_align_bitmap *map;
};

struct blk_mq_hw_ctx {
        struct {
                spinlock_t              lock;
                struct list_head        dispatch;
                unsigned long           state;          /* BLK_MQ_S_* flags */
        } ____cacheline_aligned_in_smp;

        unsigned long           state;          /* BLK_MQ_S_* flags */
        struct delayed_work     run_work;
        struct delayed_work     delay_work;
        struct work_struct      run_work;
        cpumask_var_t           cpumask;
        int                     next_cpu;
        int                     next_cpu_batch;
@@ -38,10 +26,10 @@ struct blk_mq_hw_ctx {

        void                    *driver_data;

        struct blk_mq_ctxmap    ctx_map;
        struct sbitmap          ctx_map;

        unsigned int            nr_ctx;
        struct blk_mq_ctx       **ctxs;
        unsigned int            nr_ctx;

        atomic_t                wait_index;

@@ -49,7 +37,7 @@ struct blk_mq_hw_ctx {

        unsigned long           queued;
        unsigned long           run;
#define BLK_MQ_MAX_DISPATCH_ORDER       10
#define BLK_MQ_MAX_DISPATCH_ORDER       7
        unsigned long           dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

        unsigned int            numa_node;
@@ -57,14 +45,18 @@ struct blk_mq_hw_ctx {

        atomic_t                nr_active;

        struct blk_mq_cpu_notifier      cpu_notifier;
        struct delayed_work     delay_work;

        struct hlist_node       cpuhp_dead;
        struct kobject          kobj;

        unsigned long           poll_considered;
        unsigned long           poll_invoked;
        unsigned long           poll_success;
};

struct blk_mq_tag_set {
        unsigned int            *mq_map;
        struct blk_mq_ops       *ops;
        unsigned int            nr_hw_queues;
        unsigned int            queue_depth;    /* max hw supported */
@@ -88,7 +80,6 @@ struct blk_mq_queue_data {
};

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
@@ -102,6 +93,7 @@ typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
                bool);
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);


struct blk_mq_ops {
@@ -110,11 +102,6 @@ struct blk_mq_ops {
         */
        queue_rq_fn             *queue_rq;

        /*
         * Map to specific hardware queue
         */
        map_queue_fn            *map_queue;

        /*
         * Called on request timeout
         */
@@ -147,6 +134,8 @@ struct blk_mq_ops {
        init_request_fn         *init_request;
        exit_request_fn         *exit_request;
        reinit_request_fn       *reinit_request;

        map_queues_fn           *map_queues;
};

enum {
@@ -158,6 +147,7 @@ enum {
        BLK_MQ_F_TAG_SHARED     = 1 << 1,
        BLK_MQ_F_SG_MERGE       = 1 << 2,
        BLK_MQ_F_DEFER_ISSUE    = 1 << 4,
        BLK_MQ_F_BLOCKING       = 1 << 5,
        BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
        BLK_MQ_F_ALLOC_POLICY_BITS = 1,

@@ -178,8 +168,8 @@ enum {
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                                                  struct request_queue *q);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);
int blk_mq_register_dev(struct device *, struct request_queue *);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
@@ -201,7 +191,6 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op,
                unsigned int flags, unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);

enum {
        BLK_MQ_UNIQUE_TAG_BITS = 16,
@@ -220,8 +209,6 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
        return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}

struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);

int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
@@ -232,6 +219,7 @@ void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
void blk_mq_cancel_requeue_work(struct request_queue *q);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq, int error);

include/linux/blk_types.h
@@ -16,7 +16,6 @@ struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
typedef void (bio_destructor_t) (struct bio *);

#ifdef CONFIG_BLOCK
/*
@@ -89,14 +88,22 @@ struct bio {
        struct bio_vec          bi_inline_vecs[0];
};

#define BIO_OP_SHIFT    (8 * sizeof(unsigned int) - REQ_OP_BITS)
#define BIO_OP_SHIFT    (8 * FIELD_SIZEOF(struct bio, bi_opf) - REQ_OP_BITS)
#define bio_flags(bio)  ((bio)->bi_opf & ((1 << BIO_OP_SHIFT) - 1))
#define bio_op(bio)     ((bio)->bi_opf >> BIO_OP_SHIFT)

#define bio_set_op_attrs(bio, op, op_flags) do {                \
        WARN_ON(op >= (1 << REQ_OP_BITS));                      \
        (bio)->bi_opf &= ((1 << BIO_OP_SHIFT) - 1);             \
        (bio)->bi_opf |= ((unsigned int) (op) << BIO_OP_SHIFT); \
        (bio)->bi_opf |= op_flags;                              \
#define bio_set_op_attrs(bio, op, op_flags) do {                        \
        if (__builtin_constant_p(op))                                   \
                BUILD_BUG_ON((op) + 0U >= (1U << REQ_OP_BITS));         \
        else                                                            \
                WARN_ON_ONCE((op) + 0U >= (1U << REQ_OP_BITS));         \
        if (__builtin_constant_p(op_flags))                             \
                BUILD_BUG_ON((op_flags) + 0U >= (1U << BIO_OP_SHIFT));  \
        else                                                            \
                WARN_ON_ONCE((op_flags) + 0U >= (1U << BIO_OP_SHIFT));  \
        (bio)->bi_opf = bio_flags(bio);                                 \
        (bio)->bi_opf |= (((op) + 0U) << BIO_OP_SHIFT);                 \
        (bio)->bi_opf |= (op_flags);                                    \
} while (0)

#define BIO_RESET_BYTES         offsetof(struct bio, bi_max_vecs)

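For reference, a typical call site of the reworked macro; with compile-time-constant arguments the BUILD_BUG_ON() arms now reject out-of-range values at build time instead of a runtime WARN_ON() (the bio variable is assumed to be in scope):

/* constant op: range-checked at compile time by BUILD_BUG_ON() */
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
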
include/linux/blkdev.h
@@ -449,7 +449,7 @@ struct request_queue {

        struct list_head        requeue_list;
        spinlock_t              requeue_lock;
        struct work_struct      requeue_work;
        struct delayed_work     requeue_work;

        struct mutex            sysfs_lock;

@@ -1440,8 +1440,8 @@ static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
        return bio_will_gap(req->q, bio, req->bio);
}

struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

include/linux/blockgroup_lock.h
@@ -10,28 +10,10 @@
#include <linux/cache.h>

#ifdef CONFIG_SMP

/*
 * We want a power-of-two.  Is there a better way than this?
 */

#if NR_CPUS >= 32
#define NR_BG_LOCKS     128
#elif NR_CPUS >= 16
#define NR_BG_LOCKS     64
#elif NR_CPUS >= 8
#define NR_BG_LOCKS     32
#elif NR_CPUS >= 4
#define NR_BG_LOCKS     16
#elif NR_CPUS >= 2
#define NR_BG_LOCKS     8
#define NR_BG_LOCKS     (4 << ilog2(NR_CPUS < 32 ? NR_CPUS : 32))
#else
#define NR_BG_LOCKS     4
#endif

#else   /* CONFIG_SMP */
#define NR_BG_LOCKS     1
#endif  /* CONFIG_SMP */
#endif

struct bgl_lock {
        spinlock_t lock;
@@ -49,14 +31,10 @@ static inline void bgl_lock_init(struct blockgroup_lock *bgl)
                spin_lock_init(&bgl->locks[i].lock);
}

/*
 * The accessor is a macro so we can embed a blockgroup_lock into different
 * superblock types
 */
static inline spinlock_t *
bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group)
{
        return &bgl->locks[(block_group) & (NR_BG_LOCKS-1)].lock;
        return &bgl->locks[block_group & (NR_BG_LOCKS-1)].lock;
}

#endif

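Sanity check of the replacement formula: NR_BG_LOCKS = 4 << ilog2(min(NR_CPUS, 32)) yields 8 for NR_CPUS = 2, 16 for 4, 32 for 8, 64 for 16, and caps at 4 << 5 = 128 from 32 up, matching the removed #elif ladder exactly while staying a power of two by construction.
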
include/linux/bootmem.h
@@ -7,6 +7,7 @@
#include <linux/mmzone.h>
#include <linux/mm_types.h>
#include <asm/dma.h>
#include <asm/processor.h>

/*
 *  simple boot-time physical memory area allocator.
@@ -119,6 +120,10 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
#define BOOTMEM_LOW_LIMIT __pa(MAX_DMA_ADDRESS)
#endif

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

#define alloc_bootmem(x) \
        __alloc_bootmem(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_align(x, align) \
@@ -180,10 +185,6 @@ static inline void * __init memblock_virt_alloc_nopanic(
                                                    NUMA_NO_NODE);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

static inline void * __init memblock_virt_alloc_low(
                                        phys_addr_t size, phys_addr_t align)
{

include/linux/bpf.h
@@ -96,6 +96,7 @@ enum bpf_return_type {
struct bpf_func_proto {
        u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
        bool gpl_only;
        bool pkt_access;
        enum bpf_return_type ret_type;
        enum bpf_arg_type arg1_type;
        enum bpf_arg_type arg2_type;
@@ -138,6 +139,13 @@ enum bpf_reg_type {
         */
        PTR_TO_PACKET,
        PTR_TO_PACKET_END,       /* skb->data + headlen */

        /* PTR_TO_MAP_VALUE_ADJ is used for doing pointer math inside of a map
         * elem value.  We only allow this if we can statically verify that
         * access from this register are going to fall within the size of the
         * map element.
         */
        PTR_TO_MAP_VALUE_ADJ,
};

struct bpf_prog;
@@ -151,7 +159,8 @@ struct bpf_verifier_ops {
         */
        bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
                                enum bpf_reg_type *reg_type);

        int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
                            const struct bpf_prog *prog);
        u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
                                  int src_reg, int ctx_off,
                                  struct bpf_insn *insn, struct bpf_prog *prog);
@@ -297,6 +306,10 @@ static inline struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
static inline void bpf_prog_put(struct bpf_prog *prog)
{
}
static inline struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
        return ERR_PTR(-EOPNOTSUPP);
}
#endif /* CONFIG_BPF_SYSCALL */

/* verifier prototypes for helper functions called from eBPF programs */

include/linux/bpf_verifier.h (new file, +102 lines)
@@ -0,0 +1,102 @@
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/filter.h> /* for MAX_BPF_STACK */

/* Just some arbitrary values so we can safely do math without overflowing and
 * are obviously wrong for any sort of memory access.
 */
#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024)
#define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024)

struct bpf_reg_state {
        enum bpf_reg_type type;
        /*
         * Used to determine if any memory access using this register will
         * result in a bad access.
         */
        u64 min_value, max_value;
        union {
                /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
                s64 imm;

                /* valid when type == PTR_TO_PACKET* */
                struct {
                        u32 id;
                        u16 off;
                        u16 range;
                };

                /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
                 *   PTR_TO_MAP_VALUE_OR_NULL
                 */
                struct bpf_map *map_ptr;
        };
};

enum bpf_stack_slot_type {
        STACK_INVALID,    /* nothing was stored in this stack slot */
        STACK_SPILL,      /* register spilled into stack */
        STACK_MISC        /* BPF program wrote some data into this slot */
};

#define BPF_REG_SIZE 8  /* size of eBPF register in bytes */

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_verifier_state {
        struct bpf_reg_state regs[MAX_BPF_REG];
        u8 stack_slot_type[MAX_BPF_STACK];
        struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
};

/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
        struct bpf_verifier_state state;
        struct bpf_verifier_state_list *next;
};

struct bpf_insn_aux_data {
        enum bpf_reg_type ptr_type;     /* pointer type for load/store insns */
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

struct bpf_verifier_env;
struct bpf_ext_analyzer_ops {
        int (*insn_hook)(struct bpf_verifier_env *env,
                         int insn_idx, int prev_insn_idx);
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
        struct bpf_prog *prog;          /* eBPF program being verified */
        struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
        int stack_size;                 /* number of states to be processed */
        struct bpf_verifier_state cur_state; /* current verifier state */
        struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
        const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
        void *analyzer_priv; /* pointer to external analyzer's private data */
        struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
        u32 used_map_cnt;               /* number of used maps */
        u32 id_gen;                     /* used to generate unique reg IDs */
        bool allow_ptr_leaks;
        bool seen_direct_write;
        bool varlen_map_value_access;
        struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
};

int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
                 void *priv);

#endif /* _LINUX_BPF_VERIFIER_H */

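A minimal sketch of the external-analyzer hook the new header exposes; the callback and wrapper below are hypothetical, while the ops structure and bpf_analyzer() prototype are the ones declared above:

static int my_insn_hook(struct bpf_verifier_env *env,
                        int insn_idx, int prev_insn_idx)
{
        /* called for each instruction the verifier walks; inspect
         * env->cur_state.regs[] here; a non-zero return aborts analysis */
        return 0;
}

static const struct bpf_ext_analyzer_ops my_analyzer_ops = {
        .insn_hook = my_insn_hook,
};

static int my_analyze(struct bpf_prog *prog)
{
        /* run the verifier over @prog with the hook attached */
        return bpf_analyzer(prog, &my_analyzer_ops, NULL);
}
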
include/linux/bug.h
@@ -13,6 +13,7 @@ enum bug_trap_type {
struct pt_regs;

#ifdef __CHECKER__
#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
#define BUILD_BUG_ON_ZERO(e) (0)
#define BUILD_BUG_ON_NULL(e) ((void*)0)
@@ -24,6 +25,8 @@ struct pt_regs;

#else /* __CHECKER__ */

/* Force a compilation error if a constant expression is not a power of 2 */
#define __BUILD_BUG_ON_NOT_POWER_OF_2(n)        \
        BUILD_BUG_ON(((n) & ((n) - 1)) != 0)
#define BUILD_BUG_ON_NOT_POWER_OF_2(n)          \
        BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))

include/linux/can/dev.h
@@ -32,6 +32,7 @@ enum can_mode {
 * CAN common private data
 */
struct can_priv {
        struct net_device *dev;
        struct can_device_stats can_stats;

        struct can_bittiming bittiming, data_bittiming;
@@ -47,7 +48,7 @@ struct can_priv {
        u32 ctrlmode_static;    /* static enabled options for driver/hardware */

        int restart_ms;
        struct timer_list restart_timer;
        struct delayed_work restart_work;

        int (*do_set_bittiming)(struct net_device *dev);
        int (*do_set_data_bittiming)(struct net_device *dev);

include/linux/ccp.h
@@ -238,9 +238,6 @@ struct ccp_xts_aes_engine {
};

/***** SHA engine *****/
#define CCP_SHA_BLOCKSIZE               SHA256_BLOCK_SIZE
#define CCP_SHA_CTXSIZE                 SHA256_DIGEST_SIZE

/**
 * ccp_sha_type - type of SHA operation
 *

include/linux/cec-funcs.h
@@ -162,10 +162,11 @@ static inline void cec_msg_standby(struct cec_msg *msg)


/* One Touch Record Feature */
static inline void cec_msg_record_off(struct cec_msg *msg)
static inline void cec_msg_record_off(struct cec_msg *msg, bool reply)
{
        msg->len = 2;
        msg->msg[1] = CEC_MSG_RECORD_OFF;
        msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
}

struct cec_op_arib_data {
@@ -227,7 +228,7 @@ static inline void cec_set_digital_service_id(__u8 *msg,
        if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) {
                *msg++ = (digital->channel.channel_number_fmt << 2) |
                         (digital->channel.major >> 8);
                *msg++ = digital->channel.major && 0xff;
                *msg++ = digital->channel.major & 0xff;
                *msg++ = digital->channel.minor >> 8;
                *msg++ = digital->channel.minor & 0xff;
                *msg++ = 0;
@@ -323,6 +324,7 @@ static inline void cec_msg_record_on_phys_addr(struct cec_msg *msg,
}

static inline void cec_msg_record_on(struct cec_msg *msg,
                                     bool reply,
                                     const struct cec_op_record_src *rec_src)
{
        switch (rec_src->type) {
@@ -346,6 +348,7 @@ static inline void cec_msg_record_on(struct cec_msg *msg,
                                            rec_src->ext_phys_addr.phys_addr);
                break;
        }
        msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
}

static inline void cec_ops_record_on(const struct cec_msg *msg,
@@ -1141,6 +1144,75 @@ static inline void cec_msg_give_device_vendor_id(struct cec_msg *msg,
        msg->reply = reply ? CEC_MSG_DEVICE_VENDOR_ID : 0;
}

static inline void cec_msg_vendor_command(struct cec_msg *msg,
                                          __u8 size, const __u8 *vendor_cmd)
{
        if (size > 14)
                size = 14;
        msg->len = 2 + size;
        msg->msg[1] = CEC_MSG_VENDOR_COMMAND;
        memcpy(msg->msg + 2, vendor_cmd, size);
}

static inline void cec_ops_vendor_command(const struct cec_msg *msg,
                                          __u8 *size,
                                          const __u8 **vendor_cmd)
{
        *size = msg->len - 2;

        if (*size > 14)
                *size = 14;
        *vendor_cmd = msg->msg + 2;
}

static inline void cec_msg_vendor_command_with_id(struct cec_msg *msg,
                                                  __u32 vendor_id, __u8 size,
                                                  const __u8 *vendor_cmd)
{
        if (size > 11)
                size = 11;
        msg->len = 5 + size;
        msg->msg[1] = CEC_MSG_VENDOR_COMMAND_WITH_ID;
        msg->msg[2] = vendor_id >> 16;
        msg->msg[3] = (vendor_id >> 8) & 0xff;
        msg->msg[4] = vendor_id & 0xff;
        memcpy(msg->msg + 5, vendor_cmd, size);
}

static inline void cec_ops_vendor_command_with_id(const struct cec_msg *msg,
                                                  __u32 *vendor_id, __u8 *size,
                                                  const __u8 **vendor_cmd)
{
        *size = msg->len - 5;

        if (*size > 11)
                *size = 11;
        *vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4];
        *vendor_cmd = msg->msg + 5;
}

static inline void cec_msg_vendor_remote_button_down(struct cec_msg *msg,
                                                     __u8 size,
                                                     const __u8 *rc_code)
{
        if (size > 14)
                size = 14;
        msg->len = 2 + size;
        msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN;
        memcpy(msg->msg + 2, rc_code, size);
}

static inline void cec_ops_vendor_remote_button_down(const struct cec_msg *msg,
                                                     __u8 *size,
                                                     const __u8 **rc_code)
{
        *size = msg->len - 2;

        if (*size > 14)
                *size = 14;
        *rc_code = msg->msg + 2;
}

static inline void cec_msg_vendor_remote_button_up(struct cec_msg *msg)
{
        msg->len = 2;
@@ -1277,7 +1349,7 @@ static inline void cec_msg_user_control_pressed(struct cec_msg *msg,
                msg->len += 4;
                msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) |
                              (ui_cmd->channel_identifier.major >> 8);
                msg->msg[4] = ui_cmd->channel_identifier.major && 0xff;
                msg->msg[4] = ui_cmd->channel_identifier.major & 0xff;
                msg->msg[5] = ui_cmd->channel_identifier.minor >> 8;
                msg->msg[6] = ui_cmd->channel_identifier.minor & 0xff;
                break;

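A short userspace sketch of the new vendor-command helpers (vendor ID and payload bytes are made up; cec_msg_init() is the existing initializer from linux/cec.h, assumed available):

static void send_vendor_cmd(void)
{
        struct cec_msg msg;
        const __u8 cmd[2] = { 0x01, 0x02 };

        cec_msg_init(&msg, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_TV);
        /* 24-bit vendor ID lands in bytes 2..4, payload (capped at 11) after */
        cec_msg_vendor_command_with_id(&msg, 0x000c03, sizeof(cmd), cmd);
        /* ... hand msg to the CEC_TRANSMIT ioctl ... */
}
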
include/linux/cec.h
@@ -364,7 +364,7 @@ struct cec_caps {
 * @num_log_addrs: how many logical addresses should be claimed. Set by the
 *      caller.
 * @vendor_id: the vendor ID of the device. Set by the caller.
 * @flags: set to 0.
 * @flags: flags.
 * @osd_name: the OSD name of the device. Set by the caller.
 * @primary_device_type: the primary device type for each logical address.
 *      Set by the caller.
@@ -389,6 +389,9 @@ struct cec_log_addrs {
        __u8 features[CEC_MAX_LOG_ADDRS][12];
};

/* Allow a fallback to unregistered */
#define CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK   (1 << 0)

/* Events */

/* Event that occurs when the adapter state changes */

include/linux/ceph/auth.h
@@ -104,7 +104,7 @@ extern int ceph_auth_build_hello(struct ceph_auth_client *ac,
extern int ceph_handle_auth_reply(struct ceph_auth_client *ac,
                                  void *buf, size_t len,
                                  void *reply_buf, size_t reply_len);
extern int ceph_entity_name_encode(const char *name, void **p, void *end);
int ceph_auth_entity_name_encode(const char *name, void **p, void *end);

extern int ceph_build_auth(struct ceph_auth_client *ac,
                           void *msg_buf, size_t msg_len);

include/linux/ceph/ceph_fs.h
@@ -138,6 +138,9 @@ struct ceph_dir_layout {
#define CEPH_MSG_POOLOP_REPLY           48
#define CEPH_MSG_POOLOP                 49

/* mon commands */
#define CEPH_MSG_MON_COMMAND            50
#define CEPH_MSG_MON_COMMAND_ACK        51

/* osd */
#define CEPH_MSG_OSD_MAP                41
@@ -176,6 +179,14 @@ struct ceph_mon_statfs_reply {
        struct ceph_statfs st;
} __attribute__ ((packed));

struct ceph_mon_command {
        struct ceph_mon_request_header monhdr;
        struct ceph_fsid fsid;
        __le32 num_strs;         /* always 1 */
        __le32 str_len;
        char str[];
} __attribute__ ((packed));

struct ceph_osd_getmap {
        struct ceph_mon_request_header monhdr;
        struct ceph_fsid fsid;
@@ -270,6 +281,7 @@ enum {
        CEPH_SESSION_FLUSHMSG,
        CEPH_SESSION_FLUSHMSG_ACK,
        CEPH_SESSION_FORCE_RO,
        CEPH_SESSION_REJECT,
};

extern const char *ceph_session_op_name(int op);

include/linux/ceph/cls_lock_client.h (new file, +49 lines)
@@ -0,0 +1,49 @@
#ifndef _LINUX_CEPH_CLS_LOCK_CLIENT_H
#define _LINUX_CEPH_CLS_LOCK_CLIENT_H

#include <linux/ceph/osd_client.h>

enum ceph_cls_lock_type {
	CEPH_CLS_LOCK_NONE = 0,
	CEPH_CLS_LOCK_EXCLUSIVE = 1,
	CEPH_CLS_LOCK_SHARED = 2,
};

struct ceph_locker_id {
	struct ceph_entity_name name;	/* locker's client name */
	char *cookie;			/* locker's cookie */
};

struct ceph_locker_info {
	struct ceph_entity_addr addr;	/* locker's address */
};

struct ceph_locker {
	struct ceph_locker_id id;
	struct ceph_locker_info info;
};

int ceph_cls_lock(struct ceph_osd_client *osdc,
		  struct ceph_object_id *oid,
		  struct ceph_object_locator *oloc,
		  char *lock_name, u8 type, char *cookie,
		  char *tag, char *desc, u8 flags);
int ceph_cls_unlock(struct ceph_osd_client *osdc,
		    struct ceph_object_id *oid,
		    struct ceph_object_locator *oloc,
		    char *lock_name, char *cookie);
int ceph_cls_break_lock(struct ceph_osd_client *osdc,
			struct ceph_object_id *oid,
			struct ceph_object_locator *oloc,
			char *lock_name, char *cookie,
			struct ceph_entity_name *locker);

void ceph_free_lockers(struct ceph_locker *lockers, u32 num_lockers);

int ceph_cls_lock_info(struct ceph_osd_client *osdc,
		       struct ceph_object_id *oid,
		       struct ceph_object_locator *oloc,
		       char *lock_name, u8 *type, char **tag,
		       struct ceph_locker **lockers, u32 *num_lockers);

#endif

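Note: a hedged sketch of how a caller might take and release an exclusive class lock with the helpers declared above. The object name, pool number, cookie, and lock name are all invented for illustration; only the ceph_cls_lock()/ceph_cls_unlock() signatures come from the new header.

/* Sketch only: assumes a valid osdc; error handling trimmed for brevity. */
static int grab_image_lock(struct ceph_osd_client *osdc)
{
	struct ceph_object_id oid;
	struct ceph_object_locator oloc = { .pool = 2 };	/* illustrative pool */
	int ret;

	ceph_oid_init(&oid);
	ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "header.%s", "img0");
	if (ret)
		return ret;

	/* Take an exclusive lock named "rbd_lock" with a caller-chosen cookie */
	ret = ceph_cls_lock(osdc, &oid, &oloc, "rbd_lock",
			    CEPH_CLS_LOCK_EXCLUSIVE, "cookie-1", "", "", 0);
	if (!ret)
		ret = ceph_cls_unlock(osdc, &oid, &oloc, "rbd_lock", "cookie-1");

	ceph_oid_destroy(&oid);
	return ret;
}
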
@@ -264,7 +264,8 @@ extern struct ceph_client *ceph_create_client(struct ceph_options *opt,
					      void *private,
					      u64 supported_features,
					      u64 required_features);
extern u64 ceph_client_id(struct ceph_client *client);
struct ceph_entity_addr *ceph_client_addr(struct ceph_client *client);
u64 ceph_client_gid(struct ceph_client *client);
extern void ceph_destroy_client(struct ceph_client *client);
extern int __ceph_open_session(struct ceph_client *client,
			       unsigned long started);

@@ -141,6 +141,9 @@ int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what,
				ceph_monc_callback_t cb, u64 private_data);

int ceph_monc_blacklist_add(struct ceph_mon_client *monc,
			    struct ceph_entity_addr *client_addr);

extern int ceph_monc_open_session(struct ceph_mon_client *monc);

extern int ceph_monc_validate_auth(struct ceph_mon_client *monc);

@@ -120,6 +120,9 @@ struct ceph_osd_req_op {
			struct ceph_osd_data request_data;
			struct ceph_osd_data response_data;
		} notify;
		struct {
			struct ceph_osd_data response_data;
		} list_watchers;
		struct {
			u64 expected_object_size;
			u64 expected_write_size;
@@ -249,6 +252,12 @@ struct ceph_osd_linger_request {
	size_t *preply_len;
};

struct ceph_watch_item {
	struct ceph_entity_name name;
	u64 cookie;
	struct ceph_entity_addr addr;
};

struct ceph_osd_client {
	struct ceph_client *client;

@@ -346,7 +355,6 @@ extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
					struct page **pages, u64 length,
					u32 alignment, bool pages_from_pool,
					bool own_pages);

extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req,
				unsigned int which, u16 opcode,
				const char *class, const char *method);
@@ -389,6 +397,14 @@ extern void ceph_osdc_sync(struct ceph_osd_client *osdc);
extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc);
void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc);

int ceph_osdc_call(struct ceph_osd_client *osdc,
		   struct ceph_object_id *oid,
		   struct ceph_object_locator *oloc,
		   const char *class, const char *method,
		   unsigned int flags,
		   struct page *req_page, size_t req_len,
		   struct page *resp_page, size_t *resp_len);

extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			       struct ceph_vino vino,
			       struct ceph_file_layout *layout,
@@ -434,5 +450,10 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
		     size_t *preply_len);
int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
			  struct ceph_osd_linger_request *lreq);
int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
			    struct ceph_object_id *oid,
			    struct ceph_object_locator *oloc,
			    struct ceph_watch_item **watchers,
			    u32 *num_watchers);
#endif

@@ -97,7 +97,7 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);
@@ -497,6 +497,23 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
	return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
 * It follows all the same rules as cgroup_is_descendant, and only applies
 * to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
@@ -538,8 +555,7 @@ static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline char * __must_check cgroup_path(struct cgroup *cgrp, char *buf,
					      size_t buflen)
static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}
@@ -557,6 +573,7 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
@@ -574,6 +591,11 @@ static inline void cgroup_free(struct task_struct *p) {}
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}
#endif /* !CONFIG_CGROUPS */

/*
@@ -621,6 +643,7 @@ struct cgroup_namespace {
	atomic_t		count;
	struct ns_common	ns;
	struct user_namespace	*user_ns;
	struct ucounts		*ucounts;
	struct css_set          *root_cset;
};

@@ -634,8 +657,8 @@ struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
					struct user_namespace *user_ns,
					struct cgroup_namespace *old_ns);

char *cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		     struct cgroup_namespace *ns);
int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		   struct cgroup_namespace *ns);

#else /* !CONFIG_CGROUPS */

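Note: cgroup_path(), cgroup_path_ns(), and task_cgroup_path() now return an int instead of a pointer, matching the kernfs_path() convention of a length or negative errno. A hedged sketch of the new calling style (buffer size and log message are illustrative):

static void log_task_cgroup(struct task_struct *tsk)
{
	char buf[PATH_MAX];
	int ret;

	ret = task_cgroup_path(tsk, buf, sizeof(buf));
	if (ret < 0)	/* was: a NULL check on the returned pointer */
		return;
	pr_info("task %d in cgroup %s\n", task_pid_nr(tsk), buf);
}
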
@@ -772,7 +772,7 @@ struct clk_onecell_data {
};

struct clk_hw_onecell_data {
	size_t num;
	unsigned int num;
	struct clk_hw *hws[];
};

@@ -780,6 +780,18 @@ extern struct of_device_id __clk_of_table;

#define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn)

/*
 * Use this macro when you have a driver that requires two initialization
 * routines, one at of_clk_init(), and one at platform device probe
 */
#define CLK_OF_DECLARE_DRIVER(name, compat, fn) \
	static void name##_of_clk_init_driver(struct device_node *np) \
	{ \
		of_node_clear_flag(np, OF_POPULATED); \
		fn(np); \
	} \
	OF_DECLARE_1(clk, name, compat, name##_of_clk_init_driver)

#ifdef CONFIG_OF
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *args,
@@ -842,7 +854,7 @@ of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	return ERR_PTR(-ENOENT);
}
static inline int of_clk_get_parent_count(struct device_node *np)
static inline unsigned int of_clk_get_parent_count(struct device_node *np)
{
	return 0;
}

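Note: a hedged usage sketch of CLK_OF_DECLARE_DRIVER. Clearing OF_POPULATED makes the node eligible to match again as a platform device, so the same compatible can run an early of_clk_init() routine and a full driver probe later. The driver name and compatible string below are invented:

static void __init foo_clk_early_init(struct device_node *np)
{
	/* Early setup at of_clk_init() time, e.g. register fixed clocks */
}

/* Registers foo_clk_early_init for of_clk_init() while keeping the
 * node visible to a later platform-driver probe. */
CLK_OF_DECLARE_DRIVER(foo_clk, "vendor,foo-clk", foo_clk_early_init);
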
@@ -6,8 +6,10 @@
 * Lower value means higher priority, analogically to reclaim priority.
 */
enum compact_priority {
	COMPACT_PRIO_SYNC_FULL,
	MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_FULL,
	COMPACT_PRIO_SYNC_LIGHT,
	MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
	MIN_COMPACT_COSTLY_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
	DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
	COMPACT_PRIO_ASYNC,
	INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC
@@ -49,14 +51,37 @@ enum compact_result {
	COMPACT_CONTENDED,

	/*
	 * direct compaction partially compacted a zone and there might be
	 * suitable pages
	 * direct compaction terminated after concluding that the allocation
	 * should now succeed
	 */
	COMPACT_PARTIAL,
	COMPACT_SUCCESS,
};

struct alloc_context; /* in mm/internal.h */

/*
 * Number of free order-0 pages that should be available above given watermark
 * to make sure compaction has reasonable chance of not running out of free
 * pages that it needs to isolate as migration target during its work.
 */
static inline unsigned long compact_gap(unsigned int order)
{
	/*
	 * Although all the isolations for migration are temporary, compaction
	 * free scanner may have up to 1 << order pages on its list and then
	 * try to split an (order - 1) free page. At that point, a gap of
	 * 1 << order might not be enough, so it's safer to require twice that
	 * amount. Note that the number of pages on the list is also
	 * effectively limited by COMPACT_CLUSTER_MAX, as that's the maximum
	 * that the migrate scanner can have isolated on migrate list, and free
	 * scanner is only invoked when the number of isolated free pages is
	 * lower than that. But it's not worth to complicate the formula here
	 * as a bigger gap for higher orders than strictly necessary can also
	 * improve chances of compaction success.
	 */
	return 2UL << order;
}

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
@@ -70,7 +95,6 @@ extern int fragmentation_index(struct zone *zone, unsigned int order);
extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
		unsigned int order, unsigned int alloc_flags,
		const struct alloc_context *ac, enum compact_priority prio);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern enum compact_result compaction_suitable(struct zone *zone, int order,
		unsigned int alloc_flags, int classzone_idx);
@@ -89,7 +113,7 @@ static inline bool compaction_made_progress(enum compact_result result)
	 * that the compaction successfully isolated and migrated some
	 * pageblocks.
	 */
	if (result == COMPACT_PARTIAL)
	if (result == COMPACT_SUCCESS)
		return true;

	return false;
@@ -154,10 +178,6 @@ extern void kcompactd_stop(int nid);
extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);

#else
static inline void compact_pgdat(pg_data_t *pgdat, int order)
{
}

static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}

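Note: to make the compact_gap() reasoning concrete, a short worked example; the numbers follow directly from the 2UL << order formula in the hunk above:

/* compact_gap(order) == 2 * (1 << order) free order-0 pages:
 *
 *   order 0 ->    2 pages
 *   order 3 ->   16 pages
 *   order 9 -> 1024 pages (a 2MB THP with 4K pages needs a 4MB gap)
 *
 * i.e. compaction wants twice the allocation size available above the
 * watermark before it considers a zone worth scanning.
 */
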
@@ -432,7 +432,6 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,

asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);

extern __printf(1, 2) int compat_printk(const char *fmt, ...);
extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat);
extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set);

@@ -188,6 +188,13 @@
#endif /* GCC_VERSION >= 40300 */

#if GCC_VERSION >= 40500

#ifndef __CHECKER__
#ifdef LATENT_ENTROPY_PLUGIN
#define __latent_entropy __attribute__((latent_entropy))
#endif
#endif

/*
 * Mark a position in code as unreachable. This can be used to
 * suppress control flow warnings after asm blocks that transfer

@@ -182,6 +182,29 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define unreachable() do { } while (1)
#endif

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((section("___kentry" "+" #sym ), used))	\
	= (unsigned long)&sym;
#endif

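Note: a short sketch of what KENTRY is for, assuming a hypothetical handler that is only ever reached through a vector table rather than a C call:

/* Nothing in C references vec_irq_handler directly, so with dead-code
 * elimination (CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) the linker could
 * drop it. KENTRY() plants a pointer in a kept section so the symbol
 * survives garbage collection. */
void vec_irq_handler(void);	/* hypothetical, installed via vector table */
KENTRY(vec_irq_handler);
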
#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
@@ -406,6 +429,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
# define __attribute_const__	/* unimplemented */
#endif

#ifndef __latent_entropy
# define __latent_entropy
#endif

/*
 * Tell gcc if a function is cold. The compiler will assume any path
 * directly leading to the call is unlikely.
@@ -527,13 +554,14 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 * object's lifetime is managed by something other than RCU. That
 * "something other" might be reference counting or simple immortality.
 *
 * The seemingly unused size_t variable is to validate @p is indeed a pointer
 * type by making sure it can be dereferenced.
 * The seemingly unused variable ___typecheck_p validates that @p is
 * indeed a pointer type by using a pointer to typeof(*p) as the type.
 * Taking a pointer to typeof(*p) again is needed in case p is void *.
 */
#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = READ_ONCE(p); \
	size_t __maybe_unused __size_of_ptr = sizeof(*(p)); \
	typeof(*(p)) *___typecheck_p __maybe_unused; \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_________p1); \
})

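Note: the replaced typecheck deserves a concrete illustration. A hedged, GCC-extension-based sketch of why the old sizeof(*(p)) form broke for void * while the new form still rejects non-pointers:

static void typecheck_demo(void)
{
	void *vp = 0;

	/* New check: "typeof(*p) *" is still a valid declaration when p is
	 * void * (typeof(*vp) is void under the GCC extension): */
	typeof(*vp) *okp = vp;	/* compiles */
	(void)okp;

	/* The old sizeof(*(p)) form tripped over exactly this case, since
	 * sizeof on a void expression is not portable C. The new form still
	 * rejects genuine non-pointers, e.g.:
	 *
	 *	int i;
	 *	typeof(*i) *bad;	// error: invalid type argument of '*'
	 */
}
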
@@ -173,6 +173,12 @@ static inline void console_sysfs_notify(void)
#endif
extern bool console_suspend_enabled;

#ifdef CONFIG_OF
extern void console_set_by_of(void);
#else
static inline void console_set_by_of(void) {}
#endif

/* Suspend and resume console messages over PM events */
extern void suspend_console(void);
extern void resume_console(void);

@@ -232,8 +232,9 @@ struct coresight_ops_source {
	int (*cpu_id)(struct coresight_device *csdev);
	int (*trace_id)(struct coresight_device *csdev);
	int (*enable)(struct coresight_device *csdev,
		      struct perf_event_attr *attr, u32 mode);
	void (*disable)(struct coresight_device *csdev);
		      struct perf_event *event, u32 mode);
	void (*disable)(struct coresight_device *csdev,
			struct perf_event *event);
};

struct coresight_ops {

@@ -61,17 +61,8 @@ struct notifier_block;
#define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
#define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task,
					* not handling interrupts, soon dead.
					* Called on the dying cpu, interrupts
					* are already disabled. Must not
					* sleep, must not fail */
#define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
					* lock is dropped */
#define CPU_STARTING		0x000A /* CPU (unsigned)v soon running.
					* Called on the new cpu, just before
					* enabling interrupts. Must not sleep,
					* must not fail */
#define CPU_BROKEN		0x000B /* CPU (unsigned)v did not die properly,
					* perhaps due to preemption. */

@@ -86,9 +77,6 @@ struct notifier_block;
#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)
#define CPU_STARTING_FROZEN	(CPU_STARTING | CPU_TASKS_FROZEN)


#ifdef CONFIG_SMP
extern bool cpuhp_tasks_frozen;
@@ -228,7 +216,11 @@ static inline void cpu_hotplug_done(void) {}
#endif		/* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_PM_SLEEP_SMP
extern int disable_nonboot_cpus(void);
extern int freeze_secondary_cpus(int primary);
static inline int disable_nonboot_cpus(void)
{
	return freeze_secondary_cpus(0);
}
extern void enable_nonboot_cpus(void);
#else /* !CONFIG_PM_SLEEP_SMP */
static inline int disable_nonboot_cpus(void) { return 0; }
@@ -239,6 +231,11 @@ void cpu_startup_entry(enum cpuhp_state state);

void cpu_idle_poll_ctrl(bool enable);

/* Attach to any functions which should be considered cpuidle. */
#define __cpuidle	__attribute__((__section__(".cpuidle.text")))

bool cpu_in_idle(unsigned long pc);

void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);

@@ -1,6 +1,8 @@
#ifndef __CPUHOTPLUG_H
#define __CPUHOTPLUG_H

#include <linux/types.h>

enum cpuhp_state {
	CPUHP_OFFLINE,
	CPUHP_CREATE_THREADS,
@@ -14,15 +16,42 @@ enum cpuhp_state {
	CPUHP_PERF_SUPERH,
	CPUHP_X86_HPET_DEAD,
	CPUHP_X86_APB_DEAD,
	CPUHP_VIRT_NET_DEAD,
	CPUHP_SLUB_DEAD,
	CPUHP_MM_WRITEBACK_DEAD,
	CPUHP_SOFTIRQ_DEAD,
	CPUHP_NET_MVNETA_DEAD,
	CPUHP_CPUIDLE_DEAD,
	CPUHP_ARM64_FPSIMD_DEAD,
	CPUHP_ARM_OMAP_WAKE_DEAD,
	CPUHP_IRQ_POLL_DEAD,
	CPUHP_BLOCK_SOFTIRQ_DEAD,
	CPUHP_VIRT_SCSI_DEAD,
	CPUHP_ACPI_CPUDRV_DEAD,
	CPUHP_S390_PFAULT_DEAD,
	CPUHP_BLK_MQ_DEAD,
	CPUHP_WORKQUEUE_PREP,
	CPUHP_POWER_NUMA_PREPARE,
	CPUHP_HRTIMERS_PREPARE,
	CPUHP_PROFILE_PREPARE,
	CPUHP_X2APIC_PREPARE,
	CPUHP_SMPCFD_PREPARE,
	CPUHP_RELAY_PREPARE,
	CPUHP_SLAB_PREPARE,
	CPUHP_MD_RAID5_PREPARE,
	CPUHP_RCUTREE_PREP,
	CPUHP_CPUIDLE_COUPLED_PREPARE,
	CPUHP_POWERPC_PMAC_PREPARE,
	CPUHP_POWERPC_MMU_CTX_PREPARE,
	CPUHP_XEN_PREPARE,
	CPUHP_XEN_EVTCHN_PREPARE,
	CPUHP_NOTIFY_PREPARE,
	CPUHP_ARM_SHMOBILE_SCU_PREPARE,
	CPUHP_SH_SH3X_PREPARE,
	CPUHP_BLK_MQ_PREPARE,
	CPUHP_TIMERS_DEAD,
	CPUHP_NOTF_ERR_INJ_PREPARE,
	CPUHP_MIPS_SOC_PREPARE,
	CPUHP_BRINGUP_CPU,
	CPUHP_AP_IDLE_DEAD,
	CPUHP_AP_OFFLINE,
@@ -45,6 +74,8 @@ enum cpuhp_state {
	CPUHP_AP_PERF_METAG_STARTING,
	CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
	CPUHP_AP_ARM_VFP_STARTING,
	CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
	CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
	CPUHP_AP_PERF_ARM_STARTING,
	CPUHP_AP_ARM_L2X0_STARTING,
	CPUHP_AP_ARM_ARCH_TIMER_STARTING,
@@ -68,7 +99,6 @@ enum cpuhp_state {
	CPUHP_AP_ARM64_ISNDEP_STARTING,
	CPUHP_AP_SMPCFD_DYING,
	CPUHP_AP_X86_TBOOT_DYING,
	CPUHP_AP_NOTIFY_STARTING,
	CPUHP_AP_ONLINE,
	CPUHP_TEARDOWN_CPU,
	CPUHP_AP_ONLINE_IDLE,
@@ -86,6 +116,7 @@ enum cpuhp_state {
	CPUHP_AP_PERF_S390_SF_ONLINE,
	CPUHP_AP_PERF_ARM_CCI_ONLINE,
	CPUHP_AP_PERF_ARM_CCN_ONLINE,
	CPUHP_AP_PERF_ARM_L2X0_ONLINE,
	CPUHP_AP_WORKQUEUE_ONLINE,
	CPUHP_AP_RCUTREE_ONLINE,
	CPUHP_AP_NOTIFY_ONLINE,
@@ -99,7 +130,7 @@ enum cpuhp_state {

int __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu));
			int (*teardown)(unsigned int cpu), bool multi_instance);

/**
 * cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks
@@ -116,7 +147,7 @@ static inline int cpuhp_setup_state(enum cpuhp_state state,
				    int (*startup)(unsigned int cpu),
				    int (*teardown)(unsigned int cpu))
{
	return __cpuhp_setup_state(state, name, true, startup, teardown);
	return __cpuhp_setup_state(state, name, true, startup, teardown, false);
}

/**
@@ -135,7 +166,66 @@ static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
					    int (*startup)(unsigned int cpu),
					    int (*teardown)(unsigned int cpu))
{
	return __cpuhp_setup_state(state, name, false, startup, teardown);
	return __cpuhp_setup_state(state, name, false, startup, teardown,
				   false);
}

/**
 * cpuhp_setup_state_multi - Add callbacks for multi state
 * @state: The state for which the calls are installed
 * @name: Name of the callback.
 * @startup: startup callback function
 * @teardown: teardown callback function
 *
 * Sets the internal multi_instance flag and prepares a state to work as a multi
 * instance callback. No callbacks are invoked at this point. The callbacks are
 * invoked once an instance for this state is registered via
 * @cpuhp_state_add_instance or @cpuhp_state_add_instance_nocalls.
 */
static inline int cpuhp_setup_state_multi(enum cpuhp_state state,
					  const char *name,
					  int (*startup)(unsigned int cpu,
							 struct hlist_node *node),
					  int (*teardown)(unsigned int cpu,
							  struct hlist_node *node))
{
	return __cpuhp_setup_state(state, name, false,
				   (void *) startup,
				   (void *) teardown, true);
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke);

/**
 * cpuhp_state_add_instance - Add an instance for a state and invoke startup
 *                            callback.
 * @state: The state for which the instance is installed
 * @node: The node for this individual state.
 *
 * Installs the instance for the @state and invokes the startup callback on
 * the present cpus which have already reached the @state. The @state must have
 * been earlier marked as multi-instance by @cpuhp_setup_state_multi.
 */
static inline int cpuhp_state_add_instance(enum cpuhp_state state,
					   struct hlist_node *node)
{
	return __cpuhp_state_add_instance(state, node, true);
}

/**
 * cpuhp_state_add_instance_nocalls - Add an instance for a state without
 *                                    invoking the startup callback.
 * @state: The state for which the instance is installed
 * @node: The node for this individual state.
 *
 * Installs the instance for the @state. The @state must have been earlier
 * marked as multi-instance by @cpuhp_setup_state_multi.
 */
static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
						   struct hlist_node *node)
{
	return __cpuhp_state_add_instance(state, node, false);
}

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke);
@@ -162,6 +252,51 @@ static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
	__cpuhp_remove_state(state, false);
}

/**
 * cpuhp_remove_multi_state - Remove hotplug multi state callback
 * @state: The state for which the calls are removed
 *
 * Removes the callback functions from a multi state. This is the reverse of
 * cpuhp_setup_state_multi(). All instances should have been removed before
 * invoking this function.
 */
static inline void cpuhp_remove_multi_state(enum cpuhp_state state)
{
	__cpuhp_remove_state(state, false);
}

int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke);

/**
 * cpuhp_state_remove_instance - Remove hotplug instance from state and invoke
 *                               the teardown callback
 * @state: The state from which the instance is removed
 * @node: The node for this individual state.
 *
 * Removes the instance and invokes the teardown callback on the present cpus
 * which have already reached the @state.
 */
static inline int cpuhp_state_remove_instance(enum cpuhp_state state,
					      struct hlist_node *node)
{
	return __cpuhp_state_remove_instance(state, node, true);
}

/**
 * cpuhp_state_remove_instance_nocalls - Remove hotplug instance from state
 *                                       without invoking the teardown callback
 * @state: The state from which the instance is removed
 * @node: The node for this individual state.
 *
 * Removes the instance without invoking the teardown callback.
 */
static inline int cpuhp_state_remove_instance_nocalls(enum cpuhp_state state,
						      struct hlist_node *node)
{
	return __cpuhp_state_remove_instance(state, node, false);
}

#ifdef CONFIG_SMP
void cpuhp_online_idle(enum cpuhp_state state);
#else

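Note: a hedged sketch of the new multi-instance flow for a driver with several identical devices. The struct and names are invented for illustration, and the sketch assumes the dynamic CPUHP_AP_ONLINE_DYN range hands back the allocated state the same way it does for cpuhp_setup_state():

struct foo_dev {
	struct hlist_node node;	/* handed to the cpuhp core */
	/* ... per-device state ... */
};

static enum cpuhp_state foo_state;

static int foo_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct foo_dev *foo = container_of(node, struct foo_dev, node);

	/* per-device, per-cpu bringup using foo */
	return 0;
}

static int foo_probe_setup(struct foo_dev *foo)
{
	int ret;

	/* Registered once for the whole driver... */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "foo:online",
				      foo_cpu_online, NULL);
	if (ret < 0)
		return ret;
	foo_state = ret;

	/* ...then once per device instance. */
	return cpuhp_state_add_instance(foo_state, &foo->node);
}
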
@@ -26,15 +26,10 @@ struct inode;

/*
 * COW Supplementary groups list
 */
#define NGROUPS_SMALL		32
#define NGROUPS_PER_BLOCK	((unsigned int)(PAGE_SIZE / sizeof(kgid_t)))

struct group_info {
	atomic_t	usage;
	int		ngroups;
	int		nblocks;
	kgid_t		small_block[NGROUPS_SMALL];
	kgid_t		*blocks[0];
	kgid_t		gid[0];
};

/**
@@ -88,10 +83,6 @@ extern void set_groups(struct cred *, struct group_info *);
extern int groups_search(const struct group_info *, kgid_t);
extern bool may_setgroups(void);

/* access the groups "array" with this macro */
#define GROUP_AT(gi, i) \
	((gi)->blocks[(i) / NGROUPS_PER_BLOCK][(i) % NGROUPS_PER_BLOCK])

/*
 * The security context of a task
 *

@@ -22,7 +22,10 @@ extern const unsigned char _ctype[];
#define isalnum(c)	((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c)	((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c)	((__ismask(c)&(_C)) != 0)
#define isdigit(c)	((__ismask(c)&(_D)) != 0)
static inline int isdigit(int c)
{
	return '0' <= c && c <= '9';
}
#define isgraph(c)	((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c)	((__ismask(c)&(_L)) != 0)
#define isprint(c)	((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)

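Note: a short illustration of what the isdigit() change buys. The inline range check needs no _ctype[] table load, so the compiler can constant-fold it and evaluate the argument exactly once:

static int count_leading_digits(const char *s)
{
	int n = 0;

	/* '0' <= *s && *s <= '9' is a pure comparison: no _ctype[] lookup,
	 * and for constant arguments it folds away entirely. */
	while (isdigit(*s++))
		n++;
	return n;
}
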
@@ -6,13 +6,19 @@
#include <linux/radix-tree.h>
#include <asm/pgtable.h>

struct iomap_ops;

/* We use lowest available exceptional entry bit for locking */
#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)

ssize_t iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
		     struct iomap_ops *ops);
ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
		  get_block_t, dio_iodone_t, int flags);
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
int dax_truncate_page(struct inode *, loff_t from, get_block_t);
int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
		    struct iomap_ops *ops);
int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
void dax_wake_mapping_entry_waiter(struct address_space *mapping,

@@ -584,9 +584,10 @@ static inline struct dentry *d_real(struct dentry *dentry,
 * If dentry is on a union/overlay, then return the underlying, real inode.
 * Otherwise return d_inode().
 */
static inline struct inode *d_real_inode(struct dentry *dentry)
static inline struct inode *d_real_inode(const struct dentry *dentry)
{
	return d_backing_inode(d_real(dentry, NULL, 0));
	/* This usage of d_real() results in const dentry */
	return d_backing_inode(d_real((struct dentry *) dentry, NULL, 0));
}

@@ -45,6 +45,23 @@ extern struct dentry *arch_debugfs_dir;

extern struct srcu_struct debugfs_srcu;

/**
 * debugfs_real_fops - getter for the real file operation
 * @filp: a pointer to a struct file
 *
 * Must only be called under the protection established by
 * debugfs_use_file_start().
 */
static inline const struct file_operations *debugfs_real_fops(struct file *filp)
	__must_hold(&debugfs_srcu)
{
	/*
	 * Neither the pointer to the struct file_operations, nor its
	 * contents ever change -- srcu_dereference() is not needed here.
	 */
	return filp->f_path.dentry->d_fsdata;
}

#if defined(CONFIG_DEBUG_FS)

struct dentry *debugfs_create_file(const char *name, umode_t mode,

@@ -148,11 +148,6 @@ static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev)
	return -EINVAL;
}

static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
{
	return ERR_PTR(-EINVAL);
}

static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
					struct device *dev, int index)
{

@@ -590,6 +590,7 @@ extern struct ratelimit_state dm_ratelimit_state;
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	3

#define dm_sector_div64(x, y)( \
{ \

@@ -41,6 +41,7 @@ struct device_node;
struct fwnode_handle;
struct iommu_ops;
struct iommu_group;
struct iommu_fwspec;

struct bus_attribute {
	struct attribute	attr;
@@ -765,6 +766,7 @@ struct device_dma_parameters {
 *		gone away. This should be set by the allocator of the
 *		device (i.e. the bus driver that discovered the device).
 * @iommu_group: IOMMU group the device belongs to.
 * @iommu_fwspec: IOMMU-specific properties supplied by firmware.
 *
 * @offline_disabled: If set, the device is permanently online.
 * @offline:	Set after successful invocation of bus type's .offline().
@@ -849,6 +851,7 @@ struct device {

	void	(*release)(struct device *dev);
	struct iommu_group	*iommu_group;
	struct iommu_fwspec	*iommu_fwspec;

	bool			offline_disabled:1;
	bool			offline:1;

@@ -56,6 +56,13 @@ extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
extern void debug_dma_free_coherent(struct device *dev, size_t size,
				    void *virt, dma_addr_t addr);

extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
				   size_t size, int direction,
				   dma_addr_t dma_addr);

extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
				     size_t size, int direction);

extern void debug_dma_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dma_handle, size_t size,
					  int direction);
@@ -141,6 +148,18 @@ static inline void debug_dma_free_coherent(struct device *dev, size_t size,
{
}

static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
					  size_t size, int direction,
					  dma_addr_t dma_addr)
{
}

static inline void debug_dma_unmap_resource(struct device *dev,
					    dma_addr_t dma_addr, size_t size,
					    int direction)
{
}

static inline void debug_dma_sync_single_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 size_t size, int direction)

@@ -21,6 +21,7 @@

#ifdef CONFIG_IOMMU_DMA
#include <linux/iommu.h>
#include <linux/msi.h>

int iommu_dma_init(void);

@@ -29,7 +30,8 @@ int iommu_get_dma_cookie(struct iommu_domain *domain);
void iommu_put_dma_cookie(struct iommu_domain *domain);

/* Setup call for arch DMA mapping code */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size);
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
			  u64 size, struct device *dev);

/* General helpers for DMA-API <-> IOMMU-API interaction */
int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
@@ -62,9 +64,13 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
int iommu_dma_supported(struct device *dev, u64 mask);
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);

/* The DMA API isn't _quite_ the whole story, though... */
void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);

#else

struct iommu_domain;
struct msi_msg;

static inline int iommu_dma_init(void)
{
@@ -80,6 +86,10 @@ static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
{
}

static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
}

#endif	/* CONFIG_IOMMU_DMA */
#endif	/* __KERNEL__ */
#endif	/* __DMA_IOMMU_H */

@@ -56,6 +56,11 @@
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
@@ -95,6 +100,12 @@ struct dma_map_ops {
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir,
			unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
@@ -258,6 +269,41 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
@@ -718,7 +764,7 @@ static inline int dma_mmap_wc(struct device *dev,
#define dma_mmap_writecombine dma_mmap_wc
#endif

#ifdef CONFIG_NEED_DMA_MAP_STATE
#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)

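Note: a hedged sketch of what the new dma_map_resource() is for: handing a device a DMA address for another device's MMIO region (for example, a DMA engine writing into a peripheral FIFO), which is exactly the non-RAM case the BUG_ON above enforces. The register address below is invented:

/* Map a peripheral FIFO register window (not RAM) for slave DMA.
 * 0x4001a000 is an invented MMIO address used purely for illustration. */
static int setup_fifo_dma(struct device *dma_dev, dma_addr_t *out)
{
	dma_addr_t dma = dma_map_resource(dma_dev, 0x4001a000, SZ_4K,
					  DMA_TO_DEVICE, 0);

	if (dma_mapping_error(dma_dev, dma))
		return -ENOMEM;
	*out = dma;	/* program this into the DMA engine's dst register */
	return 0;
}
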
@@ -40,8 +40,13 @@ struct dw_dma_chip {
};

/* Export to the platform drivers */
#if IS_ENABLED(CONFIG_DW_DMAC_CORE)
int dw_dma_probe(struct dw_dma_chip *chip);
int dw_dma_remove(struct dw_dma_chip *chip);
#else
static inline int dw_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; }
static inline int dw_dma_remove(struct dw_dma_chip *chip) { return 0; }
#endif /* CONFIG_DW_DMAC_CORE */

/* DMA API extensions */
struct dw_desc;

@@ -41,8 +41,7 @@ struct hsu_dma_chip {
/* Export to the internal users */
int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr,
		       u32 *status);
irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
			   u32 status);
int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status);

/* Export to the platform drivers */
int hsu_dma_probe(struct hsu_dma_chip *chip);
@@ -53,10 +52,10 @@ static inline int hsu_dma_get_status(struct hsu_dma_chip *chip,
{
	return 0;
}
static inline irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip,
					 unsigned short nr, u32 status)
static inline int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
				 u32 status)
{
	return IRQ_NONE;
	return 0;
}
static inline int hsu_dma_probe(struct hsu_dma_chip *chip) { return -ENODEV; }
static inline int hsu_dma_remove(struct hsu_dma_chip *chip) { return 0; }

@@ -441,6 +441,21 @@ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);

typedef void (*dma_async_tx_callback)(void *dma_async_param);

enum dmaengine_tx_result {
	DMA_TRANS_NOERROR = 0,		/* SUCCESS */
	DMA_TRANS_READ_FAILED,		/* Source DMA read failed */
	DMA_TRANS_WRITE_FAILED,		/* Destination DMA write failed */
	DMA_TRANS_ABORTED,		/* Op never submitted / aborted */
};

struct dmaengine_result {
	enum dmaengine_tx_result result;
	u32 residue;
};

typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
				const struct dmaengine_result *result);

struct dmaengine_unmap_data {
	u8 map_cnt;
	u8 to_cnt;
@@ -478,6 +493,7 @@ struct dma_async_tx_descriptor {
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	int (*desc_free)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	dma_async_tx_callback_result callback_result;
	void *callback_param;
	struct dmaengine_unmap_data *unmap;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH

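Note: a hedged sketch of a completion handler using the new result-reporting callback; the transfer-context struct is invented for illustration:

struct xfer_ctx {
	size_t len;
	/* ... */
};

static void my_dma_done(void *param, const struct dmaengine_result *res)
{
	struct xfer_ctx *ctx = param;

	if (res->result != DMA_TRANS_NOERROR) {
		/* res->residue reports how many bytes were left unmoved */
		pr_warn("dma failed (%d), %u of %zu bytes not moved\n",
			res->result, res->residue, ctx->len);
		return;
	}
	/* complete the request normally */
}

/* Installed via desc->callback_result = my_dma_done; the plain
 * desc->callback stays NULL so the engine reports status instead of
 * a bare completion. */
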
@@ -20,6 +20,7 @@
#include <linux/ioport.h>
#include <linux/pfn.h>
#include <linux/pstore.h>
#include <linux/range.h>
#include <linux/reboot.h>
#include <linux/uuid.h>
#include <linux/screen_info.h>
@@ -37,6 +38,7 @@
#define EFI_WRITE_PROTECTED	( 8 | (1UL << (BITS_PER_LONG-1)))
#define EFI_OUT_OF_RESOURCES	( 9 | (1UL << (BITS_PER_LONG-1)))
#define EFI_NOT_FOUND		(14 | (1UL << (BITS_PER_LONG-1)))
#define EFI_ABORTED		(21 | (1UL << (BITS_PER_LONG-1)))
#define EFI_SECURITY_VIOLATION	(26 | (1UL << (BITS_PER_LONG-1)))

typedef unsigned long efi_status_t;
@@ -118,6 +120,15 @@ typedef struct {
	u32 imagesize;
} efi_capsule_header_t;

struct efi_boot_memmap {
	efi_memory_desc_t	**map;
	unsigned long		*map_size;
	unsigned long		*desc_size;
	u32			*desc_ver;
	unsigned long		*key_ptr;
	unsigned long		*buff_size;
};

/*
 * EFI capsule flags
 */
@@ -669,6 +680,18 @@ typedef struct {
	unsigned long tables;
} efi_system_table_t;

/*
 * Architecture independent structure for describing a memory map for the
 * benefit of efi_memmap_init_early(), saving us the need to pass four
 * parameters.
 */
struct efi_memory_map_data {
	phys_addr_t phys_map;
	unsigned long size;
	unsigned long desc_version;
	unsigned long desc_size;
};

struct efi_memory_map {
	phys_addr_t phys_map;
	void *map;
@@ -676,6 +699,12 @@ struct efi_memory_map {
	int nr_map;
	unsigned long desc_version;
	unsigned long desc_size;
	bool late;
};

struct efi_mem_range {
	struct range range;
	u64 attribute;
};

struct efi_fdt_params {
@@ -900,6 +929,16 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
}
#endif
extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);

extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
extern void __init efi_memmap_unmap(void);
extern int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map);
extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
					 struct range *range);
extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap,
				     void *buf, struct efi_mem_range *mem);

extern int efi_config_init(efi_config_table_type_t *arch_tables);
#ifdef CONFIG_EFI_ESRT
extern void __init efi_esrt_init(void);
@@ -915,6 +954,7 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int __init efi_uart_console_only (void);
extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern void efi_mem_reserve(phys_addr_t addr, u64 size);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
		struct resource *data_resource, struct resource *bss_resource);
extern void efi_reserve_boot_services(void);
@@ -946,7 +986,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
/* Iterate through an efi_memory_map */
#define for_each_efi_memory_desc_in_map(m, md)				   \
	for ((md) = (m)->map;						   \
	     ((void *)(md) + (m)->desc_size) <= (m)->map_end;		   \
	     (md) && ((void *)(md) + (m)->desc_size) <= (m)->map_end;	   \
	     (md) = (void *)(md) + (m)->desc_size)

/**
@@ -1127,12 +1167,6 @@ struct efivar_operations {
};

struct efivars {
	/*
	 * ->lock protects two things:
	 * 1) efivarfs_list and efivars_sysfs_list
	 * 2) ->ops calls
	 */
	spinlock_t lock;
	struct kset *kset;
	struct kobject *kobject;
	const struct efivar_operations *ops;
@@ -1273,8 +1307,8 @@ struct kobject *efivars_kobject(void);
int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
		void *data, bool duplicates, struct list_head *head);

void efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
void efivar_entry_remove(struct efivar_entry *entry);
int efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
int efivar_entry_remove(struct efivar_entry *entry);

int __efivar_entry_delete(struct efivar_entry *entry);
int efivar_entry_delete(struct efivar_entry *entry);
@@ -1291,7 +1325,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
			  bool block, unsigned long size, void *data);

void efivar_entry_iter_begin(void);
int efivar_entry_iter_begin(void);
void efivar_entry_iter_end(void);

int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
@@ -1327,7 +1361,6 @@ extern int efi_capsule_update(efi_capsule_header_t *capsule,

#ifdef CONFIG_EFI_RUNTIME_MAP
int efi_runtime_map_init(struct kobject *);
void efi_runtime_map_setup(void *, int, u32);
int efi_get_runtime_map_size(void);
int efi_get_runtime_map_desc_size(void);
int efi_runtime_map_copy(void *buf, size_t bufsz);
@@ -1337,9 +1370,6 @@ static inline int efi_runtime_map_init(struct kobject *kobj)
	return 0;
}

static inline void
efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) {}

static inline int efi_get_runtime_map_size(void)
{
	return 0;
@@ -1371,11 +1401,7 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
			  efi_loaded_image_t *image, int *cmd_line_len);

efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
				efi_memory_desc_t **map,
				unsigned long *map_size,
				unsigned long *desc_size,
				u32 *desc_ver,
				unsigned long *key_ptr);
				struct efi_boot_memmap *map);

efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
			   unsigned long size, unsigned long align,
@@ -1457,4 +1483,14 @@ extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
	arch_efi_call_virt_teardown();					\
})

typedef efi_status_t (*efi_exit_boot_map_processing)(
	efi_system_table_t *sys_table_arg,
	struct efi_boot_memmap *map,
	void *priv);

efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
				    void *handle,
				    struct efi_boot_memmap *map,
				    void *priv,
				    efi_exit_boot_map_processing priv_func);
#endif /* _LINUX_EFI_H */

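Note: a hedged sketch of the new efi_exit_boot_services() contract: the caller packs its map pointers into struct efi_boot_memmap and supplies a priv_func that runs after the final memory map is fetched but before boot services go away. The private struct and its contents are invented:

struct exit_priv {
	void *boot_params;	/* invented: whatever the stub must fix up */
};

static efi_status_t note_final_map(efi_system_table_t *sys_table_arg,
				   struct efi_boot_memmap *map,
				   void *priv)
{
	struct exit_priv *p = priv;

	/* Record *map->map_size / *map->desc_size into p->boot_params here;
	 * no further boot-services allocations are allowed after this point. */
	return EFI_SUCCESS;
}

/* Caller side (sketch):
 *	struct efi_boot_memmap map = {
 *		.map = &memory_map, .map_size = &map_size,
 *		.desc_size = &desc_size, .desc_ver = &desc_ver,
 *		.key_ptr = &key, .buff_size = &buff_size,
 *	};
 *	status = efi_exit_boot_services(sys_table, handle, &map,
 *					&priv, note_final_map);
 */
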
@@ -1,5 +1,6 @@
#ifndef _LINUX_EXPORT_H
#define _LINUX_EXPORT_H

/*
 * Export symbols from the kernel to modules. Forked from module.h
 * to reduce the amount of pointless cruft we feed to gcc when only
@@ -42,27 +43,26 @@ extern struct module __this_module;
#ifdef CONFIG_MODVERSIONS
/* Mark the CRC weak since genksyms apparently decides not to
 * generate a checksum for some symbols */
#define __CRC_SYMBOL(sym, sec)					\
	extern __visible void *__crc_##sym __attribute__((weak)); \
	static const unsigned long __kcrctab_##sym		\
	__used							\
	__attribute__((section("___kcrctab" sec "+" #sym), unused)) \
#define __CRC_SYMBOL(sym, sec)					\
	extern __visible void *__crc_##sym __attribute__((weak)); \
	static const unsigned long __kcrctab_##sym		\
	__used							\
	__attribute__((section("___kcrctab" sec "+" #sym), used)) \
	= (unsigned long) &__crc_##sym;
#else
#define __CRC_SYMBOL(sym, sec)
#endif

/* For every exported symbol, place a struct in the __ksymtab section */
#define ___EXPORT_SYMBOL(sym, sec)				\
	extern typeof(sym) sym;					\
	__CRC_SYMBOL(sym, sec)					\
	static const char __kstrtab_##sym[]			\
	__attribute__((section("__ksymtab_strings"), aligned(1))) \
	= VMLINUX_SYMBOL_STR(sym);				\
	extern const struct kernel_symbol __ksymtab_##sym;	\
	__visible const struct kernel_symbol __ksymtab_##sym	\
	__used							\
	__attribute__((section("___ksymtab" sec "+" #sym), unused)) \
#define ___EXPORT_SYMBOL(sym, sec)				\
	extern typeof(sym) sym;					\
	__CRC_SYMBOL(sym, sec)					\
	static const char __kstrtab_##sym[]			\
	__attribute__((section("__ksymtab_strings"), aligned(1))) \
	= VMLINUX_SYMBOL_STR(sym);				\
	static const struct kernel_symbol __ksymtab_##sym	\
	__used							\
	__attribute__((section("___ksymtab" sec "+" #sym), used)) \
	= { (unsigned long)&sym, __kstrtab_##sym }

#if defined(__KSYM_DEPS__)
@@ -78,7 +78,6 @@ extern struct module __this_module;

#elif defined(CONFIG_TRIM_UNUSED_KSYMS)

#include <linux/kconfig.h>
#include <generated/autoksyms.h>

#define __EXPORT_SYMBOL(sym, sec)				\

@@ -157,12 +157,13 @@ struct fid {
 * @fh_to_dentry is given a &struct super_block (@sb) and a file handle
 * fragment (@fh, @fh_len). It should return a &struct dentry which refers
 * to the same file that the file handle fragment refers to. If it cannot,
 * it should return a %NULL pointer if the file was found but no acceptable
 * &dentries were available, or an %ERR_PTR error code indicating why it
 * couldn't be found (e.g. %ENOENT or %ENOMEM). Any suitable dentry can be
 * returned including, if necessary, a new dentry created with d_alloc_root.
 * The caller can then find any other extant dentries by following the
 * d_alias links.
 * it should return a %NULL pointer if the file cannot be found, or an
 * %ERR_PTR error code of %ENOMEM if a memory allocation failure occurred.
 * Any other error code is treated like %NULL, and will cause an %ESTALE error
 * for callers of exportfs_decode_fh().
 * Any suitable dentry can be returned including, if necessary, a new dentry
 * created with d_alloc_root. The caller can then find any other extant
 * dentries by following the d_alias links.
 *
 * fh_to_parent:
 * Same as @fh_to_dentry, except that it returns a pointer to the parent

@@ -28,6 +28,15 @@
|
||||
|
||||
#include <linux/device.h>
|
||||
|
||||
/*
|
||||
* Define the type of supported external connectors
|
||||
*/
|
||||
#define EXTCON_TYPE_USB BIT(0) /* USB connector */
|
||||
#define EXTCON_TYPE_CHG BIT(1) /* Charger connector */
|
||||
#define EXTCON_TYPE_JACK BIT(2) /* Jack connector */
|
||||
#define EXTCON_TYPE_DISP BIT(3) /* Display connector */
|
||||
#define EXTCON_TYPE_MISC BIT(4) /* Miscellaneous connector */
|
||||
|
||||
/*
|
||||
* Define the unique id of supported external connectors
|
||||
*/
|
||||
@@ -44,6 +53,7 @@
|
||||
#define EXTCON_CHG_USB_ACA 8 /* Accessory Charger Adapter */
|
||||
#define EXTCON_CHG_USB_FAST 9
|
||||
#define EXTCON_CHG_USB_SLOW 10
|
||||
#define EXTCON_CHG_WPT 11 /* Wireless Power Transfer */
|
||||
|
||||
/* Jack external connector */
|
||||
#define EXTCON_JACK_MICROPHONE 20
|
||||
@@ -60,6 +70,8 @@
|
||||
#define EXTCON_DISP_MHL 41 /* Mobile High-Definition Link */
|
||||
#define EXTCON_DISP_DVI 42 /* Digital Visual Interface */
|
||||
#define EXTCON_DISP_VGA 43 /* Video Graphics Array */
|
||||
#define EXTCON_DISP_DP 44 /* Display Port */
|
||||
#define EXTCON_DISP_HMD 45 /* Head-Mounted Display */
|
||||
|
||||
/* Miscellaneous external connector */
|
||||
#define EXTCON_DOCK 60
|
||||
@@ -68,6 +80,85 @@
|
||||
|
||||
#define EXTCON_NUM 63
|
||||
|
||||
/*
|
||||
* Define the property of supported external connectors.
|
||||
*
|
||||
* When adding the new extcon property, they *must* have
|
||||
* the type/value/default information. Also, you *have to*
|
||||
* modify the EXTCON_PROP_[type]_START/END definitions
|
||||
* which mean the range of the supported properties
|
||||
* for each extcon type.
|
||||
*
|
||||
* The naming style of property
|
||||
* : EXTCON_PROP_[type]_[property name]
|
||||
*
|
||||
* EXTCON_PROP_USB_[property name] : USB property
|
||||
* EXTCON_PROP_CHG_[property name] : Charger property
|
||||
* EXTCON_PROP_JACK_[property name] : Jack property
|
||||
* EXTCON_PROP_DISP_[property name] : Display property
|
||||
*/
|
||||
|
||||
/*
|
||||
* Properties of EXTCON_TYPE_USB.
|
||||
*
|
||||
* - EXTCON_PROP_USB_VBUS
|
||||
* @type: integer (intval)
|
||||
* @value: 0 (low) or 1 (high)
|
||||
* @default: 0 (low)
|
||||
* - EXTCON_PROP_USB_TYPEC_POLARITY
|
||||
* @type: integer (intval)
|
||||
* @value: 0 (normal) or 1 (flip)
|
||||
* @default: 0 (normal)
|
||||
* - EXTCON_PROP_USB_SS (SuperSpeed)
|
||||
* @type: integer (intval)
|
||||
* @value: 0 (USB/USB2) or 1 (USB3)
|
||||
* @default: 0 (USB/USB2)
|
||||
*
|
||||
*/
|
||||
#define EXTCON_PROP_USB_VBUS 0
|
||||
#define EXTCON_PROP_USB_TYPEC_POLARITY 1
|
||||
#define EXTCON_PROP_USB_SS 2
|
||||
|
||||
#define EXTCON_PROP_USB_MIN 0
|
||||
#define EXTCON_PROP_USB_MAX 2
|
||||
#define EXTCON_PROP_USB_CNT (EXTCON_PROP_USB_MAX - EXTCON_PROP_USB_MIN + 1)
|
||||
|
||||
/* Properties of EXTCON_TYPE_CHG. */
|
||||
#define EXTCON_PROP_CHG_MIN 50
|
||||
#define EXTCON_PROP_CHG_MAX 50
|
||||
#define EXTCON_PROP_CHG_CNT (EXTCON_PROP_CHG_MAX - EXTCON_PROP_CHG_MIN + 1)
|
||||
|
||||
/* Properties of EXTCON_TYPE_JACK. */
|
||||
#define EXTCON_PROP_JACK_MIN 100
|
||||
#define EXTCON_PROP_JACK_MAX 100
|
||||
#define EXTCON_PROP_JACK_CNT (EXTCON_PROP_JACK_MAX - EXTCON_PROP_JACK_MIN + 1)
|
||||
|
||||
/*
|
||||
* Properties of EXTCON_TYPE_DISP.
|
||||
*
|
||||
* - EXTCON_PROP_DISP_HPD (Hot Plug Detect)
|
||||
* @type: integer (intval)
|
||||
* @value: 0 (no hpd) or 1 (hpd)
|
||||
* @default: 0 (no hpd)
|
||||
*
|
||||
*/
|
||||
#define EXTCON_PROP_DISP_HPD 150
|
||||
|
||||
/* Properties of EXTCON_TYPE_DISP. */
|
||||
#define EXTCON_PROP_DISP_MIN 150
|
||||
#define EXTCON_PROP_DISP_MAX 151
|
||||
#define EXTCON_PROP_DISP_CNT (EXTCON_PROP_DISP_MAX - EXTCON_PROP_DISP_MIN + 1)
|
||||
|
||||
/*
|
||||
* Define the type of property's value.
|
||||
*
|
||||
* Define the property's value as union type. Because each property
|
||||
* would need the different data type to store it.
|
||||
*/
|
||||
union extcon_property_value {
|
||||
int intval; /* type : integer (intval) */
|
||||
};
|
||||
|
||||
struct extcon_cable;
|
||||
|
||||
/**
|
||||
@@ -150,26 +241,42 @@ extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
|
||||
extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
|
||||
|
||||
/*
|
||||
* get/set/update_state access the 32b encoded state value, which represents
|
||||
* states of all possible cables of the multistate port. For example, if one
|
||||
* calls extcon_set_state(edev, 0x7), it may mean that all the three cables
|
||||
* are attached to the port.
|
||||
*/
|
||||
static inline u32 extcon_get_state(struct extcon_dev *edev)
|
||||
{
|
||||
return edev->state;
|
||||
}
|
||||
|
||||
extern int extcon_set_state(struct extcon_dev *edev, u32 state);
|
||||
extern int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state);

/*
 * get/set_cable_state access each bit of the 32b encoded state value.
 * get/set_state access each bit of the 32b encoded state value.
 * They are used to access the status of each cable based on the cable id.
 */
extern int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id);
extern int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
extern int extcon_get_state(struct extcon_dev *edev, unsigned int id);
extern int extcon_set_state(struct extcon_dev *edev, unsigned int id,
				   bool cable_state);
extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
				   bool cable_state);
/*
 * Synchronize the state and property data for a specific external connector.
 */
extern int extcon_sync(struct extcon_dev *edev, unsigned int id);

/*
 * get/set_property access the property value of each external connector.
 * They are used to access the property of each cable based on the property id.
 */
extern int extcon_get_property(struct extcon_dev *edev, unsigned int id,
				unsigned int prop,
				union extcon_property_value *prop_val);
extern int extcon_set_property(struct extcon_dev *edev, unsigned int id,
				unsigned int prop,
				union extcon_property_value prop_val);
extern int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
				unsigned int prop,
				union extcon_property_value prop_val);

/*
 * get/set_property_capability set the capability of the property for each
 * external connector. They are used to set the capability of the property
 * of each external connector based on the id and property.
 */
extern int extcon_get_property_capability(struct extcon_dev *edev,
				unsigned int id, unsigned int prop);
extern int extcon_set_property_capability(struct extcon_dev *edev,
				unsigned int id, unsigned int prop);
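Taken together, a driver would typically declare a property capability at probe time and then report values through it; a hedged sketch using the EXTCON_USB connector id and the USB properties defined earlier:

static int report_usb_attach(struct extcon_dev *edev)
{
	/* { .intval = 1 } means "flipped" per the TYPEC_POLARITY property */
	union extcon_property_value prop = { .intval = 1 };
	int ret;

	/* the capability is normally declared once at probe time:
	 * extcon_set_property_capability(edev, EXTCON_USB,
	 *				  EXTCON_PROP_USB_TYPEC_POLARITY);
	 */
	ret = extcon_set_property(edev, EXTCON_USB,
				  EXTCON_PROP_USB_TYPEC_POLARITY, prop);
	if (ret)
		return ret;

	/* set the attach bit and synchronize state plus properties */
	return extcon_set_state_sync(edev, EXTCON_USB, true);
}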

/*
 * Following APIs are to monitor every action of a notifier.
@@ -232,30 +339,57 @@ static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,

static inline void devm_extcon_dev_free(struct extcon_dev *edev) { }

static inline u32 extcon_get_state(struct extcon_dev *edev)
static inline int extcon_get_state(struct extcon_dev *edev, unsigned int id)
{
	return 0;
}

static inline int extcon_set_state(struct extcon_dev *edev, u32 state)
static inline int extcon_set_state(struct extcon_dev *edev, unsigned int id,
				bool cable_state)
{
	return 0;
}

static inline int extcon_update_state(struct extcon_dev *edev, u32 mask,
				u32 state)
static inline int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
				bool cable_state)
{
	return 0;
}

static inline int extcon_get_cable_state_(struct extcon_dev *edev,
				unsigned int id)
static inline int extcon_sync(struct extcon_dev *edev, unsigned int id)
{
	return 0;
}

static inline int extcon_set_cable_state_(struct extcon_dev *edev,
				unsigned int id, bool cable_state)
static inline int extcon_get_property(struct extcon_dev *edev, unsigned int id,
				unsigned int prop,
				union extcon_property_value *prop_val)
{
	return 0;
}
static inline int extcon_set_property(struct extcon_dev *edev, unsigned int id,
				unsigned int prop,
				union extcon_property_value prop_val)
{
	return 0;
}

static inline int extcon_set_property_sync(struct extcon_dev *edev,
				unsigned int id, unsigned int prop,
				union extcon_property_value prop_val)
{
	return 0;
}

static inline int extcon_get_property_capability(struct extcon_dev *edev,
				unsigned int id, unsigned int prop)
{
	return 0;
}

static inline int extcon_set_property_capability(struct extcon_dev *edev,
				unsigned int id, unsigned int prop)
{
	return 0;
}
@@ -320,4 +454,15 @@ static inline int extcon_unregister_interest(struct extcon_specific_cable_nb
{
	return -EINVAL;
}

static inline int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id)
{
	return extcon_get_state(edev, id);
}

static inline int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
				bool cable_state)
{
	return extcon_set_state_sync(edev, id, cable_state);
}
#endif /* __LINUX_EXTCON_H__ */
@@ -20,8 +20,8 @@

/**
 * struct adc_jack_cond - condition to use an extcon state
 * @state:		the corresponding extcon state (if 0, this struct
 *			denotes the last adc_jack_cond element among the array)
 * @id:			the unique id of each external connector
 * @min_adc:		min adc value for this condition
 * @max_adc:		max adc value for this condition
 *
@@ -33,7 +33,7 @@
 * because when no adc_jack_cond is met, state = 0 is automatically chosen.
 */
struct adc_jack_cond {
	u32 state;	/* extcon state value. 0 if invalid */
	unsigned int id;
	u32 min_adc;
	u32 max_adc;
};
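For illustration, a condition table for this struct might look like the following sketch (the ADC ranges are made up; a zero-filled entry terminates the array, matching the convention described above):

static struct adc_jack_cond my_jack_conds[] = {
	{ .id = EXTCON_JACK_HEADPHONE,  .min_adc = 0,   .max_adc = 400  },
	{ .id = EXTCON_JACK_MICROPHONE, .min_adc = 401, .max_adc = 1000 },
	{ /* sentinel */ },
};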

@@ -100,6 +100,7 @@ struct f2fs_super_block {
/*
 * For checkpoint
 */
#define CP_CRC_RECOVERY_FLAG	0x00000040
#define CP_FASTBOOT_FLAG	0x00000020
#define CP_FSCK_FLAG		0x00000010
#define CP_ERROR_FLAG		0x00000008

@@ -25,6 +25,7 @@ struct space_resv {
					 FALLOC_FL_PUNCH_HOLE |		\
					 FALLOC_FL_COLLAPSE_RANGE |	\
					 FALLOC_FL_ZERO_RANGE |		\
					 FALLOC_FL_INSERT_RANGE)
					 FALLOC_FL_INSERT_RANGE |	\
					 FALLOC_FL_UNSHARE_RANGE)

#endif /* _FALLOC_H_ */
@@ -30,12 +30,12 @@ struct fdtable {
	struct rcu_head rcu;
};

static inline bool close_on_exec(int fd, const struct fdtable *fdt)
static inline bool close_on_exec(unsigned int fd, const struct fdtable *fdt)
{
	return test_bit(fd, fdt->close_on_exec);
}

static inline bool fd_is_open(int fd, const struct fdtable *fdt)
static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt)
{
	return test_bit(fd, fdt->open_fds);
}
@@ -57,7 +57,7 @@ struct files_struct {
   * written part on a separate cache line in SMP
   */
	spinlock_t file_lock ____cacheline_aligned_in_smp;
	int next_fd;
	unsigned int next_fd;
	unsigned long close_on_exec_init[1];
	unsigned long open_fds_init[1];
	unsigned long full_fds_bits_init[1];
@@ -105,7 +105,7 @@ struct files_struct *get_files_struct(struct task_struct *);
void put_files_struct(struct files_struct *fs);
void reset_files_struct(struct files_struct *);
int unshare_files(struct files_struct **);
struct files_struct *dup_fd(struct files_struct *, int *);
struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
void do_close_on_exec(struct files_struct *);
int iterate_fd(struct files_struct *, unsigned,
		int (*)(const void *, struct file *, unsigned),
@@ -51,6 +51,16 @@ struct fence_array {

extern const struct fence_ops fence_array_ops;

/**
 * fence_is_array - check if a fence is from the array subclass
 *
 * Return true if it is a fence_array and false otherwise.
 */
static inline bool fence_is_array(struct fence *fence)
{
	return fence->ops == &fence_array_ops;
}

/**
 * to_fence_array - cast a fence to a fence_array
 * @fence: fence to cast to a fence_array

@@ -60,7 +60,7 @@ struct fence_cb;
 * implementer of the fence for its own purposes. Can be used in different
 * ways by different fence implementers, so do not rely on this.
 *
 * *) Since atomic bitops are used, this is not guaranteed to be the case.
 * Since atomic bitops are used, this is not guaranteed to be the case.
 * Particularly, if the bit was set, but fence_signal was called right
 * before this bit was set, it would have been able to set the
 * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
@@ -314,6 +314,70 @@ struct bpf_prog_aux;
		bpf_size;						\
})

#define BPF_SIZEOF(type)						\
	({								\
		const int __size = bytes_to_bpf_size(sizeof(type));	\
		BUILD_BUG_ON(__size < 0);				\
		__size;							\
	})

#define BPF_FIELD_SIZEOF(type, field)					\
	({								\
		const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
		BUILD_BUG_ON(__size < 0);				\
		__size;							\
	})

#define __BPF_MAP_0(m, v, ...) v
#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)

#define __BPF_REG_0(...) __BPF_PAD(5)
#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)

#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)

#define __BPF_CAST(t, a)						\
	(__force t)							\
	(__force							\
	 typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long), \
				      (unsigned long)0, (t)0))) a
#define __BPF_V void
#define __BPF_N

#define __BPF_DECL_ARGS(t, a) t a
#define __BPF_DECL_REGS(t, a) u64 a

#define __BPF_PAD(n)							\
	__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
		  u64, __ur_3, u64, __ur_4, u64, __ur_5)

#define BPF_CALL_x(x, name, ...)					\
	static __always_inline						\
	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__));	\
	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__))	\
	{								\
		return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
	}								\
	static __always_inline						\
	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))

#define BPF_CALL_0(name, ...)	BPF_CALL_x(0, name, __VA_ARGS__)
#define BPF_CALL_1(name, ...)	BPF_CALL_x(1, name, __VA_ARGS__)
#define BPF_CALL_2(name, ...)	BPF_CALL_x(2, name, __VA_ARGS__)
#define BPF_CALL_3(name, ...)	BPF_CALL_x(3, name, __VA_ARGS__)
#define BPF_CALL_4(name, ...)	BPF_CALL_x(4, name, __VA_ARGS__)
#define BPF_CALL_5(name, ...)	BPF_CALL_x(5, name, __VA_ARGS__)
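These wrappers let a helper be written with its natural C prototype while still exporting the five-u64 BPF calling convention; a hypothetical two-argument helper would be declared like this sketch:

BPF_CALL_2(bpf_skb_mark_shifted, struct sk_buff *, skb, u32, shift)
{
	/* arguments arrive as u64 registers; __BPF_CAST restores the types */
	return skb->mark >> shift;
}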

#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
include/linux/firmware/meson/meson_sm.h (new file, 31 lines)
@@ -0,0 +1,31 @@
/*
 * Copyright (C) 2016 Endless Mobile, Inc.
 * Author: Carlo Caione <carlo@endlessm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _MESON_SM_FW_H_
#define _MESON_SM_FW_H_

enum {
	SM_EFUSE_READ,
	SM_EFUSE_WRITE,
	SM_EFUSE_USER_MAX,
};

struct meson_sm_firmware;

int meson_sm_call(unsigned int cmd_index, u32 *ret, u32 arg0, u32 arg1,
		  u32 arg2, u32 arg3, u32 arg4);
int meson_sm_call_write(void *buffer, unsigned int b_size, unsigned int cmd_index,
			u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
int meson_sm_call_read(void *buffer, unsigned int cmd_index, u32 arg0, u32 arg1,
		       u32 arg2, u32 arg3, u32 arg4);

#endif /* _MESON_SM_FW_H_ */
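A hedged usage sketch of the read call; the header does not define what arg0..arg4 mean, so the offset/size interpretation below is an assumption:

static int read_efuse_sample(void)
{
	u8 buf[16];

	/* arg0/arg1 assumed to carry offset and size; remaining args unused */
	return meson_sm_call_read(buf, SM_EFUSE_READ, 0, sizeof(buf), 0, 0, 0);
}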

@@ -63,7 +63,7 @@ extern void __init files_maxfiles_init(void);

extern struct files_stat_struct files_stat;
extern unsigned long get_max_files(void);
extern int sysctl_nr_open;
extern unsigned int sysctl_nr_open;
extern struct inodes_stat_t inodes_stat;
extern int leases_enable, lease_break_time;
extern int sysctl_protected_symlinks;
@@ -224,6 +224,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define ATTR_KILL_PRIV	(1 << 14)
#define ATTR_OPEN	(1 << 15) /* Truncating from open(O_TRUNC) */
#define ATTR_TIMES_SET	(1 << 16)
#define ATTR_TOUCH	(1 << 17)

/*
 * Whiteout is represented by a char device. The following constants define the
@@ -439,8 +440,9 @@ struct address_space {
	unsigned long		nrexceptional;
	pgoff_t			writeback_index;/* writeback starts here */
	const struct address_space_operations *a_ops;	/* methods */
	unsigned long		flags;		/* error bits/gfp mask */
	unsigned long		flags;		/* error bits */
	spinlock_t		private_lock;	/* for use by the address_space */
	gfp_t			gfp_mask;	/* implicit gfp mask for allocations */
	struct list_head	private_list;	/* ditto */
	void			*private_data;	/* ditto */
} __attribute__((aligned(sizeof(long))));
@@ -591,6 +593,7 @@ is_uncached_acl(struct posix_acl *acl)
#define IOP_FASTPERM	0x0001
#define IOP_LOOKUP	0x0002
#define IOP_NOFOLLOW	0x0004
#define IOP_XATTR	0x0008

/*
 * Keep mostly read-only and often accessed (especially for
@@ -1064,6 +1067,18 @@ struct file_lock_context {

extern void send_sigio(struct fown_struct *fown, int fd, int band);

/*
 * Return the inode to use for locking
 *
 * For overlayfs this should be the overlay inode, not the real inode returned
 * by file_inode(). For any other fs file_inode(filp) and locks_inode(filp) are
 * equal.
 */
static inline struct inode *locks_inode(const struct file *f)
{
	return f->f_path.dentry->d_inode;
}

#ifdef CONFIG_FILE_LOCKING
extern int fcntl_getlk(struct file *, unsigned int, struct flock __user *);
extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
@@ -1251,7 +1266,7 @@ static inline struct dentry *file_dentry(const struct file *file)

static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	return locks_lock_inode_wait(file_inode(filp), fl);
	return locks_lock_inode_wait(locks_inode(filp), fl);
}

struct fasync_struct {
@@ -1459,6 +1474,7 @@ static inline void i_gid_write(struct inode *inode, gid_t gid)
}

extern struct timespec current_fs_time(struct super_block *sb);
extern struct timespec current_time(struct inode *inode);

/*
 * Snapshotting support.
@@ -1733,17 +1749,10 @@ struct inode_operations {
	int (*rmdir) (struct inode *,struct dentry *);
	int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
	int (*rename) (struct inode *, struct dentry *,
			struct inode *, struct dentry *);
	int (*rename2) (struct inode *, struct dentry *,
			struct inode *, struct dentry *, unsigned int);
	int (*setattr) (struct dentry *, struct iattr *);
	int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
	int (*setxattr) (struct dentry *, struct inode *,
			 const char *, const void *, size_t, int);
	ssize_t (*getxattr) (struct dentry *, struct inode *,
			     const char *, void *, size_t);
	ssize_t (*listxattr) (struct dentry *, char *, size_t);
	int (*removexattr) (struct dentry *, const char *);
	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
		      u64 len);
	int (*update_time)(struct inode *, struct timespec *, int);
@@ -2006,7 +2015,6 @@ enum file_time_flags {
	S_VERSION = 8,
};

extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
static inline void file_accessed(struct file *file)
{
@@ -2075,10 +2083,19 @@ struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			int flags, void *data);
extern struct dentry *mount_pseudo(struct file_system_type *, char *,
	const struct super_operations *ops,
	const struct dentry_operations *dops,
	unsigned long);
extern struct dentry *mount_pseudo_xattr(struct file_system_type *, char *,
	const struct super_operations *ops,
	const struct xattr_handler **xattr,
	const struct dentry_operations *dops,
	unsigned long);

static inline struct dentry *
mount_pseudo(struct file_system_type *fs_type, char *name,
	     const struct super_operations *ops,
	     const struct dentry_operations *dops, unsigned long magic)
{
	return mount_pseudo_xattr(fs_type, name, ops, NULL, dops, magic);
}

/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
#define fops_get(fops) \
@@ -2155,7 +2172,7 @@ static inline int mandatory_lock(struct inode *ino)

static inline int locks_verify_locked(struct file *file)
{
	if (mandatory_lock(file_inode(file)))
	if (mandatory_lock(locks_inode(file)))
		return locks_mandatory_locked(file);
	return 0;
}
@@ -2794,8 +2811,6 @@ extern void block_sync_page(struct page *page);
/* fs/splice.c */
extern ssize_t generic_file_splice_read(struct file *, loff_t *,
		struct pipe_inode_info *, size_t, unsigned int);
extern ssize_t default_file_splice_read(struct file *, loff_t *,
		struct pipe_inode_info *, size_t, unsigned int);
extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
		struct file *, loff_t *, size_t, unsigned int);
extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
@@ -2919,6 +2934,7 @@ extern int vfs_stat(const char __user *, struct kstat *);
extern int vfs_lstat(const char __user *, struct kstat *);
extern int vfs_fstat(unsigned int, struct kstat *);
extern int vfs_fstatat(int , const char __user *, struct kstat *, int);
extern const char *vfs_get_link(struct dentry *, struct delayed_call *);

extern int __generic_block_fiemap(struct inode *inode,
				  struct fiemap_extent_info *fieinfo,
@@ -2950,7 +2966,8 @@ extern int simple_open(struct inode *inode, struct file *file);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
extern int simple_rename(struct inode *, struct dentry *,
			 struct inode *, struct dentry *, unsigned int);
extern int noop_fsync(struct file *, loff_t, loff_t, int);
extern int simple_empty(struct dentry *);
extern int simple_readpage(struct file *file, struct page *page);
@@ -2995,7 +3012,7 @@ extern int buffer_migrate_page(struct address_space *,
#define buffer_migrate_page NULL
#endif

extern int inode_change_ok(const struct inode *, struct iattr *);
extern int setattr_prepare(struct dentry *, struct iattr *);
extern int inode_newsize_ok(const struct inode *, loff_t offset);
extern void setattr_copy(struct inode *inode, const struct iattr *attr);
@@ -111,23 +111,6 @@ struct fscrypt_completion_result {
	struct fscrypt_completion_result ecr = { \
		COMPLETION_INITIALIZER((ecr).completion), 0 }

static inline int fscrypt_key_size(int mode)
{
	switch (mode) {
	case FS_ENCRYPTION_MODE_AES_256_XTS:
		return FS_AES_256_XTS_KEY_SIZE;
	case FS_ENCRYPTION_MODE_AES_256_GCM:
		return FS_AES_256_GCM_KEY_SIZE;
	case FS_ENCRYPTION_MODE_AES_256_CBC:
		return FS_AES_256_CBC_KEY_SIZE;
	case FS_ENCRYPTION_MODE_AES_256_CTS:
		return FS_AES_256_CTS_KEY_SIZE;
	default:
		BUG();
	}
	return 0;
}

#define FS_FNAME_NUM_SCATTER_ENTRIES	4
#define FS_CRYPTO_BLOCK_SIZE		16
#define FS_FNAME_CRYPTO_DIGEST_SIZE	32
@@ -202,13 +185,6 @@ static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
	return (mode == FS_ENCRYPTION_MODE_AES_256_CTS);
}

static inline u32 fscrypt_validate_encryption_key_size(u32 mode, u32 size)
{
	if (size == fscrypt_key_size(mode))
		return size;
	return 0;
}

static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
{
	if (str->len == 1 && str->name[0] == '.')
@@ -274,8 +250,7 @@ extern void fscrypt_restore_control_page(struct page *);
extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t,
				 unsigned int);
/* policy.c */
extern int fscrypt_process_policy(struct inode *,
				  const struct fscrypt_policy *);
extern int fscrypt_process_policy(struct file *, const struct fscrypt_policy *);
extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *);
extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
extern int fscrypt_inherit_context(struct inode *, struct inode *,
@@ -345,7 +320,7 @@ static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p,
}

/* policy.c */
static inline int fscrypt_notsupp_process_policy(struct inode *i,
static inline int fscrypt_notsupp_process_policy(struct file *f,
				const struct fscrypt_policy *p)
{
	return -EOPNOTSUPP;
@@ -29,7 +29,11 @@ static inline int fsnotify_parent(struct path *path, struct dentry *dentry, __u3
static inline int fsnotify_perm(struct file *file, int mask)
{
	struct path *path = &file->f_path;
	struct inode *inode = file_inode(file);
	/*
	 * Do not use file_inode() here or anywhere in this file to get the
	 * inode. That would break *notify on overlayfs.
	 */
	struct inode *inode = path->dentry->d_inode;
	__u32 fsnotify_mask = 0;
	int ret;

@@ -173,7 +177,7 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
static inline void fsnotify_access(struct file *file)
{
	struct path *path = &file->f_path;
	struct inode *inode = file_inode(file);
	struct inode *inode = path->dentry->d_inode;
	__u32 mask = FS_ACCESS;

	if (S_ISDIR(inode->i_mode))
@@ -191,7 +195,7 @@ static inline void fsnotify_access(struct file *file)
static inline void fsnotify_modify(struct file *file)
{
	struct path *path = &file->f_path;
	struct inode *inode = file_inode(file);
	struct inode *inode = path->dentry->d_inode;
	__u32 mask = FS_MODIFY;

	if (S_ISDIR(inode->i_mode))
@@ -209,7 +213,7 @@ static inline void fsnotify_modify(struct file *file)
static inline void fsnotify_open(struct file *file)
{
	struct path *path = &file->f_path;
	struct inode *inode = file_inode(file);
	struct inode *inode = path->dentry->d_inode;
	__u32 mask = FS_OPEN;

	if (S_ISDIR(inode->i_mode))
@@ -225,7 +229,7 @@ static inline void fsnotify_open(struct file *file)
static inline void fsnotify_close(struct file *file)
{
	struct path *path = &file->f_path;
	struct inode *inode = file_inode(file);
	struct inode *inode = path->dentry->d_inode;
	fmode_t mode = file->f_mode;
	__u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;

@@ -135,7 +135,7 @@ struct fsnotify_group {
	const struct fsnotify_ops *ops;	/* how this group handles things */

	/* needed to send notification to userspace */
	struct mutex notification_mutex;	/* protect the notification_list */
	spinlock_t notification_lock;		/* protect the notification_list */
	struct list_head notification_list;	/* list of event_holder this group needs to send to userspace */
	wait_queue_head_t notification_waitq;	/* read() on the notification file blocks on this waitq */
	unsigned int q_len;			/* events on the queue */
@@ -148,6 +148,7 @@ struct fsnotify_group {
	#define FS_PRIO_1	1 /* fanotify content based access control */
	#define FS_PRIO_2	2 /* fanotify pre-content access */
	unsigned int priority;
	bool shutdown;		/* group is being shut down, don't queue more events */

	/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
	struct mutex mark_mutex;	/* protect marks_list */
@@ -176,10 +177,8 @@ struct fsnotify_group {
		struct fanotify_group_private_data {
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
			/* allows a group to block waiting for a userspace response */
			spinlock_t access_lock;
			struct list_head access_list;
			wait_queue_head_t access_waitq;
			atomic_t bypass_perm;
#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
			int f_flags;
			unsigned int max_marks;
@@ -292,6 +291,8 @@ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *op
extern void fsnotify_get_group(struct fsnotify_group *group);
/* drop reference on a group from fsnotify_alloc_group */
extern void fsnotify_put_group(struct fsnotify_group *group);
/* group destruction begins, stop queuing new events */
extern void fsnotify_group_stop_queueing(struct fsnotify_group *group);
/* destroy group */
extern void fsnotify_destroy_group(struct fsnotify_group *group);
/* fasync handler function */
@@ -304,8 +305,6 @@ extern int fsnotify_add_event(struct fsnotify_group *group,
			      struct fsnotify_event *event,
			      int (*merge)(struct list_head *,
					   struct fsnotify_event *));
/* Remove passed event from groups notification queue */
extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
/* true if the group notification queue is empty */
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */
@@ -794,8 +794,15 @@ struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
	unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	unsigned long *retp;
#endif
};

/*
@@ -807,7 +814,10 @@ extern void return_to_handler(void);

extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer);
			 unsigned long frame_pointer, unsigned long *retp);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp);

/*
 * Sometimes we don't want to trace a function with the function
@@ -870,6 +880,13 @@ static inline int task_curr_ret_stack(struct task_struct *tsk)
	return -1;
}

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
		      unsigned long *retp)
{
	return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -3,11 +3,34 @@


#ifdef CONFIG_FTRACE_NMI_ENTER
extern void ftrace_nmi_enter(void);
extern void ftrace_nmi_exit(void);
extern void arch_ftrace_nmi_enter(void);
extern void arch_ftrace_nmi_exit(void);
#else
static inline void ftrace_nmi_enter(void) { }
static inline void ftrace_nmi_exit(void) { }
static inline void arch_ftrace_nmi_enter(void) { }
static inline void arch_ftrace_nmi_exit(void) { }
#endif

#ifdef CONFIG_HWLAT_TRACER
extern bool trace_hwlat_callback_enabled;
extern void trace_hwlat_callback(bool enter);
#endif

static inline void ftrace_nmi_enter(void)
{
#ifdef CONFIG_HWLAT_TRACER
	if (trace_hwlat_callback_enabled)
		trace_hwlat_callback(true);
#endif
	arch_ftrace_nmi_enter();
}

static inline void ftrace_nmi_exit(void)
{
	arch_ftrace_nmi_exit();
#ifdef CONFIG_HWLAT_TRACER
	if (trace_hwlat_callback_enabled)
		trace_hwlat_callback(false);
#endif
}

#endif /* _LINUX_FTRACE_IRQ_H */
@@ -437,7 +437,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);

/* drivers/char/random.c */
extern void add_disk_randomness(struct gendisk *disk);
extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
extern void rand_initialize_disk(struct gendisk *disk);

static inline sector_t get_start_sect(struct block_device *bdev)
@@ -3,19 +3,18 @@

#include <linux/device.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/lockdep.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/kconfig.h>

struct gpio_desc;
struct of_phandle_args;
struct device_node;
struct seq_file;
struct gpio_device;
struct module;

#ifdef CONFIG_GPIOLIB

@@ -112,6 +111,10 @@ enum single_ended_mode {
 *	initialization, provided by GPIO driver
 * @irq_parent: GPIO IRQ chip parent/bank linux irq number,
 *	provided by GPIO driver
 * @irq_need_valid_mask: If set core allocates @irq_valid_mask with all
 *	bits set to one
 * @irq_valid_mask: If not %NULL holds bitmask of GPIOs which are valid to
 *	be included in IRQ domain of the chip
 * @lock_key: per GPIO IRQ chip lockdep class
 *
 * A gpio_chip can help platforms abstract various sources of GPIOs so
@@ -190,6 +193,8 @@ struct gpio_chip {
	irq_flow_handler_t	irq_handler;
	unsigned int		irq_default_type;
	int			irq_parent;
	bool			irq_need_valid_mask;
	unsigned long		*irq_valid_mask;
	struct lock_class_key	*lock_key;
#endif

@@ -236,6 +236,7 @@ struct hid_sensor_common {
	struct hid_sensor_hub_attribute_info report_state;
	struct hid_sensor_hub_attribute_info power_state;
	struct hid_sensor_hub_attribute_info sensitivity;
	struct work_struct work;
};

/* Convert from hid unit expo to regular exponent */

@@ -837,7 +837,7 @@ __u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
 */
static inline void hid_device_io_start(struct hid_device *hid) {
	if (hid->io_started) {
		dev_warn(&hid->dev, "io already started");
		dev_warn(&hid->dev, "io already started\n");
		return;
	}
	hid->io_started = true;
@@ -857,7 +857,7 @@ static inline void hid_device_io_start(struct hid_device *hid) {
 */
static inline void hid_device_io_stop(struct hid_device *hid) {
	if (!hid->io_started) {
		dev_warn(&hid->dev, "io already stopped");
		dev_warn(&hid->dev, "io already stopped\n");
		return;
	}
	hid->io_started = false;
@@ -87,6 +87,10 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

extern unsigned long transparent_hugepage_flags;

extern unsigned long thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

@@ -152,8 +156,8 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
	return is_huge_zero_page(pmd_page(pmd));
}

struct page *get_huge_zero_page(void);
void put_huge_zero_page(void);
struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

@@ -169,6 +173,9 @@ void put_huge_zero_page(void);
static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
@@ -213,9 +220,9 @@ static inline bool is_huge_zero_page(struct page *page)
	return false;
}

static inline void put_huge_zero_page(void)
static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	BUILD_BUG();
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,

@@ -90,7 +90,7 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
				struct vm_area_struct *vma,
@@ -450,8 +450,8 @@ static inline pgoff_t basepage_index(struct page *page)
	return __basepage_index(page);
}

extern void dissolve_free_huge_pages(unsigned long start_pfn,
				     unsigned long end_pfn);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);
static inline bool hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
@@ -518,7 +518,7 @@ static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}
#define dissolve_free_huge_pages(s, e)	do {} while (0)
#define dissolve_free_huge_pages(s, e)	0
#define hugepage_migration_supported(h)	false

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
@@ -29,7 +29,9 @@
 *			Returns the number of lower random bytes in "data".
 *			Must not be NULL.    *OBSOLETE*
 * @read:		New API. drivers can fill up to max bytes of data
 *			into the buffer. The buffer is aligned for any type.
 *			into the buffer. The buffer is aligned for any type
 *			and max is guaranteed to be >= to that alignment
 *			(either 4 or 8 depending on architecture).
 * @priv:		Private data, for use by the RNG driver.
 * @quality:		Estimation of true entropy in RNG's bitstream
 *			(per mill).
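A minimal sketch of a new-style @read implementation under those alignment guarantees; the driver name and sampling helper are hypothetical:

static int my_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	u32 sample = my_hw_sample();	/* hypothetical register read */
	size_t len = min_t(size_t, max, sizeof(sample));

	memcpy(data, &sample, len);	/* buffer is aligned, max >= 4 */
	return len;			/* bytes actually written */
}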
@@ -14,9 +14,341 @@
#ifndef _HWMON_H_
#define _HWMON_H_

#include <linux/bitops.h>

struct device;
struct attribute_group;

enum hwmon_sensor_types {
	hwmon_chip,
	hwmon_temp,
	hwmon_in,
	hwmon_curr,
	hwmon_power,
	hwmon_energy,
	hwmon_humidity,
	hwmon_fan,
	hwmon_pwm,
};

enum hwmon_chip_attributes {
	hwmon_chip_temp_reset_history,
	hwmon_chip_in_reset_history,
	hwmon_chip_curr_reset_history,
	hwmon_chip_power_reset_history,
	hwmon_chip_register_tz,
	hwmon_chip_update_interval,
	hwmon_chip_alarms,
};

#define HWMON_C_TEMP_RESET_HISTORY	BIT(hwmon_chip_temp_reset_history)
#define HWMON_C_IN_RESET_HISTORY	BIT(hwmon_chip_in_reset_history)
#define HWMON_C_CURR_RESET_HISTORY	BIT(hwmon_chip_curr_reset_history)
#define HWMON_C_POWER_RESET_HISTORY	BIT(hwmon_chip_power_reset_history)
#define HWMON_C_REGISTER_TZ		BIT(hwmon_chip_register_tz)
#define HWMON_C_UPDATE_INTERVAL		BIT(hwmon_chip_update_interval)
#define HWMON_C_ALARMS			BIT(hwmon_chip_alarms)

enum hwmon_temp_attributes {
	hwmon_temp_input = 0,
	hwmon_temp_type,
	hwmon_temp_lcrit,
	hwmon_temp_lcrit_hyst,
	hwmon_temp_min,
	hwmon_temp_min_hyst,
	hwmon_temp_max,
	hwmon_temp_max_hyst,
	hwmon_temp_crit,
	hwmon_temp_crit_hyst,
	hwmon_temp_emergency,
	hwmon_temp_emergency_hyst,
	hwmon_temp_alarm,
	hwmon_temp_lcrit_alarm,
	hwmon_temp_min_alarm,
	hwmon_temp_max_alarm,
	hwmon_temp_crit_alarm,
	hwmon_temp_emergency_alarm,
	hwmon_temp_fault,
	hwmon_temp_offset,
	hwmon_temp_label,
	hwmon_temp_lowest,
	hwmon_temp_highest,
	hwmon_temp_reset_history,
};

#define HWMON_T_INPUT		BIT(hwmon_temp_input)
#define HWMON_T_TYPE		BIT(hwmon_temp_type)
#define HWMON_T_LCRIT		BIT(hwmon_temp_lcrit)
#define HWMON_T_LCRIT_HYST	BIT(hwmon_temp_lcrit_hyst)
#define HWMON_T_MIN		BIT(hwmon_temp_min)
#define HWMON_T_MIN_HYST	BIT(hwmon_temp_min_hyst)
#define HWMON_T_MAX		BIT(hwmon_temp_max)
#define HWMON_T_MAX_HYST	BIT(hwmon_temp_max_hyst)
#define HWMON_T_CRIT		BIT(hwmon_temp_crit)
#define HWMON_T_CRIT_HYST	BIT(hwmon_temp_crit_hyst)
#define HWMON_T_EMERGENCY	BIT(hwmon_temp_emergency)
#define HWMON_T_EMERGENCY_HYST	BIT(hwmon_temp_emergency_hyst)
#define HWMON_T_MIN_ALARM	BIT(hwmon_temp_min_alarm)
#define HWMON_T_MAX_ALARM	BIT(hwmon_temp_max_alarm)
#define HWMON_T_CRIT_ALARM	BIT(hwmon_temp_crit_alarm)
#define HWMON_T_EMERGENCY_ALARM	BIT(hwmon_temp_emergency_alarm)
#define HWMON_T_FAULT		BIT(hwmon_temp_fault)
#define HWMON_T_OFFSET		BIT(hwmon_temp_offset)
#define HWMON_T_LABEL		BIT(hwmon_temp_label)
#define HWMON_T_LOWEST		BIT(hwmon_temp_lowest)
#define HWMON_T_HIGHEST		BIT(hwmon_temp_highest)
#define HWMON_T_RESET_HISTORY	BIT(hwmon_temp_reset_history)

enum hwmon_in_attributes {
	hwmon_in_input,
	hwmon_in_min,
	hwmon_in_max,
	hwmon_in_lcrit,
	hwmon_in_crit,
	hwmon_in_average,
	hwmon_in_lowest,
	hwmon_in_highest,
	hwmon_in_reset_history,
	hwmon_in_label,
	hwmon_in_alarm,
	hwmon_in_min_alarm,
	hwmon_in_max_alarm,
	hwmon_in_lcrit_alarm,
	hwmon_in_crit_alarm,
};

#define HWMON_I_INPUT		BIT(hwmon_in_input)
#define HWMON_I_MIN		BIT(hwmon_in_min)
#define HWMON_I_MAX		BIT(hwmon_in_max)
#define HWMON_I_LCRIT		BIT(hwmon_in_lcrit)
#define HWMON_I_CRIT		BIT(hwmon_in_crit)
#define HWMON_I_AVERAGE		BIT(hwmon_in_average)
#define HWMON_I_LOWEST		BIT(hwmon_in_lowest)
#define HWMON_I_HIGHEST		BIT(hwmon_in_highest)
#define HWMON_I_RESET_HISTORY	BIT(hwmon_in_reset_history)
#define HWMON_I_LABEL		BIT(hwmon_in_label)
#define HWMON_I_ALARM		BIT(hwmon_in_alarm)
#define HWMON_I_MIN_ALARM	BIT(hwmon_in_min_alarm)
#define HWMON_I_MAX_ALARM	BIT(hwmon_in_max_alarm)
#define HWMON_I_LCRIT_ALARM	BIT(hwmon_in_lcrit_alarm)
#define HWMON_I_CRIT_ALARM	BIT(hwmon_in_crit_alarm)

enum hwmon_curr_attributes {
	hwmon_curr_input,
	hwmon_curr_min,
	hwmon_curr_max,
	hwmon_curr_lcrit,
	hwmon_curr_crit,
	hwmon_curr_average,
	hwmon_curr_lowest,
	hwmon_curr_highest,
	hwmon_curr_reset_history,
	hwmon_curr_label,
	hwmon_curr_alarm,
	hwmon_curr_min_alarm,
	hwmon_curr_max_alarm,
	hwmon_curr_lcrit_alarm,
	hwmon_curr_crit_alarm,
};

#define HWMON_C_INPUT		BIT(hwmon_curr_input)
#define HWMON_C_MIN		BIT(hwmon_curr_min)
#define HWMON_C_MAX		BIT(hwmon_curr_max)
#define HWMON_C_LCRIT		BIT(hwmon_curr_lcrit)
#define HWMON_C_CRIT		BIT(hwmon_curr_crit)
#define HWMON_C_AVERAGE		BIT(hwmon_curr_average)
#define HWMON_C_LOWEST		BIT(hwmon_curr_lowest)
#define HWMON_C_HIGHEST		BIT(hwmon_curr_highest)
#define HWMON_C_RESET_HISTORY	BIT(hwmon_curr_reset_history)
#define HWMON_C_LABEL		BIT(hwmon_curr_label)
#define HWMON_C_ALARM		BIT(hwmon_curr_alarm)
#define HWMON_C_MIN_ALARM	BIT(hwmon_curr_min_alarm)
#define HWMON_C_MAX_ALARM	BIT(hwmon_curr_max_alarm)
#define HWMON_C_LCRIT_ALARM	BIT(hwmon_curr_lcrit_alarm)
#define HWMON_C_CRIT_ALARM	BIT(hwmon_curr_crit_alarm)

enum hwmon_power_attributes {
	hwmon_power_average,
	hwmon_power_average_interval,
	hwmon_power_average_interval_max,
	hwmon_power_average_interval_min,
	hwmon_power_average_highest,
	hwmon_power_average_lowest,
	hwmon_power_average_max,
	hwmon_power_average_min,
	hwmon_power_input,
	hwmon_power_input_highest,
	hwmon_power_input_lowest,
	hwmon_power_reset_history,
	hwmon_power_accuracy,
	hwmon_power_cap,
	hwmon_power_cap_hyst,
	hwmon_power_cap_max,
	hwmon_power_cap_min,
	hwmon_power_max,
	hwmon_power_crit,
	hwmon_power_label,
	hwmon_power_alarm,
	hwmon_power_cap_alarm,
	hwmon_power_max_alarm,
	hwmon_power_crit_alarm,
};

#define HWMON_P_AVERAGE			BIT(hwmon_power_average)
#define HWMON_P_AVERAGE_INTERVAL	BIT(hwmon_power_average_interval)
#define HWMON_P_AVERAGE_INTERVAL_MAX	BIT(hwmon_power_average_interval_max)
#define HWMON_P_AVERAGE_INTERVAL_MIN	BIT(hwmon_power_average_interval_min)
#define HWMON_P_AVERAGE_HIGHEST		BIT(hwmon_power_average_highest)
#define HWMON_P_AVERAGE_LOWEST		BIT(hwmon_power_average_lowest)
#define HWMON_P_AVERAGE_MAX		BIT(hwmon_power_average_max)
#define HWMON_P_AVERAGE_MIN		BIT(hwmon_power_average_min)
#define HWMON_P_INPUT			BIT(hwmon_power_input)
#define HWMON_P_INPUT_HIGHEST		BIT(hwmon_power_input_highest)
#define HWMON_P_INPUT_LOWEST		BIT(hwmon_power_input_lowest)
#define HWMON_P_RESET_HISTORY		BIT(hwmon_power_reset_history)
#define HWMON_P_ACCURACY		BIT(hwmon_power_accuracy)
#define HWMON_P_CAP			BIT(hwmon_power_cap)
#define HWMON_P_CAP_HYST		BIT(hwmon_power_cap_hyst)
#define HWMON_P_CAP_MAX			BIT(hwmon_power_cap_max)
#define HWMON_P_CAP_MIN			BIT(hwmon_power_cap_min)
#define HWMON_P_MAX			BIT(hwmon_power_max)
#define HWMON_P_CRIT			BIT(hwmon_power_crit)
#define HWMON_P_LABEL			BIT(hwmon_power_label)
#define HWMON_P_ALARM			BIT(hwmon_power_alarm)
#define HWMON_P_CAP_ALARM		BIT(hwmon_power_cap_alarm)
#define HWMON_P_MAX_ALARM		BIT(hwmon_power_max_alarm)
#define HWMON_P_CRIT_ALARM		BIT(hwmon_power_crit_alarm)

enum hwmon_energy_attributes {
	hwmon_energy_input,
	hwmon_energy_label,
};

#define HWMON_E_INPUT	BIT(hwmon_energy_input)
#define HWMON_E_LABEL	BIT(hwmon_energy_label)

enum hwmon_humidity_attributes {
	hwmon_humidity_input,
	hwmon_humidity_label,
	hwmon_humidity_min,
	hwmon_humidity_min_hyst,
	hwmon_humidity_max,
	hwmon_humidity_max_hyst,
	hwmon_humidity_alarm,
	hwmon_humidity_fault,
};

#define HWMON_H_INPUT		BIT(hwmon_humidity_input)
#define HWMON_H_LABEL		BIT(hwmon_humidity_label)
#define HWMON_H_MIN		BIT(hwmon_humidity_min)
#define HWMON_H_MIN_HYST	BIT(hwmon_humidity_min_hyst)
#define HWMON_H_MAX		BIT(hwmon_humidity_max)
#define HWMON_H_MAX_HYST	BIT(hwmon_humidity_max_hyst)
#define HWMON_H_ALARM		BIT(hwmon_humidity_alarm)
#define HWMON_H_FAULT		BIT(hwmon_humidity_fault)

enum hwmon_fan_attributes {
	hwmon_fan_input,
	hwmon_fan_label,
	hwmon_fan_min,
	hwmon_fan_max,
	hwmon_fan_div,
	hwmon_fan_pulses,
	hwmon_fan_target,
	hwmon_fan_alarm,
	hwmon_fan_min_alarm,
	hwmon_fan_max_alarm,
	hwmon_fan_fault,
};

#define HWMON_F_INPUT		BIT(hwmon_fan_input)
#define HWMON_F_LABEL		BIT(hwmon_fan_label)
#define HWMON_F_MIN		BIT(hwmon_fan_min)
#define HWMON_F_MAX		BIT(hwmon_fan_max)
#define HWMON_F_DIV		BIT(hwmon_fan_div)
#define HWMON_F_PULSES		BIT(hwmon_fan_pulses)
#define HWMON_F_TARGET		BIT(hwmon_fan_target)
#define HWMON_F_ALARM		BIT(hwmon_fan_alarm)
#define HWMON_F_MIN_ALARM	BIT(hwmon_fan_min_alarm)
#define HWMON_F_MAX_ALARM	BIT(hwmon_fan_max_alarm)
#define HWMON_F_FAULT		BIT(hwmon_fan_fault)

enum hwmon_pwm_attributes {
	hwmon_pwm_input,
	hwmon_pwm_enable,
	hwmon_pwm_mode,
	hwmon_pwm_freq,
};

#define HWMON_PWM_INPUT		BIT(hwmon_pwm_input)
#define HWMON_PWM_ENABLE	BIT(hwmon_pwm_enable)
#define HWMON_PWM_MODE		BIT(hwmon_pwm_mode)
#define HWMON_PWM_FREQ		BIT(hwmon_pwm_freq)

/**
 * struct hwmon_ops - hwmon device operations
 * @is_visible: Callback to return attribute visibility. Mandatory.
 *		Parameters are:
 *		@const void *drvdata:
 *			Pointer to driver-private data structure passed
 *			as argument to hwmon_device_register_with_info().
 *		@type:	Sensor type
 *		@attr:	Sensor attribute
 *		@channel:
 *			Channel number
 *		The function returns the file permissions.
 *		If the return value is 0, no attribute will be created.
 * @read:	Read callback. Optional. If not provided, attributes
 *		will not be readable.
 *		Parameters are:
 *		@dev:	Pointer to hardware monitoring device
 *		@type:	Sensor type
 *		@attr:	Sensor attribute
 *		@channel:
 *			Channel number
 *		@val:	Pointer to returned value
 *		The function returns 0 on success or a negative error number.
 * @write:	Write callback. Optional. If not provided, attributes
 *		will not be writable.
 *		Parameters are:
 *		@dev:	Pointer to hardware monitoring device
 *		@type:	Sensor type
 *		@attr:	Sensor attribute
 *		@channel:
 *			Channel number
 *		@val:	Value to write
 *		The function returns 0 on success or a negative error number.
 */
struct hwmon_ops {
	umode_t (*is_visible)(const void *drvdata, enum hwmon_sensor_types type,
			      u32 attr, int channel);
	int (*read)(struct device *dev, enum hwmon_sensor_types type,
		    u32 attr, int channel, long *val);
	int (*write)(struct device *dev, enum hwmon_sensor_types type,
		     u32 attr, int channel, long val);
};

/**
 * Channel information
 * @type:	Channel type.
 * @config:	Pointer to NULL-terminated list of channel parameters.
 *		Use for per-channel attributes.
 */
struct hwmon_channel_info {
	enum hwmon_sensor_types type;
	const u32 *config;
};

/**
 * Chip configuration
 * @ops:	Pointer to hwmon operations.
 * @info:	Null-terminated list of channel information.
 */
struct hwmon_chip_info {
	const struct hwmon_ops *ops;
	const struct hwmon_channel_info **info;
};

struct device *hwmon_device_register(struct device *dev);
struct device *
hwmon_device_register_with_groups(struct device *dev, const char *name,
@@ -26,6 +358,16 @@ struct device *
devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
				       void *drvdata,
				       const struct attribute_group **groups);
struct device *
hwmon_device_register_with_info(struct device *dev,
				const char *name, void *drvdata,
				const struct hwmon_chip_info *info,
				const struct attribute_group **groups);
struct device *
devm_hwmon_device_register_with_info(struct device *dev,
				     const char *name, void *drvdata,
				     const struct hwmon_chip_info *info,
				     const struct attribute_group **groups);

void hwmon_device_unregister(struct device *dev);
void devm_hwmon_device_unregister(struct device *dev);
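Putting the pieces together, a driver using the new registration API would describe its channels roughly like this sketch (a single temperature channel; all mychip_* names are hypothetical):

static const u32 mychip_temp_config[] = {
	HWMON_T_INPUT | HWMON_T_MAX,
	0				/* zero-terminated per-channel list */
};

static const struct hwmon_channel_info mychip_temp = {
	.type = hwmon_temp,
	.config = mychip_temp_config,
};

static const struct hwmon_channel_info *mychip_info[] = {
	&mychip_temp,
	NULL				/* NULL-terminated channel list */
};

static const struct hwmon_chip_info mychip_chip_info = {
	.ops = &mychip_hwmon_ops,	/* hypothetical hwmon_ops instance */
	.info = mychip_info,
};

/* in probe():
 * hwmon_dev = devm_hwmon_device_register_with_info(dev, "mychip", priv,
 *						     &mychip_chip_info, NULL);
 */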
@@ -674,6 +674,11 @@ enum hv_signal_policy {
	HV_SIGNAL_POLICY_EXPLICIT,
};

enum hv_numa_policy {
	HV_BALANCED = 0,
	HV_LOCALIZED,
};

enum vmbus_device_type {
	HV_IDE = 0,
	HV_SCSI,
@@ -701,9 +706,6 @@ struct vmbus_device {
};

struct vmbus_channel {
	/* Unique channel id */
	int id;

	struct list_head listentry;

	struct hv_device *device_obj;
@@ -850,6 +852,43 @@ struct vmbus_channel {
	 * ring lock to preserve the current behavior.
	 */
	bool acquire_ring_lock;
	/*
	 * For performance critical channels (storage, networking
	 * etc.), Hyper-V has a mechanism to enhance the throughput
	 * at the expense of latency:
	 * When the host is to be signaled, we just set a bit in a shared page
	 * and this bit will be inspected by the hypervisor within a certain
	 * window and if the bit is set, the host will be signaled. The window
	 * of time is the monitor latency - currently around 100 usecs. This
	 * mechanism improves throughput by:
	 *
	 * A) Making the host more efficient - each time it wakes up,
	 *    potentially it will process more packets. The
	 *    monitor latency allows a batch to build up.
	 * B) By deferring the hypercall to signal, we will also minimize
	 *    the interrupts.
	 *
	 * Clearly, these optimizations improve throughput at the expense of
	 * latency. Furthermore, since the channel is shared for both
	 * control and data messages, control messages currently suffer
	 * unnecessary latency adversely impacting performance and boot
	 * time. To fix this issue, permit tagging the channel as being
	 * in "low latency" mode. In this mode, we will bypass the monitor
	 * mechanism.
	 */
	bool low_latency;

	/*
	 * NUMA distribution policy:
	 * We support two policies:
	 * 1) Balanced: Here all performance critical channels are
	 *    distributed evenly amongst all the NUMA nodes.
	 *    This policy will be the default policy.
	 * 2) Localized: All channels of a given instance of a
	 *    performance critical service will be assigned CPUs
	 *    within a selected NUMA node.
	 */
	enum hv_numa_policy affinity_policy;

};

@@ -870,6 +909,12 @@ static inline void set_channel_signal_state(struct vmbus_channel *c,
	c->signal_policy = policy;
}

static inline void set_channel_affinity_state(struct vmbus_channel *c,
					      enum hv_numa_policy policy)
{
	c->affinity_policy = policy;
}

static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
{
	c->batched_reading = state;
@@ -891,6 +936,16 @@ static inline void set_channel_pending_send_size(struct vmbus_channel *c,
	c->outbound.ring_buffer->pending_send_sz = size;
}

static inline void set_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = true;
}

static inline void clear_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = false;
}
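With these helpers in place, a performance-critical VMBus driver could opt its channel into both behaviors before opening it; a hedged sketch:

static void tune_perf_channel(struct vmbus_channel *channel)
{
	/* bypass the monitor mechanism for latency-sensitive traffic */
	set_low_latency_mode(channel);
	/* keep this service's channels on one NUMA node */
	set_channel_affinity_state(channel, HV_LOCALIZED);
}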

void vmbus_onmessage(void *context);

int vmbus_request_offers(void);
@@ -1114,6 +1169,13 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
			const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);

static inline const char *vmbus_dev_name(const struct hv_device *device_obj)
{
	const struct kobject *kobj = &device_obj->device.kobj;

	return kobj->name;
}

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);

int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
@@ -1256,6 +1318,27 @@ u64 hv_do_hypercall(u64 control, void *input, void *output);
	.guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
			0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)

/*
 * Linux doesn't support the 3 devices: the first two are for
 * Automatic Virtual Machine Activation, and the third is for
 * Remote Desktop Virtualization.
 * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
 * {3375baf4-9e15-4b30-b765-67acb10d607b}
 * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
 */

#define HV_AVMA1_GUID \
	.guid = UUID_LE(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
			0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)

#define HV_AVMA2_GUID \
	.guid = UUID_LE(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
			0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)

#define HV_RDV_GUID \
	.guid = UUID_LE(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
			0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)

/*
 * Common header for Hyper-V ICs
 */
@@ -1344,6 +1427,15 @@ struct ictimesync_data {
	u8 flags;
} __packed;

struct ictimesync_ref_data {
	u64 parenttime;
	u64 vmreferencetime;
	u8 flags;
	char leapflags;
	char stratum;
	u8 reserved[3];
} __packed;

struct hyperv_service_callback {
	u8 msg_type;
	char *log_msg;
@@ -1357,6 +1449,9 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
					struct icmsg_negotiate *, u8 *, int,
					int);

void hv_event_tasklet_disable(struct vmbus_channel *channel);
void hv_event_tasklet_enable(struct vmbus_channel *channel);

void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);

/*
include/linux/hypervisor.h (new file, 17 lines)
@@ -0,0 +1,17 @@
#ifndef __LINUX_HYPEVISOR_H
#define __LINUX_HYPEVISOR_H

/*
 * Generic Hypervisor support
 *	Juergen Gross <jgross@suse.com>
 */

#ifdef CONFIG_HYPERVISOR_GUEST
#include <asm/hypervisor.h>
#else
static inline void hypervisor_pin_vcpu(int cpu)
{
}
#endif

#endif /* __LINUX_HYPEVISOR_H */
@@ -32,7 +32,9 @@
struct i2c_mux_core {
	struct i2c_adapter *parent;
	struct device *dev;
	bool mux_locked;
	unsigned int mux_locked:1;
	unsigned int arbitrator:1;
	unsigned int gate:1;

	void *priv;

@@ -51,7 +53,9 @@ struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
			int (*deselect)(struct i2c_mux_core *, u32));

/* flags for i2c_mux_alloc */
#define I2C_MUX_LOCKED BIT(0)
#define I2C_MUX_ARBITRATOR BIT(1)
#define I2C_MUX_GATE BIT(2)
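These flags feed the flags argument of i2c_mux_alloc() and land in the new bitfields above. A hypothetical allocation sketch (placeholder callbacks and private struct):

/* Hypothetical sketch: allocate a mux-locked mux core, so a transfer
 * on a child adapter locks only that segment of the adapter tree. */
struct i2c_mux_core *muxc;

muxc = i2c_mux_alloc(parent, dev, 4, sizeof(struct example_mux),
		     I2C_MUX_LOCKED, example_select, example_deselect);
if (!muxc)
	return -ENOMEM;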

static inline void *i2c_mux_priv(struct i2c_mux_core *muxc)
{
@@ -426,6 +426,20 @@ struct i2c_algorithm {
#endif
};

/**
 * struct i2c_lock_operations - represent I2C locking operations
 * @lock_bus: Get exclusive access to an I2C bus segment
 * @trylock_bus: Try to get exclusive access to an I2C bus segment
 * @unlock_bus: Release exclusive access to an I2C bus segment
 *
 * The main operations are wrapped by i2c_lock_bus and i2c_unlock_bus.
 */
struct i2c_lock_operations {
	void (*lock_bus)(struct i2c_adapter *, unsigned int flags);
	int (*trylock_bus)(struct i2c_adapter *, unsigned int flags);
	void (*unlock_bus)(struct i2c_adapter *, unsigned int flags);
};
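Collecting the three callbacks in one structure lets an adapter swap its whole locking strategy through a single const pointer. A hypothetical wiring sketch (placeholder functions):

/* Hypothetical sketch: install custom locking before registration. */
static const struct i2c_lock_operations example_lock_ops = {
	.lock_bus	= example_lock_bus,
	.trylock_bus	= example_trylock_bus,
	.unlock_bus	= example_unlock_bus,
};

static int example_register(struct i2c_adapter *adap)
{
	adap->lock_ops = &example_lock_ops;
	return i2c_add_adapter(adap);
}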

/**
 * struct i2c_timings - I2C timing information
 * @bus_freq_hz: the bus frequency in Hz

@@ -536,6 +550,7 @@ struct i2c_adapter {
	void *algo_data;

	/* data fields that are valid for all devices */
	const struct i2c_lock_operations *lock_ops;
	struct rt_mutex bus_lock;
	struct rt_mutex mux_lock;

@@ -552,10 +567,6 @@ struct i2c_adapter {

	struct i2c_bus_recovery_info *bus_recovery_info;
	const struct i2c_adapter_quirks *quirks;

	void (*lock_bus)(struct i2c_adapter *, unsigned int flags);
	int (*trylock_bus)(struct i2c_adapter *, unsigned int flags);
	void (*unlock_bus)(struct i2c_adapter *, unsigned int flags);
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
@@ -597,7 +608,21 @@ int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *));
static inline void
i2c_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
{
	adapter->lock_bus(adapter, flags);
	adapter->lock_ops->lock_bus(adapter, flags);
}

/**
 * i2c_trylock_bus - Try to get exclusive access to an I2C bus segment
 * @adapter: Target I2C bus segment
 * @flags: I2C_LOCK_ROOT_ADAPTER tries to lock the root i2c adapter,
 *	I2C_LOCK_SEGMENT tries to lock only this branch in the adapter tree
 *
 * Return: true if the I2C bus segment is locked, false otherwise
 */
static inline int
i2c_trylock_bus(struct i2c_adapter *adapter, unsigned int flags)
{
	return adapter->lock_ops->trylock_bus(adapter, flags);
}
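A hypothetical opportunistic-access sketch pairing the new wrapper with i2c_unlock_bus(); the unlocked __i2c_transfer() is only safe while the lock is held:

if (i2c_trylock_bus(adapter, I2C_LOCK_SEGMENT)) {
	/* ... issue __i2c_transfer() calls on this segment ... */
	i2c_unlock_bus(adapter, I2C_LOCK_SEGMENT);
}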
/**

@@ -609,7 +634,7 @@ i2c_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
static inline void
i2c_unlock_bus(struct i2c_adapter *adapter, unsigned int flags)
{
	adapter->unlock_bus(adapter, flags);
	adapter->lock_ops->unlock_bus(adapter, flags);
}

static inline void

@@ -673,6 +698,7 @@ extern void i2c_clients_command(struct i2c_adapter *adap,

extern struct i2c_adapter *i2c_get_adapter(int nr);
extern void i2c_put_adapter(struct i2c_adapter *adap);
extern unsigned int i2c_adapter_depth(struct i2c_adapter *adapter);

void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_defaults);

@@ -766,4 +792,13 @@ static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node
}
#endif /* CONFIG_OF */

#if IS_ENABLED(CONFIG_ACPI)
u32 i2c_acpi_find_bus_speed(struct device *dev);
#else
static inline u32 i2c_acpi_find_bus_speed(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_ACPI */
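The helper reports the bus speed that ACPI-enumerated devices on this bus can tolerate, or 0 when ACPI specifies nothing, so the adapter can fall back to a default. A hypothetical probe-time sketch (the 100 kHz fallback is an assumption, not part of the API):

/* Hypothetical sketch: let ACPI cap the bus speed, else assume 100 kHz. */
u32 bus_hz = i2c_acpi_find_bus_speed(&pdev->dev);

if (!bus_hz)
	bus_hz = 100000;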

#endif /* _LINUX_I2C_H */
@@ -45,6 +45,7 @@ struct br_ip_list {
#define BR_PROXYARP		BIT(8)
#define BR_LEARNING_SYNC	BIT(9)
#define BR_PROXYARP_WIFI	BIT(10)
#define BR_MCAST_FLOOD		BIT(11)

#define BR_DEFAULT_AGEING_TIME	(300 * HZ)

@@ -25,5 +25,6 @@ struct ifla_vf_info {
	__u32 max_tx_rate;
	__u32 rss_query_en;
	__u32 trusted;
	__be16 vlan_proto;
};
#endif /* _LINUX_IF_LINK_H */
@@ -245,7 +245,7 @@ static inline struct team_port *team_get_port_by_index(struct team *team,
	return NULL;
}

static inline int team_num_to_port_index(struct team *team, int num)
static inline int team_num_to_port_index(struct team *team, unsigned int num)
{
	int en_port_count = ACCESS_ONCE(team->en_port_count);
@@ -81,6 +81,7 @@ static inline bool is_vlan_dev(const struct net_device *dev)
#define skb_vlan_tag_present(__skb)	((__skb)->vlan_tci & VLAN_TAG_PRESENT)
#define skb_vlan_tag_get(__skb)		((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
#define skb_vlan_tag_get_id(__skb)	((__skb)->vlan_tci & VLAN_VID_MASK)
#define skb_vlan_tag_get_prio(__skb)	((__skb)->vlan_tci & VLAN_PRIO_MASK)

/**
 * struct vlan_pcpu_stats - VLAN percpu rx/tx stats

@@ -271,6 +272,23 @@ static inline int vlan_get_encap_level(struct net_device *dev)
}
#endif

/**
 * eth_type_vlan - check for valid vlan ether type.
 * @ethertype: ether type to check
 *
 * Returns true if the ether type is a vlan ether type.
 */
static inline bool eth_type_vlan(__be16 ethertype)
{
	switch (ethertype) {
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}

static inline bool vlan_hw_offload_capable(netdev_features_t features,
					   __be16 proto)
{

@@ -424,8 +442,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;

	if (veth->h_vlan_proto != htons(ETH_P_8021Q) &&
	    veth->h_vlan_proto != htons(ETH_P_8021AD))
	if (!eth_type_vlan(veth->h_vlan_proto))
		return -EINVAL;

	*vlan_tci = ntohs(veth->h_vlan_TCI);

@@ -487,7 +504,7 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
	 * ETH_HLEN otherwise
	 */
	if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
	if (eth_type_vlan(type)) {
		if (vlan_depth) {
			if (WARN_ON(vlan_depth < VLAN_HLEN))
				return 0;

@@ -505,8 +522,7 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
			vh = (struct vlan_hdr *)(skb->data + vlan_depth);
			type = vh->h_vlan_encapsulated_proto;
			vlan_depth += VLAN_HLEN;
		} while (type == htons(ETH_P_8021Q) ||
			 type == htons(ETH_P_8021AD));
		} while (eth_type_vlan(type));
	}

	if (depth)

@@ -571,8 +587,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
static inline bool skb_vlan_tagged(const struct sk_buff *skb)
{
	if (!skb_vlan_tag_present(skb) &&
	    likely(skb->protocol != htons(ETH_P_8021Q) &&
		   skb->protocol != htons(ETH_P_8021AD)))
	    likely(!eth_type_vlan(skb->protocol)))
		return false;

	return true;

@@ -592,15 +607,14 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
	if (!skb_vlan_tag_present(skb)) {
		struct vlan_ethhdr *veh;

		if (likely(protocol != htons(ETH_P_8021Q) &&
			   protocol != htons(ETH_P_8021AD)))
		if (likely(!eth_type_vlan(protocol)))
			return false;

		veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD))
	if (!eth_type_vlan(protocol))
		return false;

	return true;
@@ -164,6 +164,18 @@ void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff);
struct iio_channel
*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer);

/**
 * iio_channel_cb_get_iio_dev() - get access to the underlying device.
 * @cb_buffer: The callback buffer from whom we want the device
 *	       information.
 *
 * This function allows one to obtain information about the device.
 * The primary aim is to allow drivers that are consuming a device to query
 * things like current trigger.
 */
struct iio_dev
*iio_channel_cb_get_iio_dev(const struct iio_cb_buffer *cb_buffer);
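A hypothetical consumer sketch: once a callback buffer exists, the accessor exposes the producing device, e.g. to report which trigger currently feeds it (variable names are placeholders):

/* Hypothetical sketch: inspect the producing device's trigger. */
struct iio_dev *indio_dev = iio_channel_cb_get_iio_dev(cb_buffer);

if (indio_dev->trig)
	dev_info(dev, "fed by trigger %s\n", indio_dev->trig->name);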

/**
 * iio_read_channel_raw() - read from a given channel
 * @chan: The channel being queried.
@@ -483,6 +483,7 @@ struct iio_buffer_setup_ops {
 * @scan_timestamp:	[INTERN] set if any buffers have requested timestamp
 * @scan_index_timestamp:[INTERN] cache of the index to the timestamp
 * @trig:		[INTERN] current device trigger (buffer modes)
 * @trig_readonly:	[INTERN] mark the current trigger immutable
 * @pollfunc:		[DRIVER] function run on trigger being received
 * @pollfunc_event:	[DRIVER] function run on events trigger being received
 * @channels:		[DRIVER] channel specification structure table

@@ -523,6 +524,7 @@ struct iio_dev {
	bool			scan_timestamp;
	unsigned		scan_index_timestamp;
	struct iio_trigger	*trig;
	bool			trig_readonly;
	struct iio_poll_func	*pollfunc;
	struct iio_poll_func	*pollfunc_event;

@@ -642,6 +644,7 @@ static inline struct iio_dev *iio_priv_to_dev(void *priv)
}

void iio_device_free(struct iio_dev *indio_dev);
int devm_iio_device_match(struct device *dev, void *res, void *data);
struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv);
void devm_iio_device_free(struct device *dev, struct iio_dev *indio_dev);
struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
@@ -56,6 +56,9 @@ struct iio_trigger_ops {
 * @subirqs:		[INTERN] information about the 'child' irqs.
 * @pool:		[INTERN] bitmap of irqs currently in use.
 * @pool_lock:		[INTERN] protection of the irq pool.
 * @attached_own_device:[INTERN] if we are using our own device as trigger,
 *			i.e. if we registered a poll function to the same
 *			device as the one providing the trigger.
 **/
struct iio_trigger {
	const struct iio_trigger_ops *ops;

@@ -73,6 +76,7 @@ struct iio_trigger {
	struct iio_subirq subirqs[CONFIG_IIO_CONSUMERS_PER_TRIGGER];
	unsigned long pool[BITS_TO_LONGS(CONFIG_IIO_CONSUMERS_PER_TRIGGER)];
	struct mutex pool_lock;
	bool attached_own_device;
};

@@ -125,12 +129,27 @@ static inline void *iio_trigger_get_drvdata(struct iio_trigger *trig)
 **/
int iio_trigger_register(struct iio_trigger *trig_info);

int devm_iio_trigger_register(struct device *dev,
			      struct iio_trigger *trig_info);

/**
 * iio_trigger_unregister() - unregister a trigger from the core
 * @trig_info:	trigger to be unregistered
 **/
void iio_trigger_unregister(struct iio_trigger *trig_info);

void devm_iio_trigger_unregister(struct device *dev,
				 struct iio_trigger *trig_info);

/**
 * iio_trigger_set_immutable() - set an immutable trigger on destination
 * @indio_dev:	IIO device structure containing the device
 * @trig:	trigger to assign to device
 **/
int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig);

/**
 * iio_trigger_poll() - called on a trigger occurring
 * @trig:	trigger which occurred

@@ -145,6 +164,13 @@ irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private);
__printf(1, 2) struct iio_trigger *iio_trigger_alloc(const char *fmt, ...);
void iio_trigger_free(struct iio_trigger *trig);

/**
 * iio_trigger_using_own() - tells us if we use our own HW trigger ourselves
 * @indio_dev:	device to check
 */
bool iio_trigger_using_own(struct iio_dev *indio_dev);
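Taken together, a driver that supplies its own hardware trigger can register it with device-managed lifetime, pin it on the device, and later branch on whether it is triggering itself. A hypothetical probe-time sketch (placeholder variables):

/* Hypothetical sketch: register our own trigger and make it immutable. */
ret = devm_iio_trigger_register(&pdev->dev, trig);
if (ret)
	return ret;

ret = iio_trigger_set_immutable(indio_dev, trig);
if (ret)
	return ret;

/* Later, e.g. in the buffer setup path: */
if (iio_trigger_using_own(indio_dev))
	pr_debug("device is its own trigger source\n");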

#else
struct iio_trigger;
struct iio_trigger_ops;
@@ -12,4 +12,12 @@ int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
			       const struct iio_buffer_setup_ops *setup_ops);
void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev);

int devm_iio_triggered_buffer_setup(struct device *dev,
				    struct iio_dev *indio_dev,
				    irqreturn_t (*h)(int irq, void *p),
				    irqreturn_t (*thread)(int irq, void *p),
				    const struct iio_buffer_setup_ops *ops);
void devm_iio_triggered_buffer_cleanup(struct device *dev,
				       struct iio_dev *indio_dev);
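With the devm variant a driver can drop both the error-path teardown and the remove() cleanup. A hypothetical probe sketch (example_trigger_handler is a placeholder; iio_pollfunc_store_time is the usual timestamping top half):

ret = devm_iio_triggered_buffer_setup(&pdev->dev, indio_dev,
				      iio_pollfunc_store_time,
				      example_trigger_handler, NULL);
if (ret)
	return ret;

return devm_iio_device_register(&pdev->dev, indio_dev);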

#endif
@@ -37,7 +37,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
		      struct sk_buff *skb, const struct inet_diag_req_v2 *req,
		      struct user_namespace *user_ns,
		      u32 pid, u32 seq, u16 nlmsg_flags,
		      const struct nlmsghdr *unlh);
		      const struct nlmsghdr *unlh, bool net_admin);
void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
			 struct netlink_callback *cb,
			 const struct inet_diag_req_v2 *r,

@@ -56,7 +56,7 @@ void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);

int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
			     struct inet_diag_msg *r, int ext,
			     struct user_namespace *user_ns);
			     struct user_namespace *user_ns, bool net_admin);

extern int inet_diag_register(const struct inet_diag_handler *handler);
extern void inet_diag_unregister(const struct inet_diag_handler *handler);
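The new net_admin flag tells the fill helpers whether the requester holds CAP_NET_ADMIN, gating privileged attributes such as the socket mark. A hypothetical caller sketch of how a diag handler might derive and pass it (surrounding variables are placeholders):

/* Hypothetical sketch: derive net_admin from the netlink request. */
bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);

err = inet_sk_diag_fill(sk, inet_csk(sk), skb, req, user_ns,
			portid, seq, NLM_F_MULTI, nlh, net_admin);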
(Some files were not shown because too many files have changed in this diff.)