Merge l2-mtd/next into l2-mtd/master
@@ -24,14 +24,10 @@ struct super_block;
struct pacct_struct;
struct pid_namespace;
extern int acct_parm[]; /* for sysctl */
extern void acct_auto_close_mnt(struct vfsmount *m);
extern void acct_auto_close(struct super_block *sb);
extern void acct_collect(long exitcode, int group_dead);
extern void acct_process(void);
extern void acct_exit_ns(struct pid_namespace *);
#else
#define acct_auto_close_mnt(x) do { } while (0)
#define acct_auto_close(x) do { } while (0)
#define acct_collect(x,y) do { } while (0)
#define acct_process() do { } while (0)
#define acct_exit_ns(ns) do { } while (0)
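The `#else` branch above stubs the accounting calls out as `do { } while (0)` macros so that, with process accounting compiled out, every call site still parses as a single statement. A minimal stand-alone illustration of why the idiom is needed (plain C, not code from the kernel tree; the macro name is invented):

#include <stdio.h>

/*
 * An empty stub defined as nothing at all breaks "if" statements:
 *
 *     if (cond)
 *             acct_process();    <- would expand to a bare ";" or to
 *     else                          nothing, and the else can then
 *             other();              bind to the wrong if
 *
 * do { } while (0) keeps the stub a single statement that still
 * requires a trailing semicolon, exactly like a real function call.
 */
#define acct_process_stub()	do { } while (0)

int main(void)
{
	int exiting = 1;

	if (exiting)
		acct_process_stub();	/* harmless single statement */
	else
		printf("not exiting\n");

	return 0;
}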
@@ -29,17 +29,17 @@
|
||||
#include <linux/ioport.h> /* for struct resource */
|
||||
#include <linux/device.h>
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
|
||||
#ifndef _LINUX
|
||||
#define _LINUX
|
||||
#endif
|
||||
#include <acpi/acpi.h>
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/mod_devicetable.h>
|
||||
#include <linux/dynamic_debug.h>
|
||||
|
||||
#include <acpi/acpi.h>
|
||||
#include <acpi/acpi_bus.h>
|
||||
#include <acpi/acpi_drivers.h>
|
||||
#include <acpi/acpi_numa.h>
|
||||
@@ -364,6 +364,17 @@ extern bool osc_sb_apei_support_acked;
|
||||
#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010
|
||||
#define OSC_PCI_CONTROL_MASKS 0x0000001f
|
||||
|
||||
#define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002
|
||||
#define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004
|
||||
#define ACPI_GSB_ACCESS_ATTRIB_BYTE 0x00000006
|
||||
#define ACPI_GSB_ACCESS_ATTRIB_WORD 0x00000008
|
||||
#define ACPI_GSB_ACCESS_ATTRIB_BLOCK 0x0000000A
|
||||
#define ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE 0x0000000B
|
||||
#define ACPI_GSB_ACCESS_ATTRIB_WORD_CALL 0x0000000C
|
||||
#define ACPI_GSB_ACCESS_ATTRIB_BLOCK_CALL 0x0000000D
|
||||
#define ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES 0x0000000E
|
||||
#define ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS 0x0000000F
|
||||
|
||||
extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
|
||||
u32 *mask, u32 req);
|
||||
|
||||
|
@@ -11,6 +11,8 @@
#define AER_FATAL 1
#define AER_CORRECTABLE 2

struct pci_dev;

struct aer_header_log_regs {
unsigned int dw0;
unsigned int dw1;
@@ -43,10 +43,7 @@ struct ahci_host_priv *ahci_platform_get_resources(
struct platform_device *pdev);
int ahci_platform_init_host(struct platform_device *pdev,
struct ahci_host_priv *hpriv,
const struct ata_port_info *pi_template,
unsigned long host_flags,
unsigned int force_port_map,
unsigned int mask_port_map);
const struct ata_port_info *pi_template);

int ahci_platform_suspend_host(struct device *dev);
int ahci_platform_resume_host(struct device *dev);
@@ -119,6 +119,13 @@ typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev,
|
||||
extern int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
|
||||
amd_iommu_invalid_ppr_cb cb);
|
||||
|
||||
#define PPR_FAULT_EXEC (1 << 1)
|
||||
#define PPR_FAULT_READ (1 << 2)
|
||||
#define PPR_FAULT_WRITE (1 << 5)
|
||||
#define PPR_FAULT_USER (1 << 6)
|
||||
#define PPR_FAULT_RSVD (1 << 7)
|
||||
#define PPR_FAULT_GN (1 << 8)
|
||||
|
||||
/**
|
||||
* amd_iommu_device_info() - Get information about IOMMUv2 support of a
|
||||
* PCI device
|
||||
|
@@ -22,10 +22,6 @@
|
||||
#ifdef __KERNEL__
|
||||
#include <linux/irqreturn.h>
|
||||
|
||||
#ifndef bool
|
||||
#define bool int
|
||||
#endif
|
||||
|
||||
/*
|
||||
* RECON_THRESHOLD is the maximum number of RECON messages to receive
|
||||
* within one minute before printing a "cabling problem" warning. The
|
||||
@@ -285,9 +281,9 @@ struct arcnet_local {
|
||||
unsigned long first_recon; /* time of "first" RECON message to count */
|
||||
unsigned long last_recon; /* time of most recent RECON */
|
||||
int num_recons; /* number of RECONs between first and last. */
|
||||
bool network_down; /* do we think the network is down? */
|
||||
int network_down; /* do we think the network is down? */
|
||||
|
||||
bool excnak_pending; /* We just got an excesive nak interrupt */
|
||||
int excnak_pending; /* We just got an excesive nak interrupt */
|
||||
|
||||
struct {
|
||||
uint16_t sequence; /* sequence number (incs with each packet) */
|
||||
@@ -305,7 +301,7 @@ struct arcnet_local {
|
||||
void (*command) (struct net_device * dev, int cmd);
|
||||
int (*status) (struct net_device * dev);
|
||||
void (*intmask) (struct net_device * dev, int mask);
|
||||
bool (*reset) (struct net_device * dev, bool really_reset);
|
||||
int (*reset) (struct net_device * dev, int really_reset);
|
||||
void (*open) (struct net_device * dev);
|
||||
void (*close) (struct net_device * dev);
|
||||
|
||||
|
@@ -1,43 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2007 Atmel Corporation
|
||||
*
|
||||
* Driver for the AT32AP700X PS/2 controller (PSIF).
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#ifndef __INCLUDE_ATMEL_PWM_BL_H
|
||||
#define __INCLUDE_ATMEL_PWM_BL_H
|
||||
|
||||
/**
|
||||
* struct atmel_pwm_bl_platform_data
|
||||
* @pwm_channel: which PWM channel in the PWM module to use.
|
||||
* @pwm_frequency: PWM frequency to generate, the driver will try to be as
|
||||
* close as the prescaler allows.
|
||||
* @pwm_compare_max: value to use in the PWM channel compare register.
|
||||
* @pwm_duty_max: maximum duty cycle value, must be less than or equal to
|
||||
* pwm_compare_max.
|
||||
* @pwm_duty_min: minimum duty cycle value, must be less than pwm_duty_max.
|
||||
* @pwm_active_low: set to one if the low part of the PWM signal increases the
|
||||
* brightness of the backlight.
|
||||
* @gpio_on: GPIO line to control the backlight on/off, set to -1 if not used.
|
||||
* @on_active_low: set to one if the on/off signal is on when GPIO is low.
|
||||
*
|
||||
* This struct must be added to the platform device in the board code. It is
|
||||
* used by the atmel-pwm-bl driver to setup the GPIO to control on/off and the
|
||||
* PWM device.
|
||||
*/
|
||||
struct atmel_pwm_bl_platform_data {
|
||||
unsigned int pwm_channel;
|
||||
unsigned int pwm_frequency;
|
||||
unsigned int pwm_compare_max;
|
||||
unsigned int pwm_duty_max;
|
||||
unsigned int pwm_duty_min;
|
||||
unsigned int pwm_active_low;
|
||||
int gpio_on;
|
||||
unsigned int on_active_low;
|
||||
};
|
||||
|
||||
#endif /* __INCLUDE_ATMEL_PWM_BL_H */
|
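The removed atmel-pwm-bl.h header above documents that atmel_pwm_bl_platform_data was meant to be filled in by board code and attached to the platform device. A hedged sketch of what such board code typically looked like; the channel, frequency and GPIO values as well as the device name are illustrative assumptions, not taken from any real board file:

#include <linux/platform_device.h>
/* <linux/atmel-pwm-bl.h> -- the header removed in this commit */

/* Example values only; a real board would use its own wiring. */
static struct atmel_pwm_bl_platform_data pwm_bl_pdata = {
	.pwm_channel     = 2,
	.pwm_frequency   = 20000,
	.pwm_compare_max = 345,
	.pwm_duty_max    = 345,
	.pwm_duty_min    = 90,
	.pwm_active_low  = 1,
	.gpio_on         = -1,	/* backlight enable not wired to a GPIO */
	.on_active_low   = 0,
};

static struct platform_device pwm_bl_device = {
	.name = "atmel-pwm-bl",
	.id   = -1,
	.dev  = {
		.platform_data = &pwm_bl_pdata,
	},
};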
@@ -7,6 +7,7 @@
|
||||
|
||||
struct atmel_ssc_platform_data {
|
||||
int use_dma;
|
||||
int has_fslen_ext;
|
||||
};
|
||||
|
||||
struct ssc_device {
|
||||
@@ -71,6 +72,12 @@ void ssc_free(struct ssc_device *ssc);
|
||||
#define SSC_RFMR_DATNB_OFFSET 8
|
||||
#define SSC_RFMR_FSEDGE_SIZE 1
|
||||
#define SSC_RFMR_FSEDGE_OFFSET 24
|
||||
/*
|
||||
* The FSLEN_EXT exist on at91sam9rl, at91sam9g10,
|
||||
* at91sam9g20, and at91sam9g45 and newer SoCs
|
||||
*/
|
||||
#define SSC_RFMR_FSLEN_EXT_SIZE 4
|
||||
#define SSC_RFMR_FSLEN_EXT_OFFSET 28
|
||||
#define SSC_RFMR_FSLEN_SIZE 4
|
||||
#define SSC_RFMR_FSLEN_OFFSET 16
|
||||
#define SSC_RFMR_FSOS_SIZE 4
|
||||
@@ -109,6 +116,12 @@ void ssc_free(struct ssc_device *ssc);
|
||||
#define SSC_TFMR_FSDEN_OFFSET 23
|
||||
#define SSC_TFMR_FSEDGE_SIZE 1
|
||||
#define SSC_TFMR_FSEDGE_OFFSET 24
|
||||
/*
|
||||
* The FSLEN_EXT exist on at91sam9rl, at91sam9g10,
|
||||
* at91sam9g20, and at91sam9g45 and newer SoCs
|
||||
*/
|
||||
#define SSC_TFMR_FSLEN_EXT_SIZE 4
|
||||
#define SSC_TFMR_FSLEN_EXT_OFFSET 28
|
||||
#define SSC_TFMR_FSLEN_SIZE 4
|
||||
#define SSC_TFMR_FSLEN_OFFSET 16
|
||||
#define SSC_TFMR_FSOS_SIZE 3
|
||||
|
@@ -1,70 +0,0 @@
|
||||
#ifndef __LINUX_ATMEL_PWM_H
|
||||
#define __LINUX_ATMEL_PWM_H
|
||||
|
||||
/**
|
||||
* struct pwm_channel - driver handle to a PWM channel
|
||||
* @regs: base of this channel's registers
|
||||
* @index: number of this channel (0..31)
|
||||
* @mck: base clock rate, which can be prescaled and maybe subdivided
|
||||
*
|
||||
* Drivers initialize a pwm_channel structure using pwm_channel_alloc().
|
||||
* Then they configure its clock rate (derived from MCK), alignment,
|
||||
* polarity, and duty cycle by writing directly to the channel registers,
|
||||
* before enabling the channel by calling pwm_channel_enable().
|
||||
*
|
||||
* After emitting a PWM signal for the desired length of time, drivers
|
||||
* may then pwm_channel_disable() or pwm_channel_free(). Both of these
|
||||
* disable the channel, but when it's freed the IRQ is deconfigured and
|
||||
* the channel must later be re-allocated and reconfigured.
|
||||
*
|
||||
* Note that if the period or duty cycle need to be changed while the
|
||||
* PWM channel is operating, drivers must use the PWM_CUPD double buffer
|
||||
* mechanism, either polling until they change or getting implicitly
|
||||
* notified through a once-per-period interrupt handler.
|
||||
*/
|
||||
struct pwm_channel {
|
||||
void __iomem *regs;
|
||||
unsigned index;
|
||||
unsigned long mck;
|
||||
};
|
||||
|
||||
extern int pwm_channel_alloc(int index, struct pwm_channel *ch);
|
||||
extern int pwm_channel_free(struct pwm_channel *ch);
|
||||
|
||||
extern int pwm_clk_alloc(unsigned prescale, unsigned div);
|
||||
extern void pwm_clk_free(unsigned clk);
|
||||
|
||||
extern int __pwm_channel_onoff(struct pwm_channel *ch, int enabled);
|
||||
|
||||
#define pwm_channel_enable(ch) __pwm_channel_onoff((ch), 1)
|
||||
#define pwm_channel_disable(ch) __pwm_channel_onoff((ch), 0)
|
||||
|
||||
/* periodic interrupts, mostly for CUPD changes to period or cycle */
|
||||
extern int pwm_channel_handler(struct pwm_channel *ch,
|
||||
void (*handler)(struct pwm_channel *ch));
|
||||
|
||||
/* per-channel registers (banked at pwm_channel->regs) */
|
||||
#define PWM_CMR 0x00 /* mode register */
|
||||
#define PWM_CPR_CPD (1 << 10) /* set: CUPD modifies period */
|
||||
#define PWM_CPR_CPOL (1 << 9) /* set: idle high */
|
||||
#define PWM_CPR_CALG (1 << 8) /* set: center align */
|
||||
#define PWM_CPR_CPRE (0xf << 0) /* mask: rate is mck/(2^pre) */
|
||||
#define PWM_CPR_CLKA (0xb << 0) /* rate CLKA */
|
||||
#define PWM_CPR_CLKB (0xc << 0) /* rate CLKB */
|
||||
#define PWM_CDTY 0x04 /* duty cycle (max of CPRD) */
|
||||
#define PWM_CPRD 0x08 /* period (count up from zero) */
|
||||
#define PWM_CCNT 0x0c /* counter (20 bits?) */
|
||||
#define PWM_CUPD 0x10 /* update CPRD (or CDTY) next period */
|
||||
|
||||
static inline void
|
||||
pwm_channel_writel(struct pwm_channel *pwmc, unsigned offset, u32 val)
|
||||
{
|
||||
__raw_writel(val, pwmc->regs + offset);
|
||||
}
|
||||
|
||||
static inline u32 pwm_channel_readl(struct pwm_channel *pwmc, unsigned offset)
|
||||
{
|
||||
return __raw_readl(pwmc->regs + offset);
|
||||
}
|
||||
|
||||
#endif /* __LINUX_ATMEL_PWM_H */
|
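The removed atmel_pwm.h above documents the intended call sequence: allocate a channel, program it directly through the banked registers, then enable it. A hedged usage sketch based only on that kernel-doc; the channel number and register values are made up for illustration:

#include <linux/errno.h>
/* <linux/atmel_pwm.h> is the header being removed in this commit */

static int example_pwm_start(struct pwm_channel *ch)
{
	int ret;

	ret = pwm_channel_alloc(1, ch);		/* claim channel 1 */
	if (ret)
		return ret;

	pwm_channel_writel(ch, PWM_CMR, 3);	/* prescaler: mck / 2^3 */
	pwm_channel_writel(ch, PWM_CPRD, 1000);	/* period, in clock ticks */
	pwm_channel_writel(ch, PWM_CDTY, 250);	/* 25% duty cycle */

	return pwm_channel_enable(ch);
}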
@@ -6,6 +6,7 @@
|
||||
|
||||
#include <linux/bcma/bcma_driver_chipcommon.h>
|
||||
#include <linux/bcma/bcma_driver_pci.h>
|
||||
#include <linux/bcma/bcma_driver_pcie2.h>
|
||||
#include <linux/bcma/bcma_driver_mips.h>
|
||||
#include <linux/bcma/bcma_driver_gmac_cmn.h>
|
||||
#include <linux/ssb/ssb.h> /* SPROM sharing */
|
||||
@@ -72,17 +73,17 @@ struct bcma_host_ops {
|
||||
/* Core-ID values. */
|
||||
#define BCMA_CORE_OOB_ROUTER 0x367 /* Out of band */
|
||||
#define BCMA_CORE_4706_CHIPCOMMON 0x500
|
||||
#define BCMA_CORE_PCIEG2 0x501
|
||||
#define BCMA_CORE_DMA 0x502
|
||||
#define BCMA_CORE_SDIO3 0x503
|
||||
#define BCMA_CORE_USB20 0x504
|
||||
#define BCMA_CORE_USB30 0x505
|
||||
#define BCMA_CORE_A9JTAG 0x506
|
||||
#define BCMA_CORE_DDR23 0x507
|
||||
#define BCMA_CORE_ROM 0x508
|
||||
#define BCMA_CORE_NAND 0x509
|
||||
#define BCMA_CORE_QSPI 0x50A
|
||||
#define BCMA_CORE_CHIPCOMMON_B 0x50B
|
||||
#define BCMA_CORE_NS_PCIEG2 0x501
|
||||
#define BCMA_CORE_NS_DMA 0x502
|
||||
#define BCMA_CORE_NS_SDIO3 0x503
|
||||
#define BCMA_CORE_NS_USB20 0x504
|
||||
#define BCMA_CORE_NS_USB30 0x505
|
||||
#define BCMA_CORE_NS_A9JTAG 0x506
|
||||
#define BCMA_CORE_NS_DDR23 0x507
|
||||
#define BCMA_CORE_NS_ROM 0x508
|
||||
#define BCMA_CORE_NS_NAND 0x509
|
||||
#define BCMA_CORE_NS_QSPI 0x50A
|
||||
#define BCMA_CORE_NS_CHIPCOMMON_B 0x50B
|
||||
#define BCMA_CORE_4706_SOC_RAM 0x50E
|
||||
#define BCMA_CORE_ARMCA9 0x510
|
||||
#define BCMA_CORE_4706_MAC_GBIT 0x52D
|
||||
@@ -157,6 +158,9 @@ struct bcma_host_ops {
|
||||
/* Chip IDs of PCIe devices */
|
||||
#define BCMA_CHIP_ID_BCM4313 0x4313
|
||||
#define BCMA_CHIP_ID_BCM43142 43142
|
||||
#define BCMA_CHIP_ID_BCM43131 43131
|
||||
#define BCMA_CHIP_ID_BCM43217 43217
|
||||
#define BCMA_CHIP_ID_BCM43222 43222
|
||||
#define BCMA_CHIP_ID_BCM43224 43224
|
||||
#define BCMA_PKG_ID_BCM43224_FAB_CSM 0x8
|
||||
#define BCMA_PKG_ID_BCM43224_FAB_SMIC 0xa
|
||||
@@ -333,6 +337,7 @@ struct bcma_bus {
|
||||
|
||||
struct bcma_drv_cc drv_cc;
|
||||
struct bcma_drv_pci drv_pci[2];
|
||||
struct bcma_drv_pcie2 drv_pcie2;
|
||||
struct bcma_drv_mips drv_mips;
|
||||
struct bcma_drv_gmac_cmn drv_gmac_cmn;
|
||||
|
||||
|
include/linux/bcma/bcma_driver_pcie2.h (new file, 158 lines)
@@ -0,0 +1,158 @@
|
||||
#ifndef LINUX_BCMA_DRIVER_PCIE2_H_
|
||||
#define LINUX_BCMA_DRIVER_PCIE2_H_
|
||||
|
||||
#define BCMA_CORE_PCIE2_CLK_CONTROL 0x0000
|
||||
#define PCIE2_CLKC_RST_OE 0x0001 /* When set, drives PCI_RESET out to pin */
|
||||
#define PCIE2_CLKC_RST 0x0002 /* Value driven out to pin */
|
||||
#define PCIE2_CLKC_SPERST 0x0004 /* SurvivePeRst */
|
||||
#define PCIE2_CLKC_DISABLE_L1CLK_GATING 0x0010
|
||||
#define PCIE2_CLKC_DLYPERST 0x0100 /* Delay PeRst to CoE Core */
|
||||
#define PCIE2_CLKC_DISSPROMLD 0x0200 /* DisableSpromLoadOnPerst */
|
||||
#define PCIE2_CLKC_WAKE_MODE_L2 0x1000 /* Wake on L2 */
|
||||
#define BCMA_CORE_PCIE2_RC_PM_CONTROL 0x0004
|
||||
#define BCMA_CORE_PCIE2_RC_PM_STATUS 0x0008
|
||||
#define BCMA_CORE_PCIE2_EP_PM_CONTROL 0x000C
|
||||
#define BCMA_CORE_PCIE2_EP_PM_STATUS 0x0010
|
||||
#define BCMA_CORE_PCIE2_EP_LTR_CONTROL 0x0014
|
||||
#define BCMA_CORE_PCIE2_EP_LTR_STATUS 0x0018
|
||||
#define BCMA_CORE_PCIE2_EP_OBFF_STATUS 0x001C
|
||||
#define BCMA_CORE_PCIE2_PCIE_ERR_STATUS 0x0020
|
||||
#define BCMA_CORE_PCIE2_RC_AXI_CONFIG 0x0100
|
||||
#define BCMA_CORE_PCIE2_EP_AXI_CONFIG 0x0104
|
||||
#define BCMA_CORE_PCIE2_RXDEBUG_STATUS0 0x0108
|
||||
#define BCMA_CORE_PCIE2_RXDEBUG_CONTROL0 0x010C
|
||||
#define BCMA_CORE_PCIE2_CONFIGINDADDR 0x0120
|
||||
#define BCMA_CORE_PCIE2_CONFIGINDDATA 0x0124
|
||||
#define BCMA_CORE_PCIE2_MDIOCONTROL 0x0128
|
||||
#define BCMA_CORE_PCIE2_MDIOWRDATA 0x012C
|
||||
#define BCMA_CORE_PCIE2_MDIORDDATA 0x0130
|
||||
#define BCMA_CORE_PCIE2_DATAINTF 0x0180
|
||||
#define BCMA_CORE_PCIE2_D2H_INTRLAZY_0 0x0188
|
||||
#define BCMA_CORE_PCIE2_H2D_INTRLAZY_0 0x018c
|
||||
#define BCMA_CORE_PCIE2_H2D_INTSTAT_0 0x0190
|
||||
#define BCMA_CORE_PCIE2_H2D_INTMASK_0 0x0194
|
||||
#define BCMA_CORE_PCIE2_D2H_INTSTAT_0 0x0198
|
||||
#define BCMA_CORE_PCIE2_D2H_INTMASK_0 0x019c
|
||||
#define BCMA_CORE_PCIE2_LTR_STATE 0x01A0 /* Latency Tolerance Reporting */
|
||||
#define PCIE2_LTR_ACTIVE 2
|
||||
#define PCIE2_LTR_ACTIVE_IDLE 1
|
||||
#define PCIE2_LTR_SLEEP 0
|
||||
#define PCIE2_LTR_FINAL_MASK 0x300
|
||||
#define PCIE2_LTR_FINAL_SHIFT 8
|
||||
#define BCMA_CORE_PCIE2_PWR_INT_STATUS 0x01A4
|
||||
#define BCMA_CORE_PCIE2_PWR_INT_MASK 0x01A8
|
||||
#define BCMA_CORE_PCIE2_CFG_ADDR 0x01F8
|
||||
#define BCMA_CORE_PCIE2_CFG_DATA 0x01FC
|
||||
#define BCMA_CORE_PCIE2_SYS_EQ_PAGE 0x0200
|
||||
#define BCMA_CORE_PCIE2_SYS_MSI_PAGE 0x0204
|
||||
#define BCMA_CORE_PCIE2_SYS_MSI_INTREN 0x0208
|
||||
#define BCMA_CORE_PCIE2_SYS_MSI_CTRL0 0x0210
|
||||
#define BCMA_CORE_PCIE2_SYS_MSI_CTRL1 0x0214
|
||||
#define BCMA_CORE_PCIE2_SYS_MSI_CTRL2 0x0218
|
||||
#define BCMA_CORE_PCIE2_SYS_MSI_CTRL3 0x021C
|
||||
#define BCMA_CORE_PCIE2_SYS_MSI_CTRL4 0x0220
|
||||
#define BCMA_CORE_PCIE2_SYS_MSI_CTRL5 0x0224
|
||||
#define BCMA_CORE_PCIE2_SYS_EQ_HEAD0 0x0250
|
||||
#define BCMA_CORE_PCIE2_SYS_EQ_TAIL0 0x0254
|
||||
#define BCMA_CORE_PCIE2_SYS_EQ_HEAD1 0x0258
|
||||
#define BCMA_CORE_PCIE2_SYS_EQ_TAIL1 0x025C
|
||||
#define BCMA_CORE_PCIE2_SYS_EQ_HEAD2 0x0260
|
||||
#define BCMA_CORE_PCIE2_SYS_EQ_TAIL2 0x0264
|
||||
#define BCMA_CORE_PCIE2_SYS_EQ_HEAD3 0x0268
|
||||
#define BCMA_CORE_PCIE2_SYS_EQ_TAIL3 0x026C
|
||||
#define BCMA_CORE_PCIE2_SYS_EQ_HEAD4 0x0270
|
||||
#define BCMA_CORE_PCIE2_SYS_EQ_TAIL4 0x0274
|
||||
#define BCMA_CORE_PCIE2_SYS_EQ_HEAD5 0x0278
|
||||
#define BCMA_CORE_PCIE2_SYS_EQ_TAIL5 0x027C
|
||||
#define BCMA_CORE_PCIE2_SYS_RC_INTX_EN 0x0330
|
||||
#define BCMA_CORE_PCIE2_SYS_RC_INTX_CSR 0x0334
|
||||
#define BCMA_CORE_PCIE2_SYS_MSI_REQ 0x0340
|
||||
#define BCMA_CORE_PCIE2_SYS_HOST_INTR_EN 0x0344
|
||||
#define BCMA_CORE_PCIE2_SYS_HOST_INTR_CSR 0x0348
|
||||
#define BCMA_CORE_PCIE2_SYS_HOST_INTR0 0x0350
|
||||
#define BCMA_CORE_PCIE2_SYS_HOST_INTR1 0x0354
|
||||
#define BCMA_CORE_PCIE2_SYS_HOST_INTR2 0x0358
|
||||
#define BCMA_CORE_PCIE2_SYS_HOST_INTR3 0x035C
|
||||
#define BCMA_CORE_PCIE2_SYS_EP_INT_EN0 0x0360
|
||||
#define BCMA_CORE_PCIE2_SYS_EP_INT_EN1 0x0364
|
||||
#define BCMA_CORE_PCIE2_SYS_EP_INT_CSR0 0x0370
|
||||
#define BCMA_CORE_PCIE2_SYS_EP_INT_CSR1 0x0374
|
||||
#define BCMA_CORE_PCIE2_SPROM(wordoffset) (0x0800 + ((wordoffset) * 2))
|
||||
#define BCMA_CORE_PCIE2_FUNC0_IMAP0_0 0x0C00
|
||||
#define BCMA_CORE_PCIE2_FUNC0_IMAP0_1 0x0C04
|
||||
#define BCMA_CORE_PCIE2_FUNC0_IMAP0_2 0x0C08
|
||||
#define BCMA_CORE_PCIE2_FUNC0_IMAP0_3 0x0C0C
|
||||
#define BCMA_CORE_PCIE2_FUNC0_IMAP0_4 0x0C10
|
||||
#define BCMA_CORE_PCIE2_FUNC0_IMAP0_5 0x0C14
|
||||
#define BCMA_CORE_PCIE2_FUNC0_IMAP0_6 0x0C18
|
||||
#define BCMA_CORE_PCIE2_FUNC0_IMAP0_7 0x0C1C
|
||||
#define BCMA_CORE_PCIE2_FUNC1_IMAP0_0 0x0C20
|
||||
#define BCMA_CORE_PCIE2_FUNC1_IMAP0_1 0x0C24
|
||||
#define BCMA_CORE_PCIE2_FUNC1_IMAP0_2 0x0C28
|
||||
#define BCMA_CORE_PCIE2_FUNC1_IMAP0_3 0x0C2C
|
||||
#define BCMA_CORE_PCIE2_FUNC1_IMAP0_4 0x0C30
|
||||
#define BCMA_CORE_PCIE2_FUNC1_IMAP0_5 0x0C34
|
||||
#define BCMA_CORE_PCIE2_FUNC1_IMAP0_6 0x0C38
|
||||
#define BCMA_CORE_PCIE2_FUNC1_IMAP0_7 0x0C3C
|
||||
#define BCMA_CORE_PCIE2_FUNC0_IMAP1 0x0C80
|
||||
#define BCMA_CORE_PCIE2_FUNC1_IMAP1 0x0C88
|
||||
#define BCMA_CORE_PCIE2_FUNC0_IMAP2 0x0CC0
|
||||
#define BCMA_CORE_PCIE2_FUNC1_IMAP2 0x0CC8
|
||||
#define BCMA_CORE_PCIE2_IARR0_LOWER 0x0D00
|
||||
#define BCMA_CORE_PCIE2_IARR0_UPPER 0x0D04
|
||||
#define BCMA_CORE_PCIE2_IARR1_LOWER 0x0D08
|
||||
#define BCMA_CORE_PCIE2_IARR1_UPPER 0x0D0C
|
||||
#define BCMA_CORE_PCIE2_IARR2_LOWER 0x0D10
|
||||
#define BCMA_CORE_PCIE2_IARR2_UPPER 0x0D14
|
||||
#define BCMA_CORE_PCIE2_OARR0 0x0D20
|
||||
#define BCMA_CORE_PCIE2_OARR1 0x0D28
|
||||
#define BCMA_CORE_PCIE2_OARR2 0x0D30
|
||||
#define BCMA_CORE_PCIE2_OMAP0_LOWER 0x0D40
|
||||
#define BCMA_CORE_PCIE2_OMAP0_UPPER 0x0D44
|
||||
#define BCMA_CORE_PCIE2_OMAP1_LOWER 0x0D48
|
||||
#define BCMA_CORE_PCIE2_OMAP1_UPPER 0x0D4C
|
||||
#define BCMA_CORE_PCIE2_OMAP2_LOWER 0x0D50
|
||||
#define BCMA_CORE_PCIE2_OMAP2_UPPER 0x0D54
|
||||
#define BCMA_CORE_PCIE2_FUNC1_IARR1_SIZE 0x0D58
|
||||
#define BCMA_CORE_PCIE2_FUNC1_IARR2_SIZE 0x0D5C
|
||||
#define BCMA_CORE_PCIE2_MEM_CONTROL 0x0F00
|
||||
#define BCMA_CORE_PCIE2_MEM_ECC_ERRLOG0 0x0F04
|
||||
#define BCMA_CORE_PCIE2_MEM_ECC_ERRLOG1 0x0F08
|
||||
#define BCMA_CORE_PCIE2_LINK_STATUS 0x0F0C
|
||||
#define BCMA_CORE_PCIE2_STRAP_STATUS 0x0F10
|
||||
#define BCMA_CORE_PCIE2_RESET_STATUS 0x0F14
|
||||
#define BCMA_CORE_PCIE2_RESETEN_IN_LINKDOWN 0x0F18
|
||||
#define BCMA_CORE_PCIE2_MISC_INTR_EN 0x0F1C
|
||||
#define BCMA_CORE_PCIE2_TX_DEBUG_CFG 0x0F20
|
||||
#define BCMA_CORE_PCIE2_MISC_CONFIG 0x0F24
|
||||
#define BCMA_CORE_PCIE2_MISC_STATUS 0x0F28
|
||||
#define BCMA_CORE_PCIE2_INTR_EN 0x0F30
|
||||
#define BCMA_CORE_PCIE2_INTR_CLEAR 0x0F34
|
||||
#define BCMA_CORE_PCIE2_INTR_STATUS 0x0F38
|
||||
|
||||
/* PCIE gen2 config regs */
|
||||
#define PCIE2_INTSTATUS 0x090
|
||||
#define PCIE2_INTMASK 0x094
|
||||
#define PCIE2_SBMBX 0x098
|
||||
|
||||
#define PCIE2_PMCR_REFUP 0x1814 /* Trefup time */
|
||||
|
||||
#define PCIE2_CAP_DEVSTSCTRL2_OFFSET 0xD4
|
||||
#define PCIE2_CAP_DEVSTSCTRL2_LTRENAB 0x400
|
||||
#define PCIE2_PVT_REG_PM_CLK_PERIOD 0x184c
|
||||
|
||||
struct bcma_drv_pcie2 {
|
||||
struct bcma_device *core;
|
||||
};
|
||||
|
||||
#define pcie2_read16(pcie2, offset) bcma_read16((pcie2)->core, offset)
|
||||
#define pcie2_read32(pcie2, offset) bcma_read32((pcie2)->core, offset)
|
||||
#define pcie2_write16(pcie2, offset, val) bcma_write16((pcie2)->core, offset, val)
|
||||
#define pcie2_write32(pcie2, offset, val) bcma_write32((pcie2)->core, offset, val)
|
||||
|
||||
#define pcie2_set32(pcie2, offset, set) bcma_set32((pcie2)->core, offset, set)
|
||||
#define pcie2_mask32(pcie2, offset, mask) bcma_mask32((pcie2)->core, offset, mask)
|
||||
|
||||
void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2);
|
||||
|
||||
#endif /* LINUX_BCMA_DRIVER_PCIE2_H_ */
|
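The pcie2_read32()/pcie2_write32() wrappers defined at the end of the new header simply forward to the generic bcma accessors with the core pointer taken from struct bcma_drv_pcie2. A small hedged usage sketch; the register choice and the check are arbitrary, not from a real driver:

#include <linux/bcma/bcma.h>

static void example_pcie2_poll(struct bcma_drv_pcie2 *pcie2)
{
	u32 link = pcie2_read32(pcie2, BCMA_CORE_PCIE2_LINK_STATUS);

	if (link)	/* placeholder condition */
		pcie2_write32(pcie2, BCMA_CORE_PCIE2_INTR_CLEAR,
			      pcie2_read32(pcie2, BCMA_CORE_PCIE2_INTR_STATUS));
}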
@@ -308,6 +308,7 @@ struct bio_integrity_payload {

unsigned short bip_slab; /* slab the bip came from */
unsigned short bip_vcnt; /* # of integrity bio_vecs */
unsigned short bip_max_vcnt; /* integrity bio_vec slots */
unsigned bip_owns_buf:1; /* should free bip_buf */

struct work_struct bip_work; /* I/O completion */
@@ -88,32 +88,32 @@
|
||||
* lib/bitmap.c provides these functions:
|
||||
*/
|
||||
|
||||
extern int __bitmap_empty(const unsigned long *bitmap, int bits);
|
||||
extern int __bitmap_full(const unsigned long *bitmap, int bits);
|
||||
extern int __bitmap_empty(const unsigned long *bitmap, unsigned int nbits);
|
||||
extern int __bitmap_full(const unsigned long *bitmap, unsigned int nbits);
|
||||
extern int __bitmap_equal(const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
|
||||
int bits);
|
||||
unsigned int nbits);
|
||||
extern void __bitmap_shift_right(unsigned long *dst,
|
||||
const unsigned long *src, int shift, int bits);
|
||||
extern void __bitmap_shift_left(unsigned long *dst,
|
||||
const unsigned long *src, int shift, int bits);
|
||||
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern int __bitmap_intersects(const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern int __bitmap_subset(const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
extern int __bitmap_weight(const unsigned long *bitmap, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
|
||||
|
||||
extern void bitmap_set(unsigned long *map, int i, int len);
|
||||
extern void bitmap_clear(unsigned long *map, int start, int nr);
|
||||
extern void bitmap_set(unsigned long *map, unsigned int start, int len);
|
||||
extern void bitmap_clear(unsigned long *map, unsigned int start, int len);
|
||||
extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
|
||||
unsigned long size,
|
||||
unsigned long start,
|
||||
@@ -140,9 +140,9 @@ extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
|
||||
const unsigned long *relmap, int bits);
|
||||
extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
|
||||
int sz, int bits);
|
||||
extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
|
||||
extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
|
||||
extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
|
||||
extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
|
||||
extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
|
||||
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
|
||||
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
|
||||
extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits);
|
||||
|
||||
@@ -188,15 +188,15 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
|
||||
}
|
||||
|
||||
static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return (*dst = *src1 & *src2) != 0;
|
||||
return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
|
||||
return __bitmap_and(dst, src1, src2, nbits);
|
||||
}
|
||||
|
||||
static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
*dst = *src1 | *src2;
|
||||
@@ -205,7 +205,7 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
|
||||
}
|
||||
|
||||
static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
*dst = *src1 ^ *src2;
|
||||
@@ -214,24 +214,24 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
|
||||
}
|
||||
|
||||
static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return (*dst = *src1 & ~(*src2)) != 0;
|
||||
return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
|
||||
return __bitmap_andnot(dst, src1, src2, nbits);
|
||||
}
|
||||
|
||||
static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
|
||||
int nbits)
|
||||
unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
*dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits);
|
||||
*dst = ~(*src);
|
||||
else
|
||||
__bitmap_complement(dst, src, nbits);
|
||||
}
|
||||
|
||||
static inline int bitmap_equal(const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
|
||||
@@ -240,7 +240,7 @@ static inline int bitmap_equal(const unsigned long *src1,
|
||||
}
|
||||
|
||||
static inline int bitmap_intersects(const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
|
||||
@@ -249,7 +249,7 @@ static inline int bitmap_intersects(const unsigned long *src1,
|
||||
}
|
||||
|
||||
static inline int bitmap_subset(const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
|
||||
@@ -257,7 +257,7 @@ static inline int bitmap_subset(const unsigned long *src1,
|
||||
return __bitmap_subset(src1, src2, nbits);
|
||||
}
|
||||
|
||||
static inline int bitmap_empty(const unsigned long *src, int nbits)
|
||||
static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
|
||||
@@ -265,7 +265,7 @@ static inline int bitmap_empty(const unsigned long *src, int nbits)
|
||||
return __bitmap_empty(src, nbits);
|
||||
}
|
||||
|
||||
static inline int bitmap_full(const unsigned long *src, int nbits)
|
||||
static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
|
||||
@@ -273,7 +273,7 @@ static inline int bitmap_full(const unsigned long *src, int nbits)
|
||||
return __bitmap_full(src, nbits);
|
||||
}
|
||||
|
||||
static inline int bitmap_weight(const unsigned long *src, int nbits)
|
||||
static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
|
||||
@@ -284,7 +284,7 @@ static inline void bitmap_shift_right(unsigned long *dst,
|
||||
const unsigned long *src, int n, int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
*dst = *src >> n;
|
||||
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> n;
|
||||
else
|
||||
__bitmap_shift_right(dst, src, n, nbits);
|
||||
}
|
||||
|
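The bitmap helpers above change their length parameter from int to unsigned int; callers are unaffected. A short hedged example of how the inline wrappers are used, showing the small_const_nbits() single-word fast path next to the out-of-line __bitmap_*() path (values are illustrative):

#include <linux/bitmap.h>
#include <linux/bitops.h>

static void example_bitmap_and(void)
{
	DECLARE_BITMAP(small_a, 8)  = { 0xf0 };
	DECLARE_BITMAP(small_b, 8)  = { 0x3c };
	DECLARE_BITMAP(small_dst, 8);

	DECLARE_BITMAP(big_a, 256);
	DECLARE_BITMAP(big_b, 256);
	DECLARE_BITMAP(big_dst, 256);

	/* nbits <= BITS_PER_LONG: collapses to one word operation. */
	bitmap_and(small_dst, small_a, small_b, 8);

	/* Larger bitmap: ends up in __bitmap_and(). */
	bitmap_fill(big_a, 256);
	bitmap_zero(big_b, 256);
	bitmap_set(big_b, 10, 20);
	bitmap_and(big_dst, big_a, big_b, 256);
}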
@@ -21,6 +21,7 @@
|
||||
#include <linux/bsg.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/percpu-refcount.h>
|
||||
|
||||
#include <asm/scatterlist.h>
|
||||
|
||||
@@ -470,6 +471,7 @@ struct request_queue {
|
||||
struct mutex sysfs_lock;
|
||||
|
||||
int bypass_depth;
|
||||
int mq_freeze_depth;
|
||||
|
||||
#if defined(CONFIG_BLK_DEV_BSG)
|
||||
bsg_job_fn *bsg_job_fn;
|
||||
@@ -483,7 +485,7 @@ struct request_queue {
|
||||
#endif
|
||||
struct rcu_head rcu_head;
|
||||
wait_queue_head_t mq_freeze_wq;
|
||||
struct percpu_counter mq_usage_counter;
|
||||
struct percpu_ref mq_usage_counter;
|
||||
struct list_head all_q_node;
|
||||
|
||||
struct blk_mq_tag_set *tag_set;
|
||||
|
@@ -2,7 +2,7 @@
#define _LINUX_BYTEORDER_GENERIC_H

/*
* linux/byteorder_generic.h
* linux/byteorder/generic.h
* Generic Byte-reordering support
*
* The "... p" macros, like le64_to_cpup, can be used with pointers
@@ -78,8 +78,11 @@ extern const kernel_cap_t __cap_init_eff_set;
# error Fix up hand-coded capability macro initializers
#else /* HAND-CODED capability initializers */

#define CAP_LAST_U32 ((_KERNEL_CAPABILITY_U32S) - 1)
#define CAP_LAST_U32_VALID_MASK (CAP_TO_MASK(CAP_LAST_CAP + 1) -1)

# define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }})
# define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }})
# define CAP_FULL_SET ((kernel_cap_t){{ ~0, CAP_LAST_U32_VALID_MASK }})
# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
| CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
CAP_FS_MASK_B1 } })
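The new CAP_LAST_U32_VALID_MASK keeps CAP_FULL_SET from setting bits beyond CAP_LAST_CAP in the last 32-bit word. A hedged worked example as a C comment; the concrete CAP_LAST_CAP value depends on the kernel version and is only assumed here:

/*
 * Suppose CAP_LAST_CAP == 37 (illustrative; see <uapi/linux/capability.h>).
 * CAP_TO_MASK(x) is 1 << ((x) & 31), so:
 *
 *   CAP_TO_MASK(CAP_LAST_CAP + 1) - 1  ==  (1 << (38 & 31)) - 1
 *                                      ==  (1 << 6) - 1
 *                                      ==  0x0000003f
 *
 * i.e. only capabilities 32..37 occupy the second word, and CAP_FULL_SET
 * becomes {{ ~0, 0x0000003f }} instead of {{ ~0, ~0 }}, so no undefined
 * capability bits are ever reported as "set".
 */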
@@ -285,19 +285,9 @@ extern void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio,
|
||||
|
||||
extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
|
||||
bool can_fail);
|
||||
extern void ceph_msg_kfree(struct ceph_msg *m);
|
||||
|
||||
|
||||
static inline struct ceph_msg *ceph_msg_get(struct ceph_msg *msg)
|
||||
{
|
||||
kref_get(&msg->kref);
|
||||
return msg;
|
||||
}
|
||||
extern void ceph_msg_last_put(struct kref *kref);
|
||||
static inline void ceph_msg_put(struct ceph_msg *msg)
|
||||
{
|
||||
kref_put(&msg->kref, ceph_msg_last_put);
|
||||
}
|
||||
extern struct ceph_msg *ceph_msg_get(struct ceph_msg *msg);
|
||||
extern void ceph_msg_put(struct ceph_msg *msg);
|
||||
|
||||
extern void ceph_msg_dump(struct ceph_msg *msg);
|
||||
|
||||
|
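The ceph_msg_get()/ceph_msg_put() pair above moves from inline kref wrappers in the header to out-of-line functions (the same is done for OSD requests in the next hunk). For reference, a hedged sketch of the underlying kref pattern those wrappers are built on, using an invented object type:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
	struct kref kref;
	/* ... payload ... */
};

static void example_obj_release(struct kref *kref)
{
	struct example_obj *obj = container_of(kref, struct example_obj, kref);

	kfree(obj);
}

static struct example_obj *example_obj_get(struct example_obj *obj)
{
	kref_get(&obj->kref);
	return obj;
}

static void example_obj_put(struct example_obj *obj)
{
	kref_put(&obj->kref, example_obj_release);
}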
@@ -117,7 +117,7 @@ struct ceph_osd_request {
|
||||
struct list_head r_req_lru_item;
|
||||
struct list_head r_osd_item;
|
||||
struct list_head r_linger_item;
|
||||
struct list_head r_linger_osd;
|
||||
struct list_head r_linger_osd_item;
|
||||
struct ceph_osd *r_osd;
|
||||
struct ceph_pg r_pgid;
|
||||
int r_pg_osds[CEPH_PG_MAX_SIZE];
|
||||
@@ -325,22 +325,14 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
|
||||
|
||||
extern void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
|
||||
struct ceph_osd_request *req);
|
||||
extern void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
|
||||
struct ceph_osd_request *req);
|
||||
|
||||
static inline void ceph_osdc_get_request(struct ceph_osd_request *req)
|
||||
{
|
||||
kref_get(&req->r_kref);
|
||||
}
|
||||
extern void ceph_osdc_release_request(struct kref *kref);
|
||||
static inline void ceph_osdc_put_request(struct ceph_osd_request *req)
|
||||
{
|
||||
kref_put(&req->r_kref, ceph_osdc_release_request);
|
||||
}
|
||||
extern void ceph_osdc_get_request(struct ceph_osd_request *req);
|
||||
extern void ceph_osdc_put_request(struct ceph_osd_request *req);
|
||||
|
||||
extern int ceph_osdc_start_request(struct ceph_osd_client *osdc,
|
||||
struct ceph_osd_request *req,
|
||||
bool nofail);
|
||||
extern void ceph_osdc_cancel_request(struct ceph_osd_request *req);
|
||||
extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
|
||||
struct ceph_osd_request *req);
|
||||
extern void ceph_osdc_sync(struct ceph_osd_client *osdc);
|
||||
|
@@ -203,7 +203,15 @@ struct cgroup {
|
||||
struct kernfs_node *kn; /* cgroup kernfs entry */
|
||||
struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */
|
||||
|
||||
/* the bitmask of subsystems enabled on the child cgroups */
|
||||
/*
|
||||
* The bitmask of subsystems enabled on the child cgroups.
|
||||
* ->subtree_control is the one configured through
|
||||
* "cgroup.subtree_control" while ->child_subsys_mask is the
|
||||
* effective one which may have more subsystems enabled.
|
||||
* Controller knobs are made available iff it's enabled in
|
||||
* ->subtree_control.
|
||||
*/
|
||||
unsigned int subtree_control;
|
||||
unsigned int child_subsys_mask;
|
||||
|
||||
/* Private pointers for each registered subsystem */
|
||||
@@ -248,73 +256,9 @@ struct cgroup {
|
||||
|
||||
/* cgroup_root->flags */
|
||||
enum {
|
||||
/*
|
||||
* Unfortunately, cgroup core and various controllers are riddled
|
||||
* with idiosyncrasies and pointless options. The following flag,
|
||||
* when set, will force sane behavior - some options are forced on,
|
||||
* others are disallowed, and some controllers will change their
|
||||
* hierarchical or other behaviors.
|
||||
*
|
||||
* The set of behaviors affected by this flag are still being
|
||||
* determined and developed and the mount option for this flag is
|
||||
* prefixed with __DEVEL__. The prefix will be dropped once we
|
||||
* reach the point where all behaviors are compatible with the
|
||||
* planned unified hierarchy, which will automatically turn on this
|
||||
* flag.
|
||||
*
|
||||
* The followings are the behaviors currently affected this flag.
|
||||
*
|
||||
* - Mount options "noprefix", "xattr", "clone_children",
|
||||
* "release_agent" and "name" are disallowed.
|
||||
*
|
||||
* - When mounting an existing superblock, mount options should
|
||||
* match.
|
||||
*
|
||||
* - Remount is disallowed.
|
||||
*
|
||||
* - rename(2) is disallowed.
|
||||
*
|
||||
* - "tasks" is removed. Everything should be at process
|
||||
* granularity. Use "cgroup.procs" instead.
|
||||
*
|
||||
* - "cgroup.procs" is not sorted. pids will be unique unless they
|
||||
* got recycled inbetween reads.
|
||||
*
|
||||
* - "release_agent" and "notify_on_release" are removed.
|
||||
* Replacement notification mechanism will be implemented.
|
||||
*
|
||||
* - "cgroup.clone_children" is removed.
|
||||
*
|
||||
* - "cgroup.subtree_populated" is available. Its value is 0 if
|
||||
* the cgroup and its descendants contain no task; otherwise, 1.
|
||||
* The file also generates kernfs notification which can be
|
||||
* monitored through poll and [di]notify when the value of the
|
||||
* file changes.
|
||||
*
|
||||
* - If mount is requested with sane_behavior but without any
|
||||
* subsystem, the default unified hierarchy is mounted.
|
||||
*
|
||||
* - cpuset: tasks will be kept in empty cpusets when hotplug happens
|
||||
* and take masks of ancestors with non-empty cpus/mems, instead of
|
||||
* being moved to an ancestor.
|
||||
*
|
||||
* - cpuset: a task can be moved into an empty cpuset, and again it
|
||||
* takes masks of ancestors.
|
||||
*
|
||||
* - memcg: use_hierarchy is on by default and the cgroup file for
|
||||
* the flag is not created.
|
||||
*
|
||||
* - blkcg: blk-throttle becomes properly hierarchical.
|
||||
*
|
||||
* - debug: disallowed on the default hierarchy.
|
||||
*/
|
||||
CGRP_ROOT_SANE_BEHAVIOR = (1 << 0),
|
||||
|
||||
CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), /* __DEVEL__sane_behavior specified */
|
||||
CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */
|
||||
CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */
|
||||
|
||||
/* mount options live below bit 16 */
|
||||
CGRP_ROOT_OPTION_MASK = (1 << 16) - 1,
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -440,9 +384,11 @@ struct css_set {
|
||||
enum {
|
||||
CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */
|
||||
CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */
|
||||
CFTYPE_INSANE = (1 << 2), /* don't create if sane_behavior */
|
||||
CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
|
||||
CFTYPE_ONLY_ON_DFL = (1 << 4), /* only on default hierarchy */
|
||||
|
||||
/* internal flags, do not use outside cgroup core proper */
|
||||
__CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
|
||||
__CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
|
||||
};
|
||||
|
||||
#define MAX_CFTYPE_NAME 64
|
||||
@@ -526,20 +472,64 @@ struct cftype {
|
||||
extern struct cgroup_root cgrp_dfl_root;
|
||||
extern struct css_set init_css_set;
|
||||
|
||||
/**
|
||||
* cgroup_on_dfl - test whether a cgroup is on the default hierarchy
|
||||
* @cgrp: the cgroup of interest
|
||||
*
|
||||
* The default hierarchy is the v2 interface of cgroup and this function
|
||||
* can be used to test whether a cgroup is on the default hierarchy for
|
||||
* cases where a subsystem should behave differnetly depending on the
|
||||
* interface version.
|
||||
*
|
||||
* The set of behaviors which change on the default hierarchy are still
|
||||
* being determined and the mount option is prefixed with __DEVEL__.
|
||||
*
|
||||
* List of changed behaviors:
|
||||
*
|
||||
* - Mount options "noprefix", "xattr", "clone_children", "release_agent"
|
||||
* and "name" are disallowed.
|
||||
*
|
||||
* - When mounting an existing superblock, mount options should match.
|
||||
*
|
||||
* - Remount is disallowed.
|
||||
*
|
||||
* - rename(2) is disallowed.
|
||||
*
|
||||
* - "tasks" is removed. Everything should be at process granularity. Use
|
||||
* "cgroup.procs" instead.
|
||||
*
|
||||
* - "cgroup.procs" is not sorted. pids will be unique unless they got
|
||||
* recycled inbetween reads.
|
||||
*
|
||||
* - "release_agent" and "notify_on_release" are removed. Replacement
|
||||
* notification mechanism will be implemented.
|
||||
*
|
||||
* - "cgroup.clone_children" is removed.
|
||||
*
|
||||
* - "cgroup.subtree_populated" is available. Its value is 0 if the cgroup
|
||||
* and its descendants contain no task; otherwise, 1. The file also
|
||||
* generates kernfs notification which can be monitored through poll and
|
||||
* [di]notify when the value of the file changes.
|
||||
*
|
||||
* - cpuset: tasks will be kept in empty cpusets when hotplug happens and
|
||||
* take masks of ancestors with non-empty cpus/mems, instead of being
|
||||
* moved to an ancestor.
|
||||
*
|
||||
* - cpuset: a task can be moved into an empty cpuset, and again it takes
|
||||
* masks of ancestors.
|
||||
*
|
||||
* - memcg: use_hierarchy is on by default and the cgroup file for the flag
|
||||
* is not created.
|
||||
*
|
||||
* - blkcg: blk-throttle becomes properly hierarchical.
|
||||
*
|
||||
* - debug: disallowed on the default hierarchy.
|
||||
*/
|
||||
static inline bool cgroup_on_dfl(const struct cgroup *cgrp)
|
||||
{
|
||||
return cgrp->root == &cgrp_dfl_root;
|
||||
}
|
||||
|
||||
/*
|
||||
* See the comment above CGRP_ROOT_SANE_BEHAVIOR for details. This
|
||||
* function can be called as long as @cgrp is accessible.
|
||||
*/
|
||||
static inline bool cgroup_sane_behavior(const struct cgroup *cgrp)
|
||||
{
|
||||
return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR;
|
||||
}
|
||||
|
||||
/* no synchronization, the result can only be used as a hint */
|
||||
static inline bool cgroup_has_tasks(struct cgroup *cgrp)
|
||||
{
|
||||
@@ -602,7 +592,8 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
|
||||
|
||||
char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
|
||||
|
||||
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
|
||||
int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
|
||||
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
|
||||
int cgroup_rm_cftypes(struct cftype *cfts);
|
||||
|
||||
bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
|
||||
@@ -634,6 +625,7 @@ struct cgroup_subsys {
|
||||
int (*css_online)(struct cgroup_subsys_state *css);
|
||||
void (*css_offline)(struct cgroup_subsys_state *css);
|
||||
void (*css_free)(struct cgroup_subsys_state *css);
|
||||
void (*css_reset)(struct cgroup_subsys_state *css);
|
||||
|
||||
int (*can_attach)(struct cgroup_subsys_state *css,
|
||||
struct cgroup_taskset *tset);
|
||||
@@ -682,8 +674,21 @@ struct cgroup_subsys {
|
||||
*/
|
||||
struct list_head cfts;
|
||||
|
||||
/* base cftypes, automatically registered with subsys itself */
|
||||
struct cftype *base_cftypes;
|
||||
/*
|
||||
* Base cftypes which are automatically registered. The two can
|
||||
* point to the same array.
|
||||
*/
|
||||
struct cftype *dfl_cftypes; /* for the default hierarchy */
|
||||
struct cftype *legacy_cftypes; /* for the legacy hierarchies */
|
||||
|
||||
/*
|
||||
* A subsystem may depend on other subsystems. When such subsystem
|
||||
* is enabled on a cgroup, the depended-upon subsystems are enabled
|
||||
* together if available. Subsystems enabled due to dependency are
|
||||
* not visible to userland until explicitly enabled. The following
|
||||
* specifies the mask of subsystems that this one depends on.
|
||||
*/
|
||||
unsigned int depends_on;
|
||||
};
|
||||
|
||||
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
|
||||
|
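cgroup_on_dfl(), documented above, lets a controller branch on whether it sits on the unified (v2) hierarchy, and the new dfl_cftypes/legacy_cftypes fields split its control files per hierarchy type. A hedged sketch of how a controller might wire this up; the subsystem name, file names and callback are invented for illustration:

#include <linux/cgroup.h>

static struct cftype example_dfl_files[] = {
	{ .name = "example.max" },
	{ }	/* terminator */
};

static struct cftype example_legacy_files[] = {
	{ .name = "example.limit_in_bytes" },
	{ }	/* terminator */
};

static int example_css_online(struct cgroup_subsys_state *css)
{
	if (cgroup_on_dfl(css->cgroup)) {
		/* behaviour that only applies on the default hierarchy */
	}
	return 0;
}

struct cgroup_subsys example_cgrp_subsys = {
	.css_online	= example_css_online,
	.dfl_cftypes	= example_dfl_files,
	.legacy_cftypes	= example_legacy_files,
};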
@@ -619,5 +619,10 @@ static inline void clk_writel(u32 val, u32 __iomem *reg)

#endif /* platform dependent I/O accessors */

#ifdef CONFIG_DEBUG_FS
struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode,
void *data, const struct file_operations *fops);
#endif

#endif /* CONFIG_COMMON_CLK */
#endif /* CLK_PROVIDER_H */
include/linux/clk/clk-conf.h (new file, 20 lines)
@@ -0,0 +1,20 @@
/*
* Copyright (C) 2014 Samsung Electronics Co., Ltd.
* Sylwester Nawrocki <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/

struct device_node;

#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
int of_clk_set_defaults(struct device_node *node, bool clk_supplier);
#else
static inline int of_clk_set_defaults(struct device_node *node,
bool clk_supplier)
{
return 0;
}
#endif
@@ -162,7 +162,6 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
|
||||
* @archdata: arch-specific data
|
||||
* @suspend: suspend function for the clocksource, if necessary
|
||||
* @resume: resume function for the clocksource, if necessary
|
||||
* @cycle_last: most recent cycle counter value seen by ::read()
|
||||
* @owner: module reference, must be set by clocksource in modules
|
||||
*/
|
||||
struct clocksource {
|
||||
@@ -171,7 +170,6 @@ struct clocksource {
|
||||
* clocksource itself is cacheline aligned.
|
||||
*/
|
||||
cycle_t (*read)(struct clocksource *cs);
|
||||
cycle_t cycle_last;
|
||||
cycle_t mask;
|
||||
u32 mult;
|
||||
u32 shift;
|
||||
|
include/linux/cma.h (new file, 27 lines)
@@ -0,0 +1,27 @@
#ifndef __CMA_H__
#define __CMA_H__

/*
* There is always at least global CMA area and a few optional
* areas configured in kernel .config.
*/
#ifdef CONFIG_CMA_AREAS
#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)

#else
#define MAX_CMA_AREAS (0)

#endif

struct cma;

extern phys_addr_t cma_get_base(struct cma *cma);
extern unsigned long cma_get_size(struct cma *cma);

extern int __init cma_declare_contiguous(phys_addr_t size,
phys_addr_t base, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
bool fixed, struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
extern bool cma_release(struct cma *cma, struct page *pages, int count);
#endif
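The new linux/cma.h above exposes the CMA allocator directly. A hedged sketch of the intended call sequence based only on the prototypes shown: reserve an area early in boot, then allocate and release page ranges at runtime. Sizes, counts and alignment are placeholders:

#include <linux/cma.h>
#include <linux/init.h>
#include <linux/sizes.h>

static struct cma *example_cma;

/* Early boot: 16 MiB anywhere below the default limit, natural alignment,
 * order_per_bit 0, not at a fixed address. */
static int __init example_cma_reserve(void)
{
	return cma_declare_contiguous(SZ_16M, 0, 0, 0, 0, false, &example_cma);
}

/* Runtime: grab and release a physically contiguous chunk. */
static struct page *example_cma_get_pages(void)
{
	return cma_alloc(example_cma, 16, 0);	/* 16 pages, any alignment */
}

static void example_cma_put_pages(struct page *pages)
{
	cma_release(example_cma, pages, 16);
}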
@@ -29,4 +29,11 @@ void component_master_del(struct device *,
int component_master_add_child(struct master *master,
int (*compare)(struct device *, void *), void *compare_data);

struct component_match;

int component_master_add_with_match(struct device *,
const struct component_master_ops *, struct component_match *);
void component_match_add(struct device *, struct component_match **,
int (*compare)(struct device *, void *), void *compare_data);

#endif
@@ -22,6 +22,7 @@
|
||||
#define LINUX_CPER_H
|
||||
|
||||
#include <linux/uuid.h>
|
||||
#include <linux/trace_seq.h>
|
||||
|
||||
/* CPER record signature and the size */
|
||||
#define CPER_SIG_RECORD "CPER"
|
||||
@@ -35,6 +36,13 @@
|
||||
*/
|
||||
#define CPER_RECORD_REV 0x0100
|
||||
|
||||
/*
|
||||
* CPER record length contains the CPER fields which are relevant for further
|
||||
* handling of a memory error in userspace (we don't carry all the fields
|
||||
* defined in the UEFI spec because some of them don't make any sense.)
|
||||
* Currently, a length of 256 should be more than enough.
|
||||
*/
|
||||
#define CPER_REC_LEN 256
|
||||
/*
|
||||
* Severity difinition for error_severity in struct cper_record_header
|
||||
* and section_severity in struct cper_section_descriptor
|
||||
@@ -356,6 +364,24 @@ struct cper_sec_mem_err {
|
||||
__u16 mem_dev_handle; /* module handle in UEFI 2.4 */
|
||||
};
|
||||
|
||||
struct cper_mem_err_compact {
|
||||
__u64 validation_bits;
|
||||
__u16 node;
|
||||
__u16 card;
|
||||
__u16 module;
|
||||
__u16 bank;
|
||||
__u16 device;
|
||||
__u16 row;
|
||||
__u16 column;
|
||||
__u16 bit_pos;
|
||||
__u64 requestor_id;
|
||||
__u64 responder_id;
|
||||
__u64 target_id;
|
||||
__u16 rank;
|
||||
__u16 mem_array_handle;
|
||||
__u16 mem_dev_handle;
|
||||
};
|
||||
|
||||
struct cper_sec_pcie {
|
||||
__u64 validation_bits;
|
||||
__u32 port_type;
|
||||
@@ -395,7 +421,13 @@ struct cper_sec_pcie {
|
||||
#pragma pack()
|
||||
|
||||
u64 cper_next_record_id(void);
|
||||
const char *cper_severity_str(unsigned int);
|
||||
const char *cper_mem_err_type_str(unsigned int);
|
||||
void cper_print_bits(const char *prefix, unsigned int bits,
|
||||
const char * const strs[], unsigned int strs_size);
|
||||
void cper_mem_err_pack(const struct cper_sec_mem_err *,
|
||||
struct cper_mem_err_compact *);
|
||||
const char *cper_mem_err_unpack(struct trace_seq *,
|
||||
struct cper_mem_err_compact *);
|
||||
|
||||
#endif
|
||||
|
@@ -176,6 +176,7 @@ static inline void disable_cpufreq(void) { }

#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
#define CPUFREQ_RELATION_C 2 /* closest frequency to target */

struct freq_attr {
struct attribute attr;
@@ -8,8 +8,8 @@
|
||||
#include <linux/types.h>
|
||||
#include <linux/bitrev.h>
|
||||
|
||||
extern u32 crc32_le(u32 crc, unsigned char const *p, size_t len);
|
||||
extern u32 crc32_be(u32 crc, unsigned char const *p, size_t len);
|
||||
u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len);
|
||||
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len);
|
||||
|
||||
/**
|
||||
* crc32_le_combine - Combine two crc32 check values into one. For two
|
||||
@@ -29,9 +29,14 @@ extern u32 crc32_be(u32 crc, unsigned char const *p, size_t len);
|
||||
* with the same initializer as crc1, and crc2 seed was 0. See
|
||||
* also crc32_combine_test().
|
||||
*/
|
||||
extern u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2);
|
||||
u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len);
|
||||
|
||||
extern u32 __crc32c_le(u32 crc, unsigned char const *p, size_t len);
|
||||
static inline u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
|
||||
{
|
||||
return crc32_le_shift(crc1, len2) ^ crc2;
|
||||
}
|
||||
|
||||
u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len);
|
||||
|
||||
/**
|
||||
* __crc32c_le_combine - Combine two crc32c check values into one. For two
|
||||
@@ -51,7 +56,12 @@ extern u32 __crc32c_le(u32 crc, unsigned char const *p, size_t len);
|
||||
* seeded with the same initializer as crc1, and crc2 seed
|
||||
* was 0. See also crc32c_combine_test().
|
||||
*/
|
||||
extern u32 __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2);
|
||||
u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len);
|
||||
|
||||
static inline u32 __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2)
|
||||
{
|
||||
return __crc32c_le_shift(crc1, len2) ^ crc2;
|
||||
}
|
||||
|
||||
#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length)
|
||||
|
||||
|
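crc32_le_combine() above becomes a trivial inline built on crc32_le_shift(). A hedged sketch of the property it provides, following the kernel-doc wording: the CRC of a concatenation can be computed from per-chunk CRCs. The buffers and the zero seed are illustrative:

#include <linux/bug.h>
#include <linux/crc32.h>
#include <linux/types.h>

static void example_crc32_combine(const u8 *buf1, size_t len1,
				  const u8 *buf2, size_t len2)
{
	u32 whole, part1, part2, combined;

	/* CRC each part independently; the second part is seeded with 0. */
	part1 = crc32_le(0, buf1, len1);
	part2 = crc32_le(0, buf2, len2);
	combined = crc32_le_combine(part1, part2, len2);

	/* Reference: run the CRC straight across both buffers. */
	whole = crc32_le(crc32_le(0, buf1, len1), buf2, len2);

	WARN_ON(whole != combined);	/* both methods must agree */
}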
@@ -258,6 +258,15 @@ static inline void put_cred(const struct cred *_cred)
|
||||
#define current_cred() \
|
||||
rcu_dereference_protected(current->cred, 1)
|
||||
|
||||
/**
|
||||
* current_real_cred - Access the current task's objective credentials
|
||||
*
|
||||
* Access the objective credentials of the current task. RCU-safe,
|
||||
* since nobody else can modify it.
|
||||
*/
|
||||
#define current_real_cred() \
|
||||
rcu_dereference_protected(current->real_cred, 1)
|
||||
|
||||
/**
|
||||
* __task_cred - Access a task's objective credentials
|
||||
* @task: The task to query
|
||||
|
@@ -710,9 +710,9 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req)
|
||||
|
||||
static inline void ablkcipher_request_set_callback(
|
||||
struct ablkcipher_request *req,
|
||||
u32 flags, crypto_completion_t complete, void *data)
|
||||
u32 flags, crypto_completion_t compl, void *data)
|
||||
{
|
||||
req->base.complete = complete;
|
||||
req->base.complete = compl;
|
||||
req->base.data = data;
|
||||
req->base.flags = flags;
|
||||
}
|
||||
@@ -841,10 +841,10 @@ static inline void aead_request_free(struct aead_request *req)
|
||||
|
||||
static inline void aead_request_set_callback(struct aead_request *req,
|
||||
u32 flags,
|
||||
crypto_completion_t complete,
|
||||
crypto_completion_t compl,
|
||||
void *data)
|
||||
{
|
||||
req->base.complete = complete;
|
||||
req->base.complete = compl;
|
||||
req->base.data = data;
|
||||
req->base.flags = flags;
|
||||
}
|
||||
|
@@ -249,6 +249,7 @@ extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
extern struct dentry *d_find_any_alias(struct inode *inode);
extern struct dentry * d_obtain_alias(struct inode *);
extern struct dentry * d_obtain_root(struct inode *);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
extern void shrink_dcache_for_umount(struct super_block *);
@@ -1,10 +1,10 @@
|
||||
#ifndef DECOMPRESS_BUNZIP2_H
|
||||
#define DECOMPRESS_BUNZIP2_H
|
||||
|
||||
int bunzip2(unsigned char *inbuf, int len,
|
||||
int(*fill)(void*, unsigned int),
|
||||
int(*flush)(void*, unsigned int),
|
||||
int bunzip2(unsigned char *inbuf, long len,
|
||||
long (*fill)(void*, unsigned long),
|
||||
long (*flush)(void*, unsigned long),
|
||||
unsigned char *output,
|
||||
int *pos,
|
||||
long *pos,
|
||||
void(*error)(char *x));
|
||||
#endif
|
||||
|
@@ -1,11 +1,11 @@
|
||||
#ifndef DECOMPRESS_GENERIC_H
|
||||
#define DECOMPRESS_GENERIC_H
|
||||
|
||||
typedef int (*decompress_fn) (unsigned char *inbuf, int len,
|
||||
int(*fill)(void*, unsigned int),
|
||||
int(*flush)(void*, unsigned int),
|
||||
typedef int (*decompress_fn) (unsigned char *inbuf, long len,
|
||||
long (*fill)(void*, unsigned long),
|
||||
long (*flush)(void*, unsigned long),
|
||||
unsigned char *outbuf,
|
||||
int *posp,
|
||||
long *posp,
|
||||
void(*error)(char *x));
|
||||
|
||||
/* inbuf - input buffer
|
||||
@@ -33,7 +33,7 @@ typedef int (*decompress_fn) (unsigned char *inbuf, int len,
|
||||
|
||||
|
||||
/* Utility routine to detect the decompression method */
|
||||
decompress_fn decompress_method(const unsigned char *inbuf, int len,
|
||||
decompress_fn decompress_method(const unsigned char *inbuf, long len,
|
||||
const char **name);
|
||||
|
||||
#endif
|
||||
|
@@ -1,10 +1,10 @@
#ifndef LINUX_DECOMPRESS_INFLATE_H
#define LINUX_DECOMPRESS_INFLATE_H

int gunzip(unsigned char *inbuf, int len,
int(*fill)(void*, unsigned int),
int(*flush)(void*, unsigned int),
int gunzip(unsigned char *inbuf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *output,
int *pos,
long *pos,
void(*error_fn)(char *x));
#endif

@@ -1,10 +1,10 @@
#ifndef DECOMPRESS_UNLZ4_H
#define DECOMPRESS_UNLZ4_H

int unlz4(unsigned char *inbuf, int len,
int(*fill)(void*, unsigned int),
int(*flush)(void*, unsigned int),
int unlz4(unsigned char *inbuf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *output,
int *pos,
long *pos,
void(*error)(char *x));
#endif

@@ -1,11 +1,11 @@
#ifndef DECOMPRESS_UNLZMA_H
#define DECOMPRESS_UNLZMA_H

int unlzma(unsigned char *, int,
int(*fill)(void*, unsigned int),
int(*flush)(void*, unsigned int),
int unlzma(unsigned char *, long,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *output,
int *posp,
long *posp,
void(*error)(char *x)
);

@@ -1,10 +1,10 @@
#ifndef DECOMPRESS_UNLZO_H
#define DECOMPRESS_UNLZO_H

int unlzo(unsigned char *inbuf, int len,
int(*fill)(void*, unsigned int),
int(*flush)(void*, unsigned int),
int unlzo(unsigned char *inbuf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *output,
int *pos,
long *pos,
void(*error)(char *x));
#endif

@@ -10,10 +10,10 @@
#ifndef DECOMPRESS_UNXZ_H
#define DECOMPRESS_UNXZ_H

int unxz(unsigned char *in, int in_size,
int (*fill)(void *dest, unsigned int size),
int (*flush)(void *src, unsigned int size),
unsigned char *out, int *in_used,
int unxz(unsigned char *in, long in_size,
long (*fill)(void *dest, unsigned long size),
long (*flush)(void *src, unsigned long size),
unsigned char *out, long *in_used,
void (*error)(char *x));

#endif
|
@@ -124,7 +124,7 @@ struct bus_type {
|
||||
|
||||
const struct dev_pm_ops *pm;
|
||||
|
||||
struct iommu_ops *iommu_ops;
|
||||
const struct iommu_ops *iommu_ops;
|
||||
|
||||
struct subsys_private *p;
|
||||
struct lock_class_key lock_key;
|
||||
@@ -605,6 +605,10 @@ extern int devres_release_group(struct device *dev, void *id);
|
||||
|
||||
/* managed devm_k.alloc/kfree for device drivers */
|
||||
extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
|
||||
extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
|
||||
va_list ap);
|
||||
extern char *devm_kasprintf(struct device *dev, gfp_t gfp,
|
||||
const char *fmt, ...);
|
||||
static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
|
||||
{
|
||||
return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
|
||||
@@ -631,8 +635,6 @@ extern unsigned long devm_get_free_pages(struct device *dev,
|
||||
extern void devm_free_pages(struct device *dev, unsigned long addr);
|
||||
|
||||
void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
|
||||
void __iomem *devm_request_and_ioremap(struct device *dev,
|
||||
struct resource *res);
|
||||
|
||||
/* allows to add/remove a custom action to devres stack */
|
||||
int devm_add_action(struct device *dev, void (*action)(void *), void *data);
|
||||
|
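As a usage sketch only: the newly declared devm_kasprintf() paired with devm_kzalloc() in a probe path. The foo_* names and the "%s-ctrl" label format are hypothetical, not taken from the patch.

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {			/* hypothetical driver state */
	const char *label;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Formatted, device-managed string: freed automatically on unbind. */
	priv->label = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s-ctrl",
				     dev_name(&pdev->dev));
	if (!priv->label)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	return 0;
}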
@@ -30,6 +30,8 @@
|
||||
#include <linux/list.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/fence.h>
|
||||
#include <linux/wait.h>
|
||||
|
||||
struct device;
|
||||
struct dma_buf;
|
||||
@@ -115,6 +117,7 @@ struct dma_buf_ops {
|
||||
* @exp_name: name of the exporter; useful for debugging.
|
||||
* @list_node: node for dma_buf accounting and debugging.
|
||||
* @priv: exporter specific private data for this buffer object.
|
||||
* @resv: reservation object linked to this dma-buf
|
||||
*/
|
||||
struct dma_buf {
|
||||
size_t size;
|
||||
@@ -128,6 +131,17 @@ struct dma_buf {
|
||||
const char *exp_name;
|
||||
struct list_head list_node;
|
||||
void *priv;
|
||||
struct reservation_object *resv;
|
||||
|
||||
/* poll support */
|
||||
wait_queue_head_t poll;
|
||||
|
||||
struct dma_buf_poll_cb_t {
|
||||
struct fence_cb cb;
|
||||
wait_queue_head_t *poll;
|
||||
|
||||
unsigned long active;
|
||||
} cb_excl, cb_shared;
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -168,10 +182,11 @@ void dma_buf_detach(struct dma_buf *dmabuf,
|
||||
struct dma_buf_attachment *dmabuf_attach);
|
||||
|
||||
struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
|
||||
size_t size, int flags, const char *);
|
||||
size_t size, int flags, const char *,
|
||||
struct reservation_object *);
|
||||
|
||||
#define dma_buf_export(priv, ops, size, flags) \
|
||||
dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME)
|
||||
#define dma_buf_export(priv, ops, size, flags, resv) \
|
||||
dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME, resv)
|
||||
|
||||
int dma_buf_fd(struct dma_buf *dmabuf, int flags);
|
||||
struct dma_buf *dma_buf_get(int fd);
|
||||
|
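A hedged sketch of an exporter calling the extended dma_buf_export() macro above. foo_dmabuf_ops and the O_RDWR flag choice are placeholders, and passing a NULL reservation object assumes the core falls back to allocating one, which this hunk does not spell out.

#include <linux/dma-buf.h>
#include <linux/fcntl.h>

/* Hypothetical exporter ops; every callback would be filled in for real use. */
static const struct dma_buf_ops foo_dmabuf_ops;

static struct dma_buf *foo_export(void *priv, size_t size)
{
	/* NULL resv: assumed to mean "let the dma-buf core provide one". */
	return dma_buf_export(priv, &foo_dmabuf_ops, size, O_RDWR, NULL);
}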
@@ -53,18 +53,13 @@
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/device.h>
|
||||
|
||||
struct cma;
|
||||
struct page;
|
||||
struct device;
|
||||
|
||||
#ifdef CONFIG_DMA_CMA
|
||||
|
||||
/*
|
||||
* There is always at least global CMA area and a few optional device
|
||||
* private areas configured in kernel .config.
|
||||
*/
|
||||
#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
|
||||
|
||||
extern struct cma *dma_contiguous_default_area;
|
||||
|
||||
static inline struct cma *dev_get_cma_area(struct device *dev)
|
||||
@@ -123,8 +118,6 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
|
||||
|
||||
#else
|
||||
|
||||
#define MAX_CMA_AREAS (0)
|
||||
|
||||
static inline struct cma *dev_get_cma_area(struct device *dev)
|
||||
{
|
||||
return NULL;
|
||||
|
@@ -37,7 +37,6 @@
|
||||
*/
|
||||
typedef s32 dma_cookie_t;
|
||||
#define DMA_MIN_COOKIE 1
|
||||
#define DMA_MAX_COOKIE INT_MAX
|
||||
|
||||
static inline int dma_submit_error(dma_cookie_t cookie)
|
||||
{
|
||||
@@ -299,6 +298,7 @@ enum dma_slave_buswidth {
|
||||
DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
|
||||
DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
|
||||
DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
|
||||
DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
|
||||
DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
|
||||
DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
|
||||
};
|
||||
@@ -670,7 +670,7 @@ struct dma_device {
|
||||
struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
|
||||
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
|
||||
size_t period_len, enum dma_transfer_direction direction,
|
||||
unsigned long flags, void *context);
|
||||
unsigned long flags);
|
||||
struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
|
||||
struct dma_chan *chan, struct dma_interleaved_template *xt,
|
||||
unsigned long flags);
|
||||
@@ -745,7 +745,7 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
|
||||
unsigned long flags)
|
||||
{
|
||||
return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
|
||||
period_len, dir, flags, NULL);
|
||||
period_len, dir, flags);
|
||||
}
|
||||
|
||||
static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
|
||||
|
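For context, a minimal caller sketch for dmaengine_prep_dma_cyclic(): the inline wrapper keeps its signature, only the driver-side op loses the trailing context argument. The foo_* function and the parameter values are hypothetical.

#include <linux/dmaengine.h>

static int foo_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
			    size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* Same external call as before this patch; only drivers implementing
	 * device_prep_dma_cyclic need to drop the 'context' parameter. */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}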
@@ -114,22 +114,30 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
|
||||
/* Intel IOMMU detection */
|
||||
extern int detect_intel_iommu(void);
|
||||
extern int enable_drhd_fault_handling(void);
|
||||
#else
|
||||
struct dmar_pci_notify_info;
|
||||
static inline int detect_intel_iommu(void)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static inline int dmar_table_init(void)
|
||||
#ifdef CONFIG_INTEL_IOMMU
|
||||
extern int iommu_detected, no_iommu;
|
||||
extern int intel_iommu_init(void);
|
||||
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header);
|
||||
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header);
|
||||
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
|
||||
#else /* !CONFIG_INTEL_IOMMU: */
|
||||
static inline int intel_iommu_init(void) { return -ENODEV; }
|
||||
static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header)
|
||||
{
|
||||
return -ENODEV;
|
||||
return 0;
|
||||
}
|
||||
static inline int enable_drhd_fault_handling(void)
|
||||
static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header)
|
||||
{
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
#endif /* !CONFIG_DMAR_TABLE */
|
||||
static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_INTEL_IOMMU */
|
||||
|
||||
#endif /* CONFIG_DMAR_TABLE */
|
||||
|
||||
struct irte {
|
||||
union {
|
||||
@@ -177,26 +185,4 @@ extern int dmar_set_interrupt(struct intel_iommu *iommu);
|
||||
extern irqreturn_t dmar_fault(int irq, void *dev_id);
|
||||
extern int arch_setup_dmar_msi(unsigned int irq);
|
||||
|
||||
#ifdef CONFIG_INTEL_IOMMU
|
||||
extern int iommu_detected, no_iommu;
|
||||
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header);
|
||||
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header);
|
||||
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
|
||||
extern int intel_iommu_init(void);
|
||||
#else /* !CONFIG_INTEL_IOMMU: */
|
||||
static inline int intel_iommu_init(void) { return -ENODEV; }
|
||||
static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_INTEL_IOMMU */
|
||||
|
||||
#endif /* __DMAR_H__ */
|
||||
|
@@ -52,7 +52,7 @@
|
||||
#endif
|
||||
|
||||
extern const char *drbd_buildtag(void);
|
||||
#define REL_VERSION "8.4.3"
|
||||
#define REL_VERSION "8.4.5"
|
||||
#define API_VERSION 1
|
||||
#define PRO_VERSION_MIN 86
|
||||
#define PRO_VERSION_MAX 101
|
||||
@@ -245,7 +245,7 @@ enum drbd_disk_state {
|
||||
D_DISKLESS,
|
||||
D_ATTACHING, /* In the process of reading the meta-data */
|
||||
D_FAILED, /* Becomes D_DISKLESS as soon as we told it the peer */
|
||||
/* when >= D_FAILED it is legal to access mdev->bc */
|
||||
/* when >= D_FAILED it is legal to access mdev->ldev */
|
||||
D_NEGOTIATING, /* Late attaching state, we need to talk to the peer */
|
||||
D_INCONSISTENT,
|
||||
D_OUTDATED,
|
||||
|
@@ -171,6 +171,10 @@ GENL_struct(DRBD_NLA_NET_CONF, 5, net_conf,
|
||||
__flg_field(28, DRBD_GENLA_F_MANDATORY | DRBD_F_INVARIANT, tentative)
|
||||
__flg_field_def(29, DRBD_GENLA_F_MANDATORY, use_rle, DRBD_USE_RLE_DEF)
|
||||
/* 9: __u32_field_def(30, DRBD_GENLA_F_MANDATORY, fencing_policy, DRBD_FENCING_DEF) */
|
||||
/* 9: __str_field_def(31, DRBD_GENLA_F_MANDATORY, name, SHARED_SECRET_MAX) */
|
||||
/* 9: __u32_field(32, DRBD_F_REQUIRED | DRBD_F_INVARIANT, peer_node_id) */
|
||||
__flg_field_def(33, 0 /* OPTIONAL */, csums_after_crash_only, DRBD_CSUMS_AFTER_CRASH_ONLY_DEF)
|
||||
__u32_field_def(34, 0 /* OPTIONAL */, sock_check_timeo, DRBD_SOCKET_CHECK_TIMEO_DEF)
|
||||
)
|
||||
|
||||
GENL_struct(DRBD_NLA_SET_ROLE_PARMS, 6, set_role_parms,
|
||||
|
@@ -214,6 +214,7 @@
|
||||
#define DRBD_ALLOW_TWO_PRIMARIES_DEF 0
|
||||
#define DRBD_ALWAYS_ASBP_DEF 0
|
||||
#define DRBD_USE_RLE_DEF 1
|
||||
#define DRBD_CSUMS_AFTER_CRASH_ONLY_DEF 0
|
||||
|
||||
#define DRBD_AL_STRIPES_MIN 1
|
||||
#define DRBD_AL_STRIPES_MAX 1024
|
||||
@@ -224,4 +225,9 @@
|
||||
#define DRBD_AL_STRIPE_SIZE_MAX 16777216
|
||||
#define DRBD_AL_STRIPE_SIZE_DEF 32
|
||||
#define DRBD_AL_STRIPE_SIZE_SCALE 'k' /* kilobytes */
|
||||
|
||||
#define DRBD_SOCKET_CHECK_TIMEO_MIN 0
|
||||
#define DRBD_SOCKET_CHECK_TIMEO_MAX DRBD_PING_TIMEO_MAX
|
||||
#define DRBD_SOCKET_CHECK_TIMEO_DEF 0
|
||||
#define DRBD_SOCKET_CHECK_TIMEO_SCALE '1'
|
||||
#endif
|
||||
|
@@ -194,6 +194,9 @@ static inline char *mc_event_error_type(const unsigned int err_type)
|
||||
* @MEM_DDR3: DDR3 RAM
|
||||
* @MEM_RDDR3: Registered DDR3 RAM
|
||||
* This is a variant of the DDR3 memories.
|
||||
* @MEM_DDR4: DDR4 RAM
|
||||
* @MEM_RDDR4: Registered DDR4 RAM
|
||||
* This is a variant of the DDR4 memories.
|
||||
*/
|
||||
enum mem_type {
|
||||
MEM_EMPTY = 0,
|
||||
@@ -213,6 +216,8 @@ enum mem_type {
|
||||
MEM_XDR,
|
||||
MEM_DDR3,
|
||||
MEM_RDDR3,
|
||||
MEM_DDR4,
|
||||
MEM_RDDR4,
|
||||
};
|
||||
|
||||
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
|
||||
|
@@ -20,6 +20,7 @@
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/pfn.h>
|
||||
#include <linux/pstore.h>
|
||||
#include <linux/reboot.h>
|
||||
|
||||
#include <asm/page.h>
|
||||
|
||||
@@ -521,6 +522,8 @@ typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules,
|
||||
int *reset_type);
|
||||
typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size);
|
||||
|
||||
void efi_native_runtime_setup(void);
|
||||
|
||||
/*
|
||||
* EFI Configuration Table and GUID definitions
|
||||
*/
|
||||
@@ -870,11 +873,13 @@ extern int __init efi_uart_console_only (void);
|
||||
extern void efi_initialize_iomem_resources(struct resource *code_resource,
|
||||
struct resource *data_resource, struct resource *bss_resource);
|
||||
extern void efi_get_time(struct timespec *now);
|
||||
extern int efi_set_rtc_mmss(const struct timespec *now);
|
||||
extern void efi_reserve_boot_services(void);
|
||||
extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose);
|
||||
extern struct efi_memory_map memmap;
|
||||
|
||||
extern int efi_reboot_quirk_mode;
|
||||
extern bool efi_poweroff_required(void);
|
||||
|
||||
/* Iterate through an efi_memory_map */
|
||||
#define for_each_efi_memory_desc(m, md) \
|
||||
for ((md) = (m)->map; \
|
||||
@@ -916,7 +921,8 @@ extern int __init efi_setup_pcdp_console(char *);
|
||||
#define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */
|
||||
#define EFI_MEMMAP 4 /* Can we use EFI memory map? */
|
||||
#define EFI_64BIT 5 /* Is the firmware 64-bit? */
|
||||
#define EFI_ARCH_1 6 /* First arch-specific bit */
|
||||
#define EFI_PARAVIRT 6 /* Access is via a paravirt interface */
|
||||
#define EFI_ARCH_1 7 /* First arch-specific bit */
|
||||
|
||||
#ifdef CONFIG_EFI
|
||||
/*
|
||||
@@ -926,11 +932,14 @@ static inline bool efi_enabled(int feature)
|
||||
{
|
||||
return test_bit(feature, &efi.flags) != 0;
|
||||
}
|
||||
extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
|
||||
#else
|
||||
static inline bool efi_enabled(int feature)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
static inline void
|
||||
efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {}
|
||||
#endif
|
||||
|
||||
/*
|
||||
@@ -1031,12 +1040,8 @@ struct efivar_operations {
|
||||
struct efivars {
|
||||
/*
|
||||
* ->lock protects two things:
|
||||
* 1) ->list - adds, removals, reads, writes
|
||||
* 2) ops.[gs]et_variable() calls.
|
||||
* It must not be held when creating sysfs entries or calling kmalloc.
|
||||
* ops.get_next_variable() is only called from register_efivars()
|
||||
* or efivar_update_sysfs_entries(),
|
||||
* which is protected by the BKL, so that path is safe.
|
||||
* 1) efivarfs_list and efivars_sysfs_list
|
||||
* 2) ->ops calls
|
||||
*/
|
||||
spinlock_t lock;
|
||||
struct kset *kset;
|
||||
@@ -1151,6 +1156,9 @@ int efivars_sysfs_init(void);
|
||||
#ifdef CONFIG_EFI_RUNTIME_MAP
|
||||
int efi_runtime_map_init(struct kobject *);
|
||||
void efi_runtime_map_setup(void *, int, u32);
|
||||
int efi_get_runtime_map_size(void);
|
||||
int efi_get_runtime_map_desc_size(void);
|
||||
int efi_runtime_map_copy(void *buf, size_t bufsz);
|
||||
#else
|
||||
static inline int efi_runtime_map_init(struct kobject *kobj)
|
||||
{
|
||||
@@ -1159,6 +1167,64 @@ static inline int efi_runtime_map_init(struct kobject *kobj)
|
||||
|
||||
static inline void
|
||||
efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) {}
|
||||
|
||||
static inline int efi_get_runtime_map_size(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int efi_get_runtime_map_desc_size(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int efi_runtime_map_copy(void *buf, size_t bufsz)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/* prototypes shared between arch specific and generic stub code */
|
||||
|
||||
#define pr_efi(sys_table, msg) efi_printk(sys_table, "EFI stub: "msg)
|
||||
#define pr_efi_err(sys_table, msg) efi_printk(sys_table, "EFI stub: ERROR: "msg)
|
||||
|
||||
void efi_printk(efi_system_table_t *sys_table_arg, char *str);
|
||||
|
||||
void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
|
||||
unsigned long addr);
|
||||
|
||||
char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
|
||||
efi_loaded_image_t *image, int *cmd_line_len);
|
||||
|
||||
efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
|
||||
efi_memory_desc_t **map,
|
||||
unsigned long *map_size,
|
||||
unsigned long *desc_size,
|
||||
u32 *desc_ver,
|
||||
unsigned long *key_ptr);
|
||||
|
||||
efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
|
||||
unsigned long size, unsigned long align,
|
||||
unsigned long *addr);
|
||||
|
||||
efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
|
||||
unsigned long size, unsigned long align,
|
||||
unsigned long *addr, unsigned long max);
|
||||
|
||||
efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
|
||||
unsigned long *image_addr,
|
||||
unsigned long image_size,
|
||||
unsigned long alloc_size,
|
||||
unsigned long preferred_addr,
|
||||
unsigned long alignment);
|
||||
|
||||
efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
|
||||
efi_loaded_image_t *image,
|
||||
char *cmd_line, char *option_string,
|
||||
unsigned long max_addr,
|
||||
unsigned long *load_addr,
|
||||
unsigned long *load_size);
|
||||
|
||||
#endif /* _LINUX_EFI_H */
|
||||
|
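A small sketch of the feature-bit test pattern these efi.h hunks extend. EFI_RUNTIME_SERVICES and the new EFI_PARAVIRT flag come from the header; the surrounding policy function and its decision are hypothetical.

#include <linux/efi.h>

static bool foo_can_use_efi_runtime(void)
{
	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		return false;

	/* EFI_PARAVIRT marks access that goes through a paravirt interface;
	 * skipping it here is just an example policy for this sketch. */
	if (efi_enabled(EFI_PARAVIRT))
		return false;

	return true;
}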
287
include/linux/extcon/sm5502.h
Normal file
@@ -0,0 +1,287 @@
|
||||
/*
|
||||
* sm5502.h
|
||||
*
|
||||
* Copyright (c) 2014 Samsung Electronics Co., Ltd
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_EXTCON_SM5502_H
|
||||
#define __LINUX_EXTCON_SM5502_H
|
||||
|
||||
enum sm5502_types {
|
||||
TYPE_SM5502,
|
||||
};
|
||||
|
||||
/* SM5502 registers */
|
||||
enum sm5502_reg {
|
||||
SM5502_REG_DEVICE_ID = 0x01,
|
||||
SM5502_REG_CONTROL,
|
||||
SM5502_REG_INT1,
|
||||
SM5502_REG_INT2,
|
||||
SM5502_REG_INTMASK1,
|
||||
SM5502_REG_INTMASK2,
|
||||
SM5502_REG_ADC,
|
||||
SM5502_REG_TIMING_SET1,
|
||||
SM5502_REG_TIMING_SET2,
|
||||
SM5502_REG_DEV_TYPE1,
|
||||
SM5502_REG_DEV_TYPE2,
|
||||
SM5502_REG_BUTTON1,
|
||||
SM5502_REG_BUTTON2,
|
||||
SM5502_REG_CAR_KIT_STATUS,
|
||||
SM5502_REG_RSVD1,
|
||||
SM5502_REG_RSVD2,
|
||||
SM5502_REG_RSVD3,
|
||||
SM5502_REG_RSVD4,
|
||||
SM5502_REG_MANUAL_SW1,
|
||||
SM5502_REG_MANUAL_SW2,
|
||||
SM5502_REG_DEV_TYPE3,
|
||||
SM5502_REG_RSVD5,
|
||||
SM5502_REG_RSVD6,
|
||||
SM5502_REG_RSVD7,
|
||||
SM5502_REG_RSVD8,
|
||||
SM5502_REG_RSVD9,
|
||||
SM5502_REG_RESET,
|
||||
SM5502_REG_RSVD10,
|
||||
SM5502_REG_RESERVED_ID1,
|
||||
SM5502_REG_RSVD11,
|
||||
SM5502_REG_RSVD12,
|
||||
SM5502_REG_RESERVED_ID2,
|
||||
SM5502_REG_RSVD13,
|
||||
SM5502_REG_OCP,
|
||||
SM5502_REG_RSVD14,
|
||||
SM5502_REG_RSVD15,
|
||||
SM5502_REG_RSVD16,
|
||||
SM5502_REG_RSVD17,
|
||||
SM5502_REG_RSVD18,
|
||||
SM5502_REG_RSVD19,
|
||||
SM5502_REG_RSVD20,
|
||||
SM5502_REG_RSVD21,
|
||||
SM5502_REG_RSVD22,
|
||||
SM5502_REG_RSVD23,
|
||||
SM5502_REG_RSVD24,
|
||||
SM5502_REG_RSVD25,
|
||||
SM5502_REG_RSVD26,
|
||||
SM5502_REG_RSVD27,
|
||||
SM5502_REG_RSVD28,
|
||||
SM5502_REG_RSVD29,
|
||||
SM5502_REG_RSVD30,
|
||||
SM5502_REG_RSVD31,
|
||||
SM5502_REG_RSVD32,
|
||||
SM5502_REG_RSVD33,
|
||||
SM5502_REG_RSVD34,
|
||||
SM5502_REG_RSVD35,
|
||||
SM5502_REG_RSVD36,
|
||||
SM5502_REG_RESERVED_ID3,
|
||||
|
||||
SM5502_REG_END,
|
||||
};
|
||||
|
||||
/* Define SM5502 MASK/SHIFT constant */
|
||||
#define SM5502_REG_DEVICE_ID_VENDOR_SHIFT 0
|
||||
#define SM5502_REG_DEVICE_ID_VERSION_SHIFT 3
|
||||
#define SM5502_REG_DEVICE_ID_VENDOR_MASK (0x3 << SM5502_REG_DEVICE_ID_VENDOR_SHIFT)
|
||||
#define SM5502_REG_DEVICE_ID_VERSION_MASK (0x1f << SM5502_REG_DEVICE_ID_VERSION_SHIFT)
|
||||
|
||||
#define SM5502_REG_CONTROL_MASK_INT_SHIFT 0
|
||||
#define SM5502_REG_CONTROL_WAIT_SHIFT 1
|
||||
#define SM5502_REG_CONTROL_MANUAL_SW_SHIFT 2
|
||||
#define SM5502_REG_CONTROL_RAW_DATA_SHIFT 3
|
||||
#define SM5502_REG_CONTROL_SW_OPEN_SHIFT 4
|
||||
#define SM5502_REG_CONTROL_MASK_INT_MASK (0x1 << SM5502_REG_CONTROL_MASK_INT_SHIFT)
|
||||
#define SM5502_REG_CONTROL_WAIT_MASK (0x1 << SM5502_REG_CONTROL_WAIT_SHIFT)
|
||||
#define SM5502_REG_CONTROL_MANUAL_SW_MASK (0x1 << SM5502_REG_CONTROL_MANUAL_SW_SHIFT)
|
||||
#define SM5502_REG_CONTROL_RAW_DATA_MASK (0x1 << SM5502_REG_CONTROL_RAW_DATA_SHIFT)
|
||||
#define SM5502_REG_CONTROL_SW_OPEN_MASK (0x1 << SM5502_REG_CONTROL_SW_OPEN_SHIFT)
|
||||
|
||||
#define SM5502_REG_INTM1_ATTACH_SHIFT 0
|
||||
#define SM5502_REG_INTM1_DETACH_SHIFT 1
|
||||
#define SM5502_REG_INTM1_KP_SHIFT 2
|
||||
#define SM5502_REG_INTM1_LKP_SHIFT 3
|
||||
#define SM5502_REG_INTM1_LKR_SHIFT 4
|
||||
#define SM5502_REG_INTM1_OVP_EVENT_SHIFT 5
|
||||
#define SM5502_REG_INTM1_OCP_EVENT_SHIFT 6
|
||||
#define SM5502_REG_INTM1_OVP_OCP_DIS_SHIFT 7
|
||||
#define SM5502_REG_INTM1_ATTACH_MASK (0x1 << SM5502_REG_INTM1_ATTACH_SHIFT)
|
||||
#define SM5502_REG_INTM1_DETACH_MASK (0x1 << SM5502_REG_INTM1_DETACH_SHIFT)
|
||||
#define SM5502_REG_INTM1_KP_MASK (0x1 << SM5502_REG_INTM1_KP_SHIFT)
|
||||
#define SM5502_REG_INTM1_LKP_MASK (0x1 << SM5502_REG_INTM1_LKP_SHIFT)
|
||||
#define SM5502_REG_INTM1_LKR_MASK (0x1 << SM5502_REG_INTM1_LKR_SHIFT)
|
||||
#define SM5502_REG_INTM1_OVP_EVENT_MASK (0x1 << SM5502_REG_INTM1_OVP_EVENT_SHIFT)
|
||||
#define SM5502_REG_INTM1_OCP_EVENT_MASK (0x1 << SM5502_REG_INTM1_OCP_EVENT_SHIFT)
|
||||
#define SM5502_REG_INTM1_OVP_OCP_DIS_MASK (0x1 << SM5502_REG_INTM1_OVP_OCP_DIS_SHIFT)
|
||||
|
||||
#define SM5502_REG_INTM2_VBUS_DET_SHIFT 0
|
||||
#define SM5502_REG_INTM2_REV_ACCE_SHIFT 1
|
||||
#define SM5502_REG_INTM2_ADC_CHG_SHIFT 2
|
||||
#define SM5502_REG_INTM2_STUCK_KEY_SHIFT 3
|
||||
#define SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT 4
|
||||
#define SM5502_REG_INTM2_MHL_SHIFT 5
|
||||
#define SM5502_REG_INTM2_VBUS_DET_MASK (0x1 << SM5502_REG_INTM2_VBUS_DET_SHIFT)
|
||||
#define SM5502_REG_INTM2_REV_ACCE_MASK (0x1 << SM5502_REG_INTM2_REV_ACCE_SHIFT)
|
||||
#define SM5502_REG_INTM2_ADC_CHG_MASK (0x1 << SM5502_REG_INTM2_ADC_CHG_SHIFT)
|
||||
#define SM5502_REG_INTM2_STUCK_KEY_MASK (0x1 << SM5502_REG_INTM2_STUCK_KEY_SHIFT)
|
||||
#define SM5502_REG_INTM2_STUCK_KEY_RCV_MASK (0x1 << SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT)
|
||||
#define SM5502_REG_INTM2_MHL_MASK (0x1 << SM5502_REG_INTM2_MHL_SHIFT)
|
||||
|
||||
#define SM5502_REG_ADC_SHIFT 0
|
||||
#define SM5502_REG_ADC_MASK (0x1f << SM5502_REG_ADC_SHIFT)
|
||||
|
||||
#define SM5502_REG_TIMING_SET1_KEY_PRESS_SHIFT 4
|
||||
#define SM5502_REG_TIMING_SET1_KEY_PRESS_MASK (0xf << SM5502_REG_TIMING_SET1_KEY_PRESS_SHIFT)
|
||||
#define TIMING_KEY_PRESS_100MS 0x0
|
||||
#define TIMING_KEY_PRESS_200MS 0x1
|
||||
#define TIMING_KEY_PRESS_300MS 0x2
|
||||
#define TIMING_KEY_PRESS_400MS 0x3
|
||||
#define TIMING_KEY_PRESS_500MS 0x4
|
||||
#define TIMING_KEY_PRESS_600MS 0x5
|
||||
#define TIMING_KEY_PRESS_700MS 0x6
|
||||
#define TIMING_KEY_PRESS_800MS 0x7
|
||||
#define TIMING_KEY_PRESS_900MS 0x8
|
||||
#define TIMING_KEY_PRESS_1000MS 0x9
|
||||
#define SM5502_REG_TIMING_SET1_ADC_DET_SHIFT 0
|
||||
#define SM5502_REG_TIMING_SET1_ADC_DET_MASK (0xf << SM5502_REG_TIMING_SET1_ADC_DET_SHIFT)
|
||||
#define TIMING_ADC_DET_50MS 0x0
|
||||
#define TIMING_ADC_DET_100MS 0x1
|
||||
#define TIMING_ADC_DET_150MS 0x2
|
||||
#define TIMING_ADC_DET_200MS 0x3
|
||||
#define TIMING_ADC_DET_300MS 0x4
|
||||
#define TIMING_ADC_DET_400MS 0x5
|
||||
#define TIMING_ADC_DET_500MS 0x6
|
||||
#define TIMING_ADC_DET_600MS 0x7
|
||||
#define TIMING_ADC_DET_700MS 0x8
|
||||
#define TIMING_ADC_DET_800MS 0x9
|
||||
#define TIMING_ADC_DET_900MS 0xA
|
||||
#define TIMING_ADC_DET_1000MS 0xB
|
||||
|
||||
#define SM5502_REG_TIMING_SET2_SW_WAIT_SHIFT 4
|
||||
#define SM5502_REG_TIMING_SET2_SW_WAIT_MASK (0xf << SM5502_REG_TIMING_SET2_SW_WAIT_SHIFT)
|
||||
#define TIMING_SW_WAIT_10MS 0x0
|
||||
#define TIMING_SW_WAIT_30MS 0x1
|
||||
#define TIMING_SW_WAIT_50MS 0x2
|
||||
#define TIMING_SW_WAIT_70MS 0x3
|
||||
#define TIMING_SW_WAIT_90MS 0x4
|
||||
#define TIMING_SW_WAIT_110MS 0x5
|
||||
#define TIMING_SW_WAIT_130MS 0x6
|
||||
#define TIMING_SW_WAIT_150MS 0x7
|
||||
#define TIMING_SW_WAIT_170MS 0x8
|
||||
#define TIMING_SW_WAIT_190MS 0x9
|
||||
#define TIMING_SW_WAIT_210MS 0xA
|
||||
#define SM5502_REG_TIMING_SET2_LONG_KEY_SHIFT 0
|
||||
#define SM5502_REG_TIMING_SET2_LONG_KEY_MASK (0xf << SM5502_REG_TIMING_SET2_LONG_KEY_SHIFT)
|
||||
#define TIMING_LONG_KEY_300MS 0x0
|
||||
#define TIMING_LONG_KEY_400MS 0x1
|
||||
#define TIMING_LONG_KEY_500MS 0x2
|
||||
#define TIMING_LONG_KEY_600MS 0x3
|
||||
#define TIMING_LONG_KEY_700MS 0x4
|
||||
#define TIMING_LONG_KEY_800MS 0x5
|
||||
#define TIMING_LONG_KEY_900MS 0x6
|
||||
#define TIMING_LONG_KEY_1000MS 0x7
|
||||
#define TIMING_LONG_KEY_1100MS 0x8
|
||||
#define TIMING_LONG_KEY_1200MS 0x9
|
||||
#define TIMING_LONG_KEY_1300MS 0xA
|
||||
#define TIMING_LONG_KEY_1400MS 0xB
|
||||
#define TIMING_LONG_KEY_1500MS 0xC
|
||||
|
||||
#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_SHIFT 0
|
||||
#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_SHIFT 1
|
||||
#define SM5502_REG_DEV_TYPE1_USB_SDP_SHIFT 2
|
||||
#define SM5502_REG_DEV_TYPE1_UART_SHIFT 3
|
||||
#define SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_SHIFT 4
|
||||
#define SM5502_REG_DEV_TYPE1_USB_CHG_SHIFT 5
|
||||
#define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT 6
|
||||
#define SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT 7
|
||||
#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_MASK (0x1 << SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_MASK (0x1 << SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE1_USB_SDP_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_SDP_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE1_UART_MASK (0x1 << SM5502_REG_DEV_TYPE1_UART_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_MASK (0x1 << SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE1_USB_CHG_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_CHG_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_MASK (0x1 << SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE1_USB_OTG_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT)
|
||||
|
||||
#define SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT 0
|
||||
#define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT 1
|
||||
#define SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT 2
|
||||
#define SM5502_REG_DEV_TYPE2_JIG_UART_OFF_SHIFT 3
|
||||
#define SM5502_REG_DEV_TYPE2_PPD_SHIFT 4
|
||||
#define SM5502_REG_DEV_TYPE2_TTY_SHIFT 5
|
||||
#define SM5502_REG_DEV_TYPE2_AV_CABLE_SHIFT 6
|
||||
#define SM5502_REG_DEV_TYPE2_JIG_USB_ON_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE2_JIG_UART_ON_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE2_JIG_UART_OFF_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_UART_OFF_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE2_PPD_MASK (0x1 << SM5502_REG_DEV_TYPE2_PPD_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE2_TTY_MASK (0x1 << SM5502_REG_DEV_TYPE2_TTY_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE2_AV_CABLE_MASK (0x1 << SM5502_REG_DEV_TYPE2_AV_CABLE_SHIFT)
|
||||
|
||||
#define SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT 0
|
||||
#define SM5502_REG_MANUAL_SW1_DP_SHIFT 2
|
||||
#define SM5502_REG_MANUAL_SW1_DM_SHIFT 5
|
||||
#define SM5502_REG_MANUAL_SW1_VBUSIN_MASK (0x3 << SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT)
|
||||
#define SM5502_REG_MANUAL_SW1_DP_MASK (0x7 << SM5502_REG_MANUAL_SW1_DP_SHIFT)
|
||||
#define SM5502_REG_MANUAL_SW1_DM_MASK (0x7 << SM5502_REG_MANUAL_SW1_DM_SHIFT)
|
||||
#define VBUSIN_SWITCH_OPEN 0x0
|
||||
#define VBUSIN_SWITCH_VBUSOUT 0x1
|
||||
#define VBUSIN_SWITCH_MIC 0x2
|
||||
#define VBUSIN_SWITCH_VBUSOUT_WITH_USB 0x3
|
||||
#define DM_DP_CON_SWITCH_OPEN 0x0
|
||||
#define DM_DP_CON_SWITCH_USB 0x1
|
||||
#define DM_DP_CON_SWITCH_AUDIO 0x2
|
||||
#define DM_DP_CON_SWITCH_UART 0x3
|
||||
#define DM_DP_SWITCH_OPEN ((DM_DP_CON_SWITCH_OPEN <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
|
||||
| (DM_DP_CON_SWITCH_OPEN <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
|
||||
#define DM_DP_SWITCH_USB ((DM_DP_CON_SWITCH_USB <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
|
||||
| (DM_DP_CON_SWITCH_USB <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
|
||||
#define DM_DP_SWITCH_AUDIO ((DM_DP_CON_SWITCH_AUDIO <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
|
||||
| (DM_DP_CON_SWITCH_AUDIO <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
|
||||
#define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
|
||||
| (DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
|
||||
|
||||
/* SM5502 Interrupts */
|
||||
enum sm5502_irq {
|
||||
/* INT1 */
|
||||
SM5502_IRQ_INT1_ATTACH,
|
||||
SM5502_IRQ_INT1_DETACH,
|
||||
SM5502_IRQ_INT1_KP,
|
||||
SM5502_IRQ_INT1_LKP,
|
||||
SM5502_IRQ_INT1_LKR,
|
||||
SM5502_IRQ_INT1_OVP_EVENT,
|
||||
SM5502_IRQ_INT1_OCP_EVENT,
|
||||
SM5502_IRQ_INT1_OVP_OCP_DIS,
|
||||
|
||||
/* INT2 */
|
||||
SM5502_IRQ_INT2_VBUS_DET,
|
||||
SM5502_IRQ_INT2_REV_ACCE,
|
||||
SM5502_IRQ_INT2_ADC_CHG,
|
||||
SM5502_IRQ_INT2_STUCK_KEY,
|
||||
SM5502_IRQ_INT2_STUCK_KEY_RCV,
|
||||
SM5502_IRQ_INT2_MHL,
|
||||
|
||||
SM5502_IRQ_NUM,
|
||||
};
|
||||
|
||||
#define SM5502_IRQ_INT1_ATTACH_MASK BIT(0)
|
||||
#define SM5502_IRQ_INT1_DETACH_MASK BIT(1)
|
||||
#define SM5502_IRQ_INT1_KP_MASK BIT(2)
|
||||
#define SM5502_IRQ_INT1_LKP_MASK BIT(3)
|
||||
#define SM5502_IRQ_INT1_LKR_MASK BIT(4)
|
||||
#define SM5502_IRQ_INT1_OVP_EVENT_MASK BIT(5)
|
||||
#define SM5502_IRQ_INT1_OCP_EVENT_MASK BIT(6)
|
||||
#define SM5502_IRQ_INT1_OVP_OCP_DIS_MASK BIT(7)
|
||||
#define SM5502_IRQ_INT2_VBUS_DET_MASK BIT(0)
|
||||
#define SM5502_IRQ_INT2_REV_ACCE_MASK BIT(1)
|
||||
#define SM5502_IRQ_INT2_ADC_CHG_MASK BIT(2)
|
||||
#define SM5502_IRQ_INT2_STUCK_KEY_MASK BIT(3)
|
||||
#define SM5502_IRQ_INT2_STUCK_KEY_RCV_MASK BIT(4)
|
||||
#define SM5502_IRQ_INT2_MHL_MASK BIT(5)
|
||||
|
||||
#endif /* __LINUX_EXTCON_SM5502_H */
|
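As an illustrative sketch only: how the MANUAL_SW1 switch values above might be combined to route D+/D- to the USB path. The regmap handle is a hypothetical driver resource, and choosing VBUSIN_SWITCH_VBUSOUT_WITH_USB for the VBUS path is an assumption, not something this header prescribes.

#include <linux/regmap.h>
#include <linux/extcon/sm5502.h>

/* Route D+/D- to the USB data path and VBUSIN to VBUSOUT-with-USB. */
static int sm5502_route_usb(struct regmap *regmap)
{
	unsigned int val = DM_DP_SWITCH_USB |
			   (VBUSIN_SWITCH_VBUSOUT_WITH_USB <<
			    SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT);

	return regmap_write(regmap, SM5502_REG_MANUAL_SW1, val);
}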
@@ -553,7 +553,7 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
|
||||
#define fb_memcpy_fromfb sbus_memcpy_fromio
|
||||
#define fb_memcpy_tofb sbus_memcpy_toio
|
||||
|
||||
#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__)
|
||||
#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__) || defined(__arm__)
|
||||
|
||||
#define fb_readb __raw_readb
|
||||
#define fb_readw __raw_readw
|
||||
|
360
include/linux/fence.h
Normal file
@@ -0,0 +1,360 @@
|
||||
/*
|
||||
* Fence mechanism for dma-buf to allow for asynchronous dma access
|
||||
*
|
||||
* Copyright (C) 2012 Canonical Ltd
|
||||
* Copyright (C) 2012 Texas Instruments
|
||||
*
|
||||
* Authors:
|
||||
* Rob Clark <robdclark@gmail.com>
|
||||
* Maarten Lankhorst <maarten.lankhorst@canonical.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_FENCE_H
|
||||
#define __LINUX_FENCE_H
|
||||
|
||||
#include <linux/err.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/kref.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/printk.h>
|
||||
#include <linux/rcupdate.h>
|
||||
|
||||
struct fence;
|
||||
struct fence_ops;
|
||||
struct fence_cb;
|
||||
|
||||
/**
|
||||
* struct fence - software synchronization primitive
|
||||
* @refcount: refcount for this fence
|
||||
* @ops: fence_ops associated with this fence
|
||||
* @rcu: used for releasing fence with kfree_rcu
|
||||
* @cb_list: list of all callbacks to call
|
||||
* @lock: spin_lock_irqsave used for locking
|
||||
* @context: execution context this fence belongs to, returned by
|
||||
* fence_context_alloc()
|
||||
* @seqno: the sequence number of this fence inside the execution context,
|
||||
* can be compared to decide which fence would be signaled later.
|
||||
* @flags: A mask of FENCE_FLAG_* defined below
|
||||
* @timestamp: Timestamp when the fence was signaled.
|
||||
* @status: Optional, only valid if < 0, must be set before calling
|
||||
* fence_signal, indicates that the fence has completed with an error.
|
||||
*
|
||||
* the flags member must be manipulated and read using the appropriate
|
||||
* atomic ops (bit_*), so taking the spinlock will not be needed most
|
||||
* of the time.
|
||||
*
|
||||
* FENCE_FLAG_SIGNALED_BIT - fence is already signaled
|
||||
* FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called*
|
||||
* FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
|
||||
* implementer of the fence for its own purposes. Can be used in different
|
||||
* ways by different fence implementers, so do not rely on this.
|
||||
*
|
||||
* *) Since atomic bitops are used, this is not guaranteed to be the case.
|
||||
* Particularly, if the bit was set, but fence_signal was called right
|
||||
* before this bit was set, it would have been able to set the
|
||||
* FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
|
||||
* Adding a check for FENCE_FLAG_SIGNALED_BIT after setting
|
||||
* FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
|
||||
* after fence_signal was called, any enable_signaling call will have either
|
||||
* been completed, or never called at all.
|
||||
*/
|
||||
struct fence {
|
||||
struct kref refcount;
|
||||
const struct fence_ops *ops;
|
||||
struct rcu_head rcu;
|
||||
struct list_head cb_list;
|
||||
spinlock_t *lock;
|
||||
unsigned context, seqno;
|
||||
unsigned long flags;
|
||||
ktime_t timestamp;
|
||||
int status;
|
||||
};
|
||||
|
||||
enum fence_flag_bits {
|
||||
FENCE_FLAG_SIGNALED_BIT,
|
||||
FENCE_FLAG_ENABLE_SIGNAL_BIT,
|
||||
FENCE_FLAG_USER_BITS, /* must always be last member */
|
||||
};
|
||||
|
||||
typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb);
|
||||
|
||||
/**
|
||||
* struct fence_cb - callback for fence_add_callback
|
||||
* @node: used by fence_add_callback to append this struct to fence::cb_list
|
||||
* @func: fence_func_t to call
|
||||
*
|
||||
* This struct will be initialized by fence_add_callback, additional
|
||||
* data can be passed along by embedding fence_cb in another struct.
|
||||
*/
|
||||
struct fence_cb {
|
||||
struct list_head node;
|
||||
fence_func_t func;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct fence_ops - operations implemented for fence
|
||||
* @get_driver_name: returns the driver name.
|
||||
* @get_timeline_name: return the name of the context this fence belongs to.
|
||||
* @enable_signaling: enable software signaling of fence.
|
||||
* @signaled: [optional] peek whether the fence is signaled, can be null.
|
||||
* @wait: custom wait implementation, or fence_default_wait.
|
||||
* @release: [optional] called on destruction of fence, can be null
|
||||
* @fill_driver_data: [optional] callback to fill in free-form debug info
|
||||
* Returns amount of bytes filled, or -errno.
|
||||
* @fence_value_str: [optional] fills in the value of the fence as a string
|
||||
* @timeline_value_str: [optional] fills in the current value of the timeline
|
||||
* as a string
|
||||
*
|
||||
* Notes on enable_signaling:
|
||||
* For fence implementations that have the capability for hw->hw
|
||||
* signaling, they can implement this op to enable the necessary
|
||||
* irqs, or insert commands into cmdstream, etc. This is called
|
||||
* in the first wait() or add_callback() path to let the fence
|
||||
* implementation know that there is another driver waiting on
|
||||
* the signal (ie. hw->sw case).
|
||||
*
|
||||
* This function can be called from atomic context, but not
|
||||
* from irq context, so normal spinlocks can be used.
|
||||
*
|
||||
* A return value of false indicates the fence already passed,
|
||||
* or some failure occurred that made it impossible to enable
|
||||
* signaling. True indicates successful enabling.
|
||||
*
|
||||
* fence->status may be set in enable_signaling, but only when false is
|
||||
* returned.
|
||||
*
|
||||
* Calling fence_signal before enable_signaling is called allows
|
||||
* for a tiny race window in which enable_signaling is called during,
|
||||
* before, or after fence_signal. To fight this, it is recommended
|
||||
* that before enable_signaling returns true an extra reference is
|
||||
* taken on the fence, to be released when the fence is signaled.
|
||||
* This will mean fence_signal will still be called twice, but
|
||||
* the second time will be a noop since it was already signaled.
|
||||
*
|
||||
* Notes on signaled:
|
||||
* May set fence->status if returning true.
|
||||
*
|
||||
* Notes on wait:
|
||||
* Must not be NULL, set to fence_default_wait for default implementation.
|
||||
* the fence_default_wait implementation should work for any fence, as long
|
||||
* as enable_signaling works correctly.
|
||||
*
|
||||
* Must return -ERESTARTSYS if the wait is intr = true and the wait was
|
||||
* interrupted, and remaining jiffies if fence has signaled, or 0 if wait
|
||||
* timed out. Can also return other error values on custom implementations,
|
||||
* which should be treated as if the fence is signaled. For example a hardware
|
||||
* lockup could be reported like that.
|
||||
*
|
||||
* Notes on release:
|
||||
* Can be NULL, this function allows additional commands to run on
|
||||
* destruction of the fence. Can be called from irq context.
|
||||
* If pointer is set to NULL, kfree will get called instead.
|
||||
*/
|
||||
|
||||
struct fence_ops {
|
||||
const char * (*get_driver_name)(struct fence *fence);
|
||||
const char * (*get_timeline_name)(struct fence *fence);
|
||||
bool (*enable_signaling)(struct fence *fence);
|
||||
bool (*signaled)(struct fence *fence);
|
||||
signed long (*wait)(struct fence *fence, bool intr, signed long timeout);
|
||||
void (*release)(struct fence *fence);
|
||||
|
||||
int (*fill_driver_data)(struct fence *fence, void *data, int size);
|
||||
void (*fence_value_str)(struct fence *fence, char *str, int size);
|
||||
void (*timeline_value_str)(struct fence *fence, char *str, int size);
|
||||
};
|
||||
|
||||
void fence_init(struct fence *fence, const struct fence_ops *ops,
|
||||
spinlock_t *lock, unsigned context, unsigned seqno);
|
||||
|
||||
void fence_release(struct kref *kref);
|
||||
void fence_free(struct fence *fence);
|
||||
|
||||
/**
|
||||
* fence_get - increases refcount of the fence
|
||||
* @fence: [in] fence to increase refcount of
|
||||
*
|
||||
* Returns the same fence, with refcount increased by 1.
|
||||
*/
|
||||
static inline struct fence *fence_get(struct fence *fence)
|
||||
{
|
||||
if (fence)
|
||||
kref_get(&fence->refcount);
|
||||
return fence;
|
||||
}
|
||||
|
||||
/**
|
||||
* fence_get_rcu - get a fence from a reservation_object_list with rcu read lock
|
||||
* @fence: [in] fence to increase refcount of
|
||||
*
|
||||
* Function returns NULL if no refcount could be obtained, or the fence.
|
||||
*/
|
||||
static inline struct fence *fence_get_rcu(struct fence *fence)
|
||||
{
|
||||
if (kref_get_unless_zero(&fence->refcount))
|
||||
return fence;
|
||||
else
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* fence_put - decreases refcount of the fence
|
||||
* @fence: [in] fence to reduce refcount of
|
||||
*/
|
||||
static inline void fence_put(struct fence *fence)
|
||||
{
|
||||
if (fence)
|
||||
kref_put(&fence->refcount, fence_release);
|
||||
}
|
||||
|
||||
int fence_signal(struct fence *fence);
|
||||
int fence_signal_locked(struct fence *fence);
|
||||
signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout);
|
||||
int fence_add_callback(struct fence *fence, struct fence_cb *cb,
|
||||
fence_func_t func);
|
||||
bool fence_remove_callback(struct fence *fence, struct fence_cb *cb);
|
||||
void fence_enable_sw_signaling(struct fence *fence);
|
||||
|
||||
/**
|
||||
* fence_is_signaled_locked - Return an indication if the fence is signaled yet.
|
||||
* @fence: [in] the fence to check
|
||||
*
|
||||
* Returns true if the fence was already signaled, false if not. Since this
|
||||
* function doesn't enable signaling, it is not guaranteed to ever return
|
||||
* true if fence_add_callback, fence_wait or fence_enable_sw_signaling
|
||||
* haven't been called before.
|
||||
*
|
||||
* This function requires fence->lock to be held.
|
||||
*/
|
||||
static inline bool
|
||||
fence_is_signaled_locked(struct fence *fence)
|
||||
{
|
||||
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
|
||||
return true;
|
||||
|
||||
if (fence->ops->signaled && fence->ops->signaled(fence)) {
|
||||
fence_signal_locked(fence);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* fence_is_signaled - Return an indication if the fence is signaled yet.
|
||||
* @fence: [in] the fence to check
|
||||
*
|
||||
* Returns true if the fence was already signaled, false if not. Since this
|
||||
* function doesn't enable signaling, it is not guaranteed to ever return
|
||||
* true if fence_add_callback, fence_wait or fence_enable_sw_signaling
|
||||
* haven't been called before.
|
||||
*
|
||||
* It's recommended for seqno fences to call fence_signal when the
|
||||
* operation is complete, it makes it possible to prevent issues from
|
||||
* wraparound between time of issue and time of use by checking the return
|
||||
* value of this function before calling hardware-specific wait instructions.
|
||||
*/
|
||||
static inline bool
|
||||
fence_is_signaled(struct fence *fence)
|
||||
{
|
||||
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
|
||||
return true;
|
||||
|
||||
if (fence->ops->signaled && fence->ops->signaled(fence)) {
|
||||
fence_signal(fence);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* fence_later - return the chronologically later fence
|
||||
* @f1: [in] the first fence from the same context
|
||||
* @f2: [in] the second fence from the same context
|
||||
*
|
||||
* Returns NULL if both fences are signaled, otherwise the fence that would be
|
||||
* signaled last. Both fences must be from the same context, since a seqno is
|
||||
* not re-used across contexts.
|
||||
*/
|
||||
static inline struct fence *fence_later(struct fence *f1, struct fence *f2)
|
||||
{
|
||||
if (WARN_ON(f1->context != f2->context))
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been
|
||||
* set if enable_signaling wasn't called, and enabling that here is
|
||||
* overkill.
|
||||
*/
|
||||
if (f2->seqno - f1->seqno <= INT_MAX)
|
||||
return fence_is_signaled(f2) ? NULL : f2;
|
||||
else
|
||||
return fence_is_signaled(f1) ? NULL : f1;
|
||||
}
|
||||
|
||||
signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
|
||||
|
||||
|
||||
/**
|
||||
* fence_wait - sleep until the fence gets signaled
|
||||
* @fence: [in] the fence to wait on
|
||||
* @intr: [in] if true, do an interruptible wait
|
||||
*
|
||||
* This function will return -ERESTARTSYS if interrupted by a signal,
|
||||
* or 0 if the fence was signaled. Other error values may be
|
||||
* returned on custom implementations.
|
||||
*
|
||||
* Performs a synchronous wait on this fence. It is assumed the caller
|
||||
* directly or indirectly holds a reference to the fence, otherwise the
|
||||
* fence might be freed before return, resulting in undefined behavior.
|
||||
*/
|
||||
static inline signed long fence_wait(struct fence *fence, bool intr)
|
||||
{
|
||||
signed long ret;
|
||||
|
||||
/* Since fence_wait_timeout cannot timeout with
|
||||
* MAX_SCHEDULE_TIMEOUT, only valid return values are
|
||||
* -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
|
||||
*/
|
||||
ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
|
||||
|
||||
return ret < 0 ? ret : 0;
|
||||
}
|
||||
|
||||
unsigned fence_context_alloc(unsigned num);
|
||||
|
||||
#define FENCE_TRACE(f, fmt, args...) \
|
||||
do { \
|
||||
struct fence *__ff = (f); \
|
||||
if (config_enabled(CONFIG_FENCE_TRACE)) \
|
||||
pr_info("f %u#%u: " fmt, \
|
||||
__ff->context, __ff->seqno, ##args); \
|
||||
} while (0)
|
||||
|
||||
#define FENCE_WARN(f, fmt, args...) \
|
||||
do { \
|
||||
struct fence *__ff = (f); \
|
||||
pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \
|
||||
##args); \
|
||||
} while (0)
|
||||
|
||||
#define FENCE_ERR(f, fmt, args...) \
|
||||
do { \
|
||||
struct fence *__ff = (f); \
|
||||
pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \
|
||||
##args); \
|
||||
} while (0)
|
||||
|
||||
#endif /* __LINUX_FENCE_H */
|
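A minimal, hedged sketch of a driver using the fence API declared above: a fence_ops with the mandatory callbacks plus fence_default_wait, and a create helper that calls fence_init(). The foo_* names are hypothetical; a real driver would obtain the context from fence_context_alloc() and call fence_signal() from its completion path.

#include <linux/fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_fence_lock);	/* one lock shared by this context */

static const char *foo_get_driver_name(struct fence *f)
{
	return "foo";
}

static const char *foo_get_timeline_name(struct fence *f)
{
	return "foo-timeline";
}

static bool foo_enable_signaling(struct fence *f)
{
	/* e.g. enable a completion interrupt here */
	return true;
}

static const struct fence_ops foo_fence_ops = {
	.get_driver_name	= foo_get_driver_name,
	.get_timeline_name	= foo_get_timeline_name,
	.enable_signaling	= foo_enable_signaling,
	.wait			= fence_default_wait,
};

static struct fence *foo_fence_create(unsigned context, unsigned seqno)
{
	struct fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	fence_init(f, &foo_fence_ops, &foo_fence_lock, context, seqno);
	return f;	/* later: fence_signal(f); fence_put(f); */
}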
@@ -6,6 +6,7 @@
|
||||
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/compat.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <uapi/linux/filter.h>
|
||||
|
||||
@@ -81,7 +82,7 @@ enum {
|
||||
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
|
||||
|
||||
#define BPF_ALU64_REG(OP, DST, SRC) \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
|
||||
.dst_reg = DST, \
|
||||
.src_reg = SRC, \
|
||||
@@ -89,7 +90,7 @@ enum {
|
||||
.imm = 0 })
|
||||
|
||||
#define BPF_ALU32_REG(OP, DST, SRC) \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_ALU | BPF_OP(OP) | BPF_X, \
|
||||
.dst_reg = DST, \
|
||||
.src_reg = SRC, \
|
||||
@@ -99,7 +100,7 @@ enum {
|
||||
/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
|
||||
|
||||
#define BPF_ALU64_IMM(OP, DST, IMM) \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
|
||||
.dst_reg = DST, \
|
||||
.src_reg = 0, \
|
||||
@@ -107,7 +108,7 @@ enum {
|
||||
.imm = IMM })
|
||||
|
||||
#define BPF_ALU32_IMM(OP, DST, IMM) \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_ALU | BPF_OP(OP) | BPF_K, \
|
||||
.dst_reg = DST, \
|
||||
.src_reg = 0, \
|
||||
@@ -117,7 +118,7 @@ enum {
|
||||
/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
|
||||
|
||||
#define BPF_ENDIAN(TYPE, DST, LEN) \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \
|
||||
.dst_reg = DST, \
|
||||
.src_reg = 0, \
|
||||
@@ -127,7 +128,7 @@ enum {
|
||||
/* Short form of mov, dst_reg = src_reg */
|
||||
|
||||
#define BPF_MOV64_REG(DST, SRC) \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_ALU64 | BPF_MOV | BPF_X, \
|
||||
.dst_reg = DST, \
|
||||
.src_reg = SRC, \
|
||||
@@ -135,7 +136,7 @@ enum {
|
||||
.imm = 0 })
|
||||
|
||||
#define BPF_MOV32_REG(DST, SRC) \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_ALU | BPF_MOV | BPF_X, \
|
||||
.dst_reg = DST, \
|
||||
.src_reg = SRC, \
|
||||
@@ -145,7 +146,7 @@ enum {
|
||||
/* Short form of mov, dst_reg = imm32 */
|
||||
|
||||
#define BPF_MOV64_IMM(DST, IMM) \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_ALU64 | BPF_MOV | BPF_K, \
|
||||
.dst_reg = DST, \
|
||||
.src_reg = 0, \
|
||||
@@ -153,7 +154,7 @@ enum {
|
||||
.imm = IMM })
|
||||
|
||||
#define BPF_MOV32_IMM(DST, IMM) \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_ALU | BPF_MOV | BPF_K, \
|
||||
.dst_reg = DST, \
|
||||
.src_reg = 0, \
|
||||
@@ -163,7 +164,7 @@ enum {
|
||||
/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
|
||||
|
||||
#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \
|
||||
.dst_reg = DST, \
|
||||
.src_reg = SRC, \
|
||||
@@ -171,7 +172,7 @@ enum {
|
||||
.imm = IMM })
|
||||
|
||||
#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \
|
||||
.dst_reg = DST, \
|
||||
.src_reg = SRC, \
|
||||
@@ -181,7 +182,7 @@ enum {
|
||||
/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
|
||||
|
||||
#define BPF_LD_ABS(SIZE, IMM) \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
|
||||
.dst_reg = 0, \
|
||||
.src_reg = 0, \
|
||||
@@ -191,7 +192,7 @@ enum {
|
||||
/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */
|
||||
|
||||
#define BPF_LD_IND(SIZE, SRC, IMM) \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \
|
||||
.dst_reg = 0, \
|
||||
.src_reg = SRC, \
|
||||
@@ -201,7 +202,7 @@ enum {
|
||||
/* Memory load, dst_reg = *(uint *) (src_reg + off16) */
|
||||
|
||||
#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
|
||||
.dst_reg = DST, \
|
||||
.src_reg = SRC, \
|
||||
@@ -211,7 +212,7 @@ enum {
|
||||
/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
|
||||
|
||||
#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
|
||||
.dst_reg = DST, \
|
||||
.src_reg = SRC, \
|
||||
@@ -221,7 +222,7 @@ enum {
|
||||
/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
|
||||
|
||||
#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
|
||||
.dst_reg = DST, \
|
||||
.src_reg = 0, \
|
||||
@@ -231,7 +232,7 @@ enum {
|
||||
/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
|
||||
|
||||
#define BPF_JMP_REG(OP, DST, SRC, OFF) \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_JMP | BPF_OP(OP) | BPF_X, \
|
||||
.dst_reg = DST, \
|
||||
.src_reg = SRC, \
|
||||
@@ -241,7 +242,7 @@ enum {
|
||||
/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
|
||||
|
||||
#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_JMP | BPF_OP(OP) | BPF_K, \
|
||||
.dst_reg = DST, \
|
||||
.src_reg = 0, \
|
||||
@@ -251,7 +252,7 @@ enum {
|
||||
/* Function call */
|
||||
|
||||
#define BPF_EMIT_CALL(FUNC) \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_JMP | BPF_CALL, \
|
||||
.dst_reg = 0, \
|
||||
.src_reg = 0, \
|
||||
@@ -261,7 +262,7 @@ enum {
|
||||
/* Raw code statement block */
|
||||
|
||||
#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = CODE, \
|
||||
.dst_reg = DST, \
|
||||
.src_reg = SRC, \
|
||||
@@ -271,7 +272,7 @@ enum {
|
||||
/* Program exit */
|
||||
|
||||
#define BPF_EXIT_INSN() \
|
||||
((struct sock_filter_int) { \
|
||||
((struct bpf_insn) { \
|
||||
.code = BPF_JMP | BPF_EXIT, \
|
||||
.dst_reg = 0, \
|
||||
.src_reg = 0, \
|
||||
@@ -295,9 +296,10 @@ enum {
|
||||
})
|
||||
|
||||
/* Macro to invoke filter function. */
|
||||
#define SK_RUN_FILTER(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
|
||||
#define SK_RUN_FILTER(filter, ctx) \
|
||||
(*filter->prog->bpf_func)(ctx, filter->prog->insnsi)
|
||||
|
||||
struct sock_filter_int {
|
||||
struct bpf_insn {
|
||||
__u8 code; /* opcode */
|
||||
__u8 dst_reg:4; /* dest register */
|
||||
__u8 src_reg:4; /* source register */
|
||||
@@ -322,54 +324,58 @@ struct sk_buff;
|
||||
struct sock;
|
||||
struct seccomp_data;
|
||||
|
||||
struct sk_filter {
|
||||
atomic_t refcnt;
|
||||
struct bpf_prog {
|
||||
u32 jited:1, /* Is our filter JIT'ed? */
|
||||
len:31; /* Number of filter blocks */
|
||||
struct sock_fprog_kern *orig_prog; /* Original BPF program */
|
||||
struct rcu_head rcu;
|
||||
unsigned int (*bpf_func)(const struct sk_buff *skb,
|
||||
const struct sock_filter_int *filter);
|
||||
const struct bpf_insn *filter);
|
||||
union {
|
||||
struct sock_filter insns[0];
|
||||
struct sock_filter_int insnsi[0];
|
||||
struct bpf_insn insnsi[0];
|
||||
struct work_struct work;
|
||||
};
|
||||
};
|
||||
|
||||
static inline unsigned int sk_filter_size(unsigned int proglen)
|
||||
struct sk_filter {
|
||||
atomic_t refcnt;
|
||||
struct rcu_head rcu;
|
||||
struct bpf_prog *prog;
|
||||
};
|
||||
|
||||
#define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
|
||||
|
||||
static inline unsigned int bpf_prog_size(unsigned int proglen)
|
||||
{
|
||||
return max(sizeof(struct sk_filter),
|
||||
offsetof(struct sk_filter, insns[proglen]));
|
||||
return max(sizeof(struct bpf_prog),
|
||||
offsetof(struct bpf_prog, insns[proglen]));
|
||||
}
|
||||
|
||||
#define sk_filter_proglen(fprog) \
|
||||
(fprog->len * sizeof(fprog->filter[0]))
|
||||
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
|
||||
|
||||
int sk_filter(struct sock *sk, struct sk_buff *skb);
|
||||
|
||||
void sk_filter_select_runtime(struct sk_filter *fp);
|
||||
void sk_filter_free(struct sk_filter *fp);
|
||||
void bpf_prog_select_runtime(struct bpf_prog *fp);
|
||||
void bpf_prog_free(struct bpf_prog *fp);
|
||||
|
||||
int sk_convert_filter(struct sock_filter *prog, int len,
|
||||
struct sock_filter_int *new_prog, int *new_len);
|
||||
int bpf_convert_filter(struct sock_filter *prog, int len,
|
||||
struct bpf_insn *new_prog, int *new_len);
|
||||
|
||||
int sk_unattached_filter_create(struct sk_filter **pfp,
|
||||
struct sock_fprog_kern *fprog);
|
||||
void sk_unattached_filter_destroy(struct sk_filter *fp);
|
||||
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
|
||||
void bpf_prog_destroy(struct bpf_prog *fp);
|
||||
|
||||
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
|
||||
int sk_detach_filter(struct sock *sk);
|
||||
|
||||
int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
|
||||
int bpf_check_classic(const struct sock_filter *filter, unsigned int flen);
|
||||
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
|
||||
unsigned int len);
|
||||
|
||||
void sk_filter_charge(struct sock *sk, struct sk_filter *fp);
|
||||
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
|
||||
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
|
||||
|
||||
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
|
||||
void bpf_int_jit_compile(struct sk_filter *fp);
|
||||
void bpf_int_jit_compile(struct bpf_prog *fp);
|
||||
|
||||
#define BPF_ANC BIT(15)
|
||||
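A sketch of the renamed unattached-filter API: a trivial accept-all classic program built with bpf_prog_create() and run through BPF_PROG_RUN(). The foo_* helpers are hypothetical; the program would eventually be released with bpf_prog_destroy().

#include <linux/kernel.h>
#include <linux/filter.h>
#include <linux/skbuff.h>

static struct bpf_prog *foo_build_accept_all(void)
{
	/* Classic BPF: return 0xffff, i.e. accept the whole packet. */
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffff),
	};
	struct sock_fprog_kern fprog = {
		.len	= ARRAY_SIZE(insns),
		.filter	= insns,
	};
	struct bpf_prog *prog;

	if (bpf_prog_create(&prog, &fprog))
		return NULL;
	return prog;
}

static unsigned int foo_run(struct bpf_prog *prog, struct sk_buff *skb)
{
	/* Replaces SK_RUN_FILTER for a bare bpf_prog. */
	return BPF_PROG_RUN(prog, skb);
}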
|
||||
@@ -406,13 +412,25 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
|
||||
}
|
||||
}
|
||||
|
||||
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
|
||||
int k, unsigned int size);
|
||||
|
||||
static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
|
||||
unsigned int size, void *buffer)
|
||||
{
|
||||
if (k >= 0)
|
||||
return skb_header_pointer(skb, k, size, buffer);
|
||||
|
||||
return bpf_internal_load_pointer_neg_helper(skb, k, size);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BPF_JIT
|
||||
#include <stdarg.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/printk.h>
|
||||
|
||||
void bpf_jit_compile(struct sk_filter *fp);
|
||||
void bpf_jit_free(struct sk_filter *fp);
|
||||
void bpf_jit_compile(struct bpf_prog *fp);
|
||||
void bpf_jit_free(struct bpf_prog *fp);
|
||||
|
||||
static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
|
||||
u32 pass, void *image)
|
||||
@@ -426,11 +444,11 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
|
||||
#else
|
||||
#include <linux/slab.h>
|
||||
|
||||
static inline void bpf_jit_compile(struct sk_filter *fp)
|
||||
static inline void bpf_jit_compile(struct bpf_prog *fp)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void bpf_jit_free(struct sk_filter *fp)
|
||||
static inline void bpf_jit_free(struct bpf_prog *fp)
|
||||
{
|
||||
kfree(fp);
|
||||
}
|
||||
|
@@ -45,6 +45,8 @@ int request_firmware_nowait(
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
void (*cont)(const struct firmware *fw, void *context));
int request_firmware_direct(const struct firmware **fw, const char *name,
struct device *device);

void release_firmware(const struct firmware *fw);
#else
@@ -66,13 +68,12 @@ static inline void release_firmware(const struct firmware *fw)
{
}

#endif

#ifdef CONFIG_FW_LOADER_USER_HELPER
int request_firmware_direct(const struct firmware **fw, const char *name,
struct device *device);
#else
#define request_firmware_direct request_firmware
#endif
static inline int request_firmware_direct(const struct firmware **fw,
const char *name,
struct device *device)
{
return -EINVAL;
}

#endif
#endif
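A usage sketch for the asynchronous loader declared above; the firmware name, device pointer and callback are placeholders, assuming the seven-argument request_firmware_nowait() prototype shown in this hunk.

#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/module.h>

/* Completion callback: fw is NULL if the load failed. */
static void example_fw_cont(const struct firmware *fw, void *context)
{
	struct device *dev = context;

	if (!fw) {
		dev_err(dev, "firmware load failed\n");
		return;
	}
	/* ... program fw->data / fw->size into the hardware ... */
	release_firmware(fw);
}

static int example_load_fw(struct device *dev)
{
	/* uevent=true also lets the usermode helper path be tried */
	return request_firmware_nowait(THIS_MODULE, true, "example/fw.bin",
				       dev, GFP_KERNEL, dev, example_fw_cont);
}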
@@ -387,7 +387,7 @@ struct address_space {
struct inode *host; /* owner: inode, block_device */
struct radix_tree_root page_tree; /* radix tree of all pages */
spinlock_t tree_lock; /* and lock protecting it */
unsigned int i_mmap_writable;/* count VM_SHARED mappings */
atomic_t i_mmap_writable;/* count VM_SHARED mappings */
struct rb_root i_mmap; /* tree of private and shared mappings */
struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
struct mutex i_mmap_mutex; /* protect tree, count, list */
@@ -470,10 +470,35 @@ static inline int mapping_mapped(struct address_space *mapping)
* Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
* marks vma as VM_SHARED if it is shared, and the file was opened for
* writing i.e. vma may be mprotected writable even if now readonly.
*
* If i_mmap_writable is negative, no new writable mappings are allowed. You
* can only deny writable mappings, if none exists right now.
*/
static inline int mapping_writably_mapped(struct address_space *mapping)
{
return mapping->i_mmap_writable != 0;
return atomic_read(&mapping->i_mmap_writable) > 0;
}

static inline int mapping_map_writable(struct address_space *mapping)
{
return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
0 : -EPERM;
}

static inline void mapping_unmap_writable(struct address_space *mapping)
{
atomic_dec(&mapping->i_mmap_writable);
}

static inline int mapping_deny_writable(struct address_space *mapping)
{
return atomic_dec_unless_positive(&mapping->i_mmap_writable) ?
0 : -EBUSY;
}

static inline void mapping_allow_writable(struct address_space *mapping)
{
atomic_inc(&mapping->i_mmap_writable);
}

/*
@@ -833,7 +858,7 @@ static inline struct file *get_file(struct file *f)
*
* Lockd stuffs a "host" pointer into this.
*/
typedef struct files_struct *fl_owner_t;
typedef void *fl_owner_t;

struct file_lock_operations {
void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
@@ -1250,6 +1275,7 @@ struct super_block {

/* AIO completions deferred from interrupt context */
struct workqueue_struct *s_dio_done_wq;
struct hlist_head s_pins;

/*
* Keep the lru lists last in the structure so they always sit on their
@@ -2335,6 +2361,7 @@ extern int do_pipe_flags(int *, int);

extern int kernel_read(struct file *, loff_t, char *, unsigned long);
extern ssize_t kernel_write(struct file *, const char *, size_t, loff_t);
extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
extern struct file * open_exec(const char *);

/* fs/dcache.c -- generic fs support functions */
@@ -2688,7 +2715,7 @@ static const struct file_operations __fops = { \
.read = simple_attr_read, \
.write = simple_attr_write, \
.llseek = generic_file_llseek, \
};
}

static inline __printf(1, 2)
void __simple_attr_check_format(const char *fmt, ...)
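To illustrate how the new atomic i_mmap_writable helpers above are meant to pair up, here is a hedged sketch; both call sites are invented and only stand in for the mmap and write-denial paths this counter was converted for.

/* mmap path: take a writable-mapping reference, fail if writes are denied */
static int example_mmap_shared(struct address_space *mapping)
{
	int err = mapping_map_writable(mapping);   /* -EPERM while denied */

	if (err)
		return err;
	/* ... set up the VM_SHARED mapping; the teardown path later
	 * calls mapping_unmap_writable(mapping) ... */
	return 0;
}

/* deny-write path: only succeeds while no writable mapping exists */
static int example_deny_writes(struct address_space *mapping)
{
	int err = mapping_deny_writable(mapping);  /* -EBUSY if mapped writable */

	if (err)
		return err;
	/* ... record the denial (e.g. set a seal flag); once that flag
	 * itself blocks new writable mmaps, the transient denial can be
	 * dropped again with mapping_allow_writable(mapping) ... */
	return 0;
}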
17
include/linux/fs_pin.h
Normal file
@@ -0,0 +1,17 @@
#include <linux/fs.h>

struct fs_pin {
atomic_long_t count;
union {
struct {
struct hlist_node s_list;
struct hlist_node m_list;
};
struct rcu_head rcu;
};
void (*kill)(struct fs_pin *);
};

void pin_put(struct fs_pin *);
void pin_remove(struct fs_pin *);
void pin_insert(struct fs_pin *, struct vfsmount *);
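A hedged sketch of how a kernel-internal user might embed this new pinning primitive; the surrounding structure and the kill callback are hypothetical and assume only the fields and functions declared in fs_pin.h above.

#include <linux/fs_pin.h>
#include <linux/mount.h>
#include <linux/slab.h>

/* Hypothetical object whose lifetime is tied to a vfsmount */
struct example_pinned {
	struct fs_pin pin;
	/* ... payload ... */
};

/* Called by the VFS when the mount this object is pinned to goes away */
static void example_pin_kill(struct fs_pin *p)
{
	struct example_pinned *obj = container_of(p, struct example_pinned, pin);

	/* detach from the s_list/m_list hooks; the final free is left to
	 * pin_put() dropping the last reference on obj->pin.count */
	pin_remove(&obj->pin);
}

static int example_pin_to_mount(struct vfsmount *mnt)
{
	struct example_pinned *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return -ENOMEM;
	atomic_long_set(&obj->pin.count, 1);
	obj->pin.kill = example_pin_kill;
	pin_insert(&obj->pin, mnt);	/* hook onto the superblock and mount pin lists */
	return 0;
}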
@@ -322,16 +322,18 @@ extern int fsnotify_fasync(int fd, struct file *file, int on);
extern void fsnotify_destroy_event(struct fsnotify_group *group,
struct fsnotify_event *event);
/* attach the event to the group notification queue */
extern int fsnotify_add_notify_event(struct fsnotify_group *group,
struct fsnotify_event *event,
int (*merge)(struct list_head *,
struct fsnotify_event *));
extern int fsnotify_add_event(struct fsnotify_group *group,
struct fsnotify_event *event,
int (*merge)(struct list_head *,
struct fsnotify_event *));
/* Remove passed event from groups notification queue */
extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
/* true if the group notification queue is empty */
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */
extern struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group);
extern struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group);
/* return AND dequeue the first event on the notification queue */
extern struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group);
extern struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group);

/* functions used to manipulate the marks attached to inodes */
@@ -33,8 +33,7 @@
|
||||
* features, then it must call an indirect function that
|
||||
* does. Or at least does enough to prevent any unwelcomed side effects.
|
||||
*/
|
||||
#if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \
|
||||
!ARCH_SUPPORTS_FTRACE_OPS
|
||||
#if !ARCH_SUPPORTS_FTRACE_OPS
|
||||
# define FTRACE_FORCE_LIST_FUNC 1
|
||||
#else
|
||||
# define FTRACE_FORCE_LIST_FUNC 0
|
||||
@@ -118,17 +117,18 @@ struct ftrace_ops {
|
||||
ftrace_func_t func;
|
||||
struct ftrace_ops *next;
|
||||
unsigned long flags;
|
||||
int __percpu *disabled;
|
||||
void *private;
|
||||
int __percpu *disabled;
|
||||
#ifdef CONFIG_DYNAMIC_FTRACE
|
||||
int nr_trampolines;
|
||||
struct ftrace_hash *notrace_hash;
|
||||
struct ftrace_hash *filter_hash;
|
||||
struct ftrace_hash *tramp_hash;
|
||||
struct mutex regex_lock;
|
||||
unsigned long trampoline;
|
||||
#endif
|
||||
};
|
||||
|
||||
extern int function_trace_stop;
|
||||
|
||||
/*
|
||||
* Type of the current tracing.
|
||||
*/
|
||||
@@ -140,32 +140,6 @@ enum ftrace_tracing_type_t {
|
||||
/* Current tracing type, default is FTRACE_TYPE_ENTER */
|
||||
extern enum ftrace_tracing_type_t ftrace_tracing_type;
|
||||
|
||||
/**
|
||||
* ftrace_stop - stop function tracer.
|
||||
*
|
||||
* A quick way to stop the function tracer. Note this an on off switch,
|
||||
* it is not something that is recursive like preempt_disable.
|
||||
* This does not disable the calling of mcount, it only stops the
|
||||
* calling of functions from mcount.
|
||||
*/
|
||||
static inline void ftrace_stop(void)
|
||||
{
|
||||
function_trace_stop = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* ftrace_start - start the function tracer.
|
||||
*
|
||||
* This function is the inverse of ftrace_stop. This does not enable
|
||||
* the function tracing if the function tracer is disabled. This only
|
||||
* sets the function tracer flag to continue calling the functions
|
||||
* from mcount.
|
||||
*/
|
||||
static inline void ftrace_start(void)
|
||||
{
|
||||
function_trace_stop = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* The ftrace_ops must be a static and should also
|
||||
* be read_mostly. These functions do modify read_mostly variables
|
||||
@@ -242,8 +216,6 @@ static inline int ftrace_nr_registered_ops(void)
|
||||
}
|
||||
static inline void clear_ftrace_function(void) { }
|
||||
static inline void ftrace_kill(void) { }
|
||||
static inline void ftrace_stop(void) { }
|
||||
static inline void ftrace_start(void) { }
|
||||
#endif /* CONFIG_FUNCTION_TRACER */
|
||||
|
||||
#ifdef CONFIG_STACK_TRACER
|
||||
@@ -317,13 +289,20 @@ extern int ftrace_nr_registered_ops(void);
|
||||
* from tracing that function.
|
||||
*/
|
||||
enum {
|
||||
FTRACE_FL_ENABLED = (1UL << 29),
|
||||
FTRACE_FL_ENABLED = (1UL << 31),
|
||||
FTRACE_FL_REGS = (1UL << 30),
|
||||
FTRACE_FL_REGS_EN = (1UL << 31)
|
||||
FTRACE_FL_REGS_EN = (1UL << 29),
|
||||
FTRACE_FL_TRAMP = (1UL << 28),
|
||||
FTRACE_FL_TRAMP_EN = (1UL << 27),
|
||||
};
|
||||
|
||||
#define FTRACE_FL_MASK (0x7UL << 29)
|
||||
#define FTRACE_REF_MAX ((1UL << 29) - 1)
|
||||
#define FTRACE_REF_MAX_SHIFT 27
|
||||
#define FTRACE_FL_BITS 5
|
||||
#define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1)
|
||||
#define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
|
||||
#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
|
||||
|
||||
#define ftrace_rec_count(rec) ((rec)->flags & ~FTRACE_FL_MASK)
|
||||
|
||||
struct dyn_ftrace {
|
||||
unsigned long ip; /* address of mcount call-site */
|
||||
@@ -431,6 +410,10 @@ void ftrace_modify_all_code(int command);
|
||||
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
|
||||
#endif
|
||||
|
||||
#ifndef FTRACE_GRAPH_ADDR
|
||||
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
|
||||
#endif
|
||||
|
||||
#ifndef FTRACE_REGS_ADDR
|
||||
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
|
||||
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
|
||||
@@ -439,6 +422,16 @@ void ftrace_modify_all_code(int command);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
* If an arch would like functions that are only traced
|
||||
* by the function graph tracer to jump directly to its own
|
||||
* trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
|
||||
* to be that address to jump to.
|
||||
*/
|
||||
#ifndef FTRACE_GRAPH_TRAMP_ADDR
|
||||
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
extern void ftrace_graph_caller(void);
|
||||
extern int ftrace_enable_ftrace_graph_caller(void);
|
||||
@@ -736,6 +729,7 @@ extern char __irqentry_text_end[];
|
||||
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
|
||||
trace_func_graph_ent_t entryfunc);
|
||||
|
||||
extern bool ftrace_graph_is_dead(void);
|
||||
extern void ftrace_graph_stop(void);
|
||||
|
||||
/* The current handlers in use */
|
||||
|
@@ -272,7 +272,6 @@ struct ftrace_event_call {
|
||||
struct trace_event event;
|
||||
const char *print_fmt;
|
||||
struct event_filter *filter;
|
||||
struct list_head *files;
|
||||
void *mod;
|
||||
void *data;
|
||||
/*
|
||||
@@ -404,8 +403,6 @@ enum event_trigger_type {
|
||||
ETT_EVENT_ENABLE = (1 << 3),
|
||||
};
|
||||
|
||||
extern void destroy_preds(struct ftrace_event_file *file);
|
||||
extern void destroy_call_preds(struct ftrace_event_call *call);
|
||||
extern int filter_match_preds(struct event_filter *filter, void *rec);
|
||||
|
||||
extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
|
||||
@@ -574,40 +571,6 @@ do { \
|
||||
__trace_printk(ip, fmt, ##args); \
|
||||
} while (0)
|
||||
|
||||
/**
|
||||
* tracepoint_string - register constant persistent string to trace system
|
||||
* @str - a constant persistent string that will be referenced in tracepoints
|
||||
*
|
||||
* If constant strings are being used in tracepoints, it is faster and
|
||||
* more efficient to just save the pointer to the string and reference
|
||||
* that with a printf "%s" instead of saving the string in the ring buffer
|
||||
* and wasting space and time.
|
||||
*
|
||||
* The problem with the above approach is that userspace tools that read
|
||||
* the binary output of the trace buffers do not have access to the string.
|
||||
* Instead they just show the address of the string which is not very
|
||||
* useful to users.
|
||||
*
|
||||
* With tracepoint_string(), the string will be registered to the tracing
|
||||
* system and exported to userspace via the debugfs/tracing/printk_formats
|
||||
* file that maps the string address to the string text. This way userspace
|
||||
* tools that read the binary buffers have a way to map the pointers to
|
||||
* the ASCII strings they represent.
|
||||
*
|
||||
* The @str used must be a constant string and persistent as it would not
|
||||
* make sense to show a string that no longer exists. But it is still fine
|
||||
* to be used with modules, because when modules are unloaded, if they
|
||||
* had tracepoints, the ring buffers are cleared too. As long as the string
|
||||
* does not change during the life of the module, it is fine to use
|
||||
* tracepoint_string() within a module.
|
||||
*/
|
||||
#define tracepoint_string(str) \
|
||||
({ \
|
||||
static const char *___tp_str __tracepoint_string = str; \
|
||||
___tp_str; \
|
||||
})
|
||||
#define __tracepoint_string __attribute__((section("__tracepoint_str")))
|
||||
|
||||
#ifdef CONFIG_PERF_EVENTS
|
||||
struct perf_event;
|
||||
|
||||
|
@@ -360,7 +360,7 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
/* This is different from alloc_pages_exact_node !!! */
void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);

#define __get_free_page(gfp_mask) \
__get_free_pages((gfp_mask), 0)
9
include/linux/glob.h
Normal file
@@ -0,0 +1,9 @@
#ifndef _LINUX_GLOB_H
#define _LINUX_GLOB_H

#include <linux/types.h>	/* For bool */
#include <linux/compiler.h>	/* For __pure */

bool __pure glob_match(char const *pat, char const *str);

#endif	/* _LINUX_GLOB_H */
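For reference, a small caller of the new helper; the pattern and the naming convention it checks are made up for illustration.

#include <linux/glob.h>

/* Returns true for names like "uart0", "uart1", ..., "uart9" */
static bool example_is_uart_name(const char *name)
{
	return glob_match("uart[0-9]", name);
}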
@@ -18,30 +18,79 @@ struct gpio_desc;
|
||||
|
||||
#ifdef CONFIG_GPIOLIB
|
||||
|
||||
#define GPIOD_FLAGS_BIT_DIR_SET BIT(0)
|
||||
#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
|
||||
#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
|
||||
|
||||
/**
|
||||
* Optional flags that can be passed to one of gpiod_* to configure direction
|
||||
* and output value. These values cannot be OR'd.
|
||||
*/
|
||||
enum gpiod_flags {
|
||||
GPIOD_ASIS = 0,
|
||||
GPIOD_IN = GPIOD_FLAGS_BIT_DIR_SET,
|
||||
GPIOD_OUT_LOW = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT,
|
||||
GPIOD_OUT_HIGH = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT |
|
||||
GPIOD_FLAGS_BIT_DIR_VAL,
|
||||
};
|
||||
|
||||
/* Acquire and dispose GPIOs */
|
||||
struct gpio_desc *__must_check gpiod_get(struct device *dev,
|
||||
const char *con_id);
|
||||
struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
|
||||
struct gpio_desc *__must_check __gpiod_get(struct device *dev,
|
||||
const char *con_id,
|
||||
enum gpiod_flags flags);
|
||||
#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
|
||||
#define gpiod_get(varargs...) __gpiod_get(varargs, 0)
|
||||
struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
|
||||
const char *con_id,
|
||||
unsigned int idx);
|
||||
struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
|
||||
const char *con_id);
|
||||
struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
|
||||
unsigned int idx,
|
||||
enum gpiod_flags flags);
|
||||
#define __gpiod_get_index(dev, con_id, index, flags, ...) \
|
||||
__gpiod_get_index(dev, con_id, index, flags)
|
||||
#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, 0)
|
||||
struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev,
|
||||
const char *con_id,
|
||||
enum gpiod_flags flags);
|
||||
#define __gpiod_get_optional(dev, con_id, flags, ...) \
|
||||
__gpiod_get_optional(dev, con_id, flags)
|
||||
#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, 0)
|
||||
struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
|
||||
const char *con_id,
|
||||
unsigned int index);
|
||||
unsigned int index,
|
||||
enum gpiod_flags flags);
|
||||
#define __gpiod_get_index_optional(dev, con_id, index, flags, ...) \
|
||||
__gpiod_get_index_optional(dev, con_id, index, flags)
|
||||
#define gpiod_get_index_optional(varargs...) \
|
||||
__gpiod_get_index_optional(varargs, 0)
|
||||
|
||||
void gpiod_put(struct gpio_desc *desc);
|
||||
|
||||
struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
|
||||
const char *con_id);
|
||||
struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
|
||||
struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
|
||||
const char *con_id,
|
||||
enum gpiod_flags flags);
|
||||
#define __devm_gpiod_get(dev, con_id, flags, ...) \
|
||||
__devm_gpiod_get(dev, con_id, flags)
|
||||
#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, 0)
|
||||
struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
|
||||
const char *con_id,
|
||||
unsigned int idx);
|
||||
struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
|
||||
const char *con_id);
|
||||
unsigned int idx,
|
||||
enum gpiod_flags flags);
|
||||
#define __devm_gpiod_get_index(dev, con_id, index, flags, ...) \
|
||||
__devm_gpiod_get_index(dev, con_id, index, flags)
|
||||
#define devm_gpiod_get_index(varargs...) __devm_gpiod_get_index(varargs, 0)
|
||||
struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
|
||||
const char *con_id,
|
||||
enum gpiod_flags flags);
|
||||
#define __devm_gpiod_get_optional(dev, con_id, flags, ...) \
|
||||
__devm_gpiod_get_optional(dev, con_id, flags)
|
||||
#define devm_gpiod_get_optional(varargs...) \
|
||||
__devm_gpiod_get_optional(varargs, 0)
|
||||
struct gpio_desc *__must_check
|
||||
devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
|
||||
unsigned int index);
|
||||
__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
|
||||
unsigned int index, enum gpiod_flags flags);
|
||||
#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...) \
|
||||
__devm_gpiod_get_index_optional(dev, con_id, index, flags)
|
||||
#define devm_gpiod_get_index_optional(varargs...) \
|
||||
__devm_gpiod_get_index_optional(varargs, 0)
|
||||
|
||||
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
|
||||
|
||||
|
@@ -141,73 +141,16 @@ extern const char *gpiochip_is_requested(struct gpio_chip *chip,
|
||||
|
||||
/* add/remove chips */
|
||||
extern int gpiochip_add(struct gpio_chip *chip);
|
||||
extern int __must_check gpiochip_remove(struct gpio_chip *chip);
|
||||
extern int gpiochip_remove(struct gpio_chip *chip);
|
||||
extern struct gpio_chip *gpiochip_find(void *data,
|
||||
int (*match)(struct gpio_chip *chip, void *data));
|
||||
|
||||
/* lock/unlock as IRQ */
|
||||
int gpiod_lock_as_irq(struct gpio_desc *desc);
|
||||
void gpiod_unlock_as_irq(struct gpio_desc *desc);
|
||||
int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset);
|
||||
void gpio_unlock_as_irq(struct gpio_chip *chip, unsigned int offset);
|
||||
|
||||
struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
|
||||
|
||||
struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
|
||||
u16 hwnum);
|
||||
|
||||
enum gpio_lookup_flags {
|
||||
GPIO_ACTIVE_HIGH = (0 << 0),
|
||||
GPIO_ACTIVE_LOW = (1 << 0),
|
||||
GPIO_OPEN_DRAIN = (1 << 1),
|
||||
GPIO_OPEN_SOURCE = (1 << 2),
|
||||
};
|
||||
|
||||
/**
|
||||
* struct gpiod_lookup - lookup table
|
||||
* @chip_label: name of the chip the GPIO belongs to
|
||||
* @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO
|
||||
* @con_id: name of the GPIO from the device's point of view
|
||||
* @idx: index of the GPIO in case several GPIOs share the same name
|
||||
* @flags: mask of GPIO_* values
|
||||
*
|
||||
* gpiod_lookup is a lookup table for associating GPIOs to specific devices and
|
||||
* functions using platform data.
|
||||
*/
|
||||
struct gpiod_lookup {
|
||||
const char *chip_label;
|
||||
u16 chip_hwnum;
|
||||
const char *con_id;
|
||||
unsigned int idx;
|
||||
enum gpio_lookup_flags flags;
|
||||
};
|
||||
|
||||
struct gpiod_lookup_table {
|
||||
struct list_head list;
|
||||
const char *dev_id;
|
||||
struct gpiod_lookup table[];
|
||||
};
|
||||
|
||||
/*
|
||||
* Simple definition of a single GPIO under a con_id
|
||||
*/
|
||||
#define GPIO_LOOKUP(_chip_label, _chip_hwnum, _con_id, _flags) \
|
||||
GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, 0, _flags)
|
||||
|
||||
/*
|
||||
* Use this macro if you need to have several GPIOs under the same con_id.
|
||||
* Each GPIO needs to use a different index and can be accessed using
|
||||
* gpiod_get_index()
|
||||
*/
|
||||
#define GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, _idx, _flags) \
|
||||
{ \
|
||||
.chip_label = _chip_label, \
|
||||
.chip_hwnum = _chip_hwnum, \
|
||||
.con_id = _con_id, \
|
||||
.idx = _idx, \
|
||||
.flags = _flags, \
|
||||
}
|
||||
|
||||
void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
|
||||
|
||||
#ifdef CONFIG_GPIOLIB_IRQCHIP
|
||||
|
||||
void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
|
||||
@@ -223,6 +166,9 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
|
||||
|
||||
#endif /* CONFIG_GPIO_IRQCHIP */
|
||||
|
||||
int gpiochip_request_own_desc(struct gpio_desc *desc, const char *label);
|
||||
void gpiochip_free_own_desc(struct gpio_desc *desc);
|
||||
|
||||
#else /* CONFIG_GPIOLIB */
|
||||
|
||||
static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
|
||||
|
61
include/linux/gpio/machine.h
Normal file
@@ -0,0 +1,61 @@
#ifndef __LINUX_GPIO_MACHINE_H
#define __LINUX_GPIO_MACHINE_H

#include <linux/types.h>
#include <linux/list.h>

enum gpio_lookup_flags {
GPIO_ACTIVE_HIGH = (0 << 0),
GPIO_ACTIVE_LOW = (1 << 0),
GPIO_OPEN_DRAIN = (1 << 1),
GPIO_OPEN_SOURCE = (1 << 2),
};

/**
* struct gpiod_lookup - lookup table
* @chip_label: name of the chip the GPIO belongs to
* @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO
* @con_id: name of the GPIO from the device's point of view
* @idx: index of the GPIO in case several GPIOs share the same name
* @flags: mask of GPIO_* values
*
* gpiod_lookup is a lookup table for associating GPIOs to specific devices and
* functions using platform data.
*/
struct gpiod_lookup {
const char *chip_label;
u16 chip_hwnum;
const char *con_id;
unsigned int idx;
enum gpio_lookup_flags flags;
};

struct gpiod_lookup_table {
struct list_head list;
const char *dev_id;
struct gpiod_lookup table[];
};

/*
* Simple definition of a single GPIO under a con_id
*/
#define GPIO_LOOKUP(_chip_label, _chip_hwnum, _con_id, _flags) \
GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, 0, _flags)

/*
* Use this macro if you need to have several GPIOs under the same con_id.
* Each GPIO needs to use a different index and can be accessed using
* gpiod_get_index()
*/
#define GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, _idx, _flags) \
{ \
.chip_label = _chip_label, \
.chip_hwnum = _chip_hwnum, \
.con_id = _con_id, \
.idx = _idx, \
.flags = _flags, \
}

void gpiod_add_lookup_table(struct gpiod_lookup_table *table);

#endif /* __LINUX_GPIO_MACHINE_H */
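A board-file style sketch of the lookup-table macros now living in this header; the chip label, consumer names and device id below are placeholders, not values used by this commit.

#include <linux/gpio/machine.h>

/* Hypothetical wiring: two LEDs and a reset line on "gpiochip0",
 * handed to the device instance named "example-device.0". */
static struct gpiod_lookup_table example_gpios_table = {
	.dev_id = "example-device.0",
	.table = {
		GPIO_LOOKUP_IDX("gpiochip0", 16, "led", 0, GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP_IDX("gpiochip0", 17, "led", 1, GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP("gpiochip0", 42, "reset", GPIO_ACTIVE_LOW),
		{ },
	},
};

/* registered once from board/machine init code */
static void example_register_gpios(void)
{
	gpiod_add_lookup_table(&example_gpios_table);
}

The consumer side then resolves these entries through the gpiod_get*() family declared in <linux/gpio/consumer.h>.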
@@ -167,6 +167,7 @@ struct hid_item {
|
||||
#define HID_UP_MSVENDOR 0xff000000
|
||||
#define HID_UP_CUSTOM 0x00ff0000
|
||||
#define HID_UP_LOGIVENDOR 0xffbc0000
|
||||
#define HID_UP_LNVENDOR 0xffa00000
|
||||
#define HID_UP_SENSOR 0x00200000
|
||||
|
||||
#define HID_USAGE 0x0000ffff
|
||||
@@ -310,6 +311,11 @@ struct hid_item {
|
||||
*/
|
||||
#define HID_GROUP_RMI 0x0100
|
||||
|
||||
/*
|
||||
* Vendor specific HID device groups
|
||||
*/
|
||||
#define HID_GROUP_WACOM 0x0101
|
||||
|
||||
/*
|
||||
* This is the global environment of the parser. This information is
|
||||
* persistent for main-items. The global environment can be saved and
|
||||
|
@@ -93,7 +93,7 @@ static inline int kmap_atomic_idx_push(void)

#ifdef CONFIG_DEBUG_HIGHMEM
WARN_ON_ONCE(in_irq() && !irqs_disabled());
BUG_ON(idx > KM_TYPE_NR);
BUG_ON(idx >= KM_TYPE_NR);
#endif
return idx;
}
@@ -164,12 +164,15 @@ int host1x_job_submit(struct host1x_job *job);
|
||||
*/
|
||||
|
||||
struct host1x_reloc {
|
||||
struct host1x_bo *cmdbuf;
|
||||
u32 cmdbuf_offset;
|
||||
struct host1x_bo *target;
|
||||
u32 target_offset;
|
||||
u32 shift;
|
||||
u32 pad;
|
||||
struct {
|
||||
struct host1x_bo *bo;
|
||||
unsigned long offset;
|
||||
} cmdbuf;
|
||||
struct {
|
||||
struct host1x_bo *bo;
|
||||
unsigned long offset;
|
||||
} target;
|
||||
unsigned long shift;
|
||||
};
|
||||
|
||||
struct host1x_job {
|
||||
|
@@ -165,6 +165,7 @@ enum hrtimer_base_type {
|
||||
* struct hrtimer_cpu_base - the per cpu clock bases
|
||||
* @lock: lock protecting the base and associated clock bases
|
||||
* and timers
|
||||
* @cpu: cpu number
|
||||
* @active_bases: Bitfield to mark bases with active timers
|
||||
* @clock_was_set: Indicates that clock was set from irq context.
|
||||
* @expires_next: absolute time of the next event which was scheduled
|
||||
@@ -179,6 +180,7 @@ enum hrtimer_base_type {
|
||||
*/
|
||||
struct hrtimer_cpu_base {
|
||||
raw_spinlock_t lock;
|
||||
unsigned int cpu;
|
||||
unsigned int active_bases;
|
||||
unsigned int clock_was_set;
|
||||
#ifdef CONFIG_HIGH_RES_TIMERS
|
||||
@@ -324,14 +326,6 @@ static inline void timerfd_clock_was_set(void) { }
|
||||
#endif
|
||||
extern void hrtimers_resume(void);
|
||||
|
||||
extern ktime_t ktime_get(void);
|
||||
extern ktime_t ktime_get_real(void);
|
||||
extern ktime_t ktime_get_boottime(void);
|
||||
extern ktime_t ktime_get_monotonic_offset(void);
|
||||
extern ktime_t ktime_get_clocktai(void);
|
||||
extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
|
||||
ktime_t *offs_tai);
|
||||
|
||||
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
|
||||
|
||||
|
||||
@@ -452,12 +446,6 @@ extern void hrtimer_run_pending(void);
|
||||
/* Bootup initialization: */
|
||||
extern void __init hrtimers_init(void);
|
||||
|
||||
#if BITS_PER_LONG < 64
|
||||
extern u64 ktime_divns(const ktime_t kt, s64 div);
|
||||
#else /* BITS_PER_LONG < 64 */
|
||||
# define ktime_divns(kt, div) (u64)((kt).tv64 / (div))
|
||||
#endif
|
||||
|
||||
/* Show pending timers: */
|
||||
extern void sysrq_timer_list_show(void);
|
||||
|
||||
|
@@ -93,10 +93,6 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
|
||||
#endif /* CONFIG_DEBUG_VM */
|
||||
|
||||
extern unsigned long transparent_hugepage_flags;
|
||||
extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
||||
pmd_t *dst_pmd, pmd_t *src_pmd,
|
||||
struct vm_area_struct *vma,
|
||||
unsigned long addr, unsigned long end);
|
||||
extern int split_huge_page_to_list(struct page *page, struct list_head *list);
|
||||
static inline int split_huge_page(struct page *page)
|
||||
{
|
||||
|
@@ -80,13 +80,13 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
|
||||
bool isolate_huge_page(struct page *page, struct list_head *list);
|
||||
void putback_active_hugepage(struct page *page);
|
||||
bool is_hugepage_active(struct page *page);
|
||||
void free_huge_page(struct page *page);
|
||||
|
||||
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
|
||||
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
|
||||
#endif
|
||||
|
||||
extern unsigned long hugepages_treat_as_movable;
|
||||
extern const unsigned long hugetlb_zero, hugetlb_infinity;
|
||||
extern int sysctl_hugetlb_shm_group;
|
||||
extern struct list_head huge_boot_pages;
|
||||
|
||||
|
@@ -29,6 +29,8 @@
* @read: New API. drivers can fill up to max bytes of data
* into the buffer. The buffer is aligned for any type.
* @priv: Private data, for use by the RNG driver.
* @quality: Estimation of true entropy in RNG's bitstream
* (per mill).
*/
struct hwrng {
const char *name;
@@ -38,6 +40,7 @@ struct hwrng {
int (*data_read)(struct hwrng *rng, u32 *data);
int (*read)(struct hwrng *rng, void *data, size_t max, bool wait);
unsigned long priv;
unsigned short quality;

/* internal. */
struct list_head list;
@@ -47,5 +50,7 @@ struct hwrng {
extern int hwrng_register(struct hwrng *rng);
/** Unregister a Hardware Random Number Generator driver. */
extern void hwrng_unregister(struct hwrng *rng);
/** Feed random bits into the pool. */
extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy);

#endif /* LINUX_HWRANDOM_H_ */
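A sketch of a driver filling in the extended hwrng interface above, including the new quality field; the device name and the "random" sample are placeholders, and a real driver would read its hardware FIFO instead.

#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/string.h>

static int example_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	/* Placeholder for an MMIO read of the device's output register. */
	u32 sample = 0x12345678;
	size_t n = min_t(size_t, max, sizeof(sample));

	memcpy(data, &sample, n);
	return n;	/* number of bytes placed in data */
}

static struct hwrng example_rng = {
	.name	 = "example-rng",
	.read	 = example_rng_read,
	.quality = 1000,	/* per-mill entropy estimate, as documented above */
};

/* registered from probe with hwrng_register(&example_rng) and removed
 * again with hwrng_unregister(&example_rng) */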
@@ -577,4 +577,16 @@ static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node
|
||||
}
|
||||
#endif /* CONFIG_OF */
|
||||
|
||||
#ifdef CONFIG_I2C_ACPI
|
||||
int acpi_i2c_install_space_handler(struct i2c_adapter *adapter);
|
||||
void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter);
|
||||
void acpi_i2c_register_devices(struct i2c_adapter *adap);
|
||||
#else
|
||||
static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { }
|
||||
static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
|
||||
{ }
|
||||
static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
|
||||
{ return 0; }
|
||||
#endif
|
||||
|
||||
#endif /* _LINUX_I2C_H */
|
||||
|
@@ -17,9 +17,6 @@
|
||||
|
||||
/* The platform data for the Atmel maXTouch touchscreen driver */
|
||||
struct mxt_platform_data {
|
||||
const u8 *config;
|
||||
size_t config_length;
|
||||
u32 config_crc;
|
||||
unsigned long irqflags;
|
||||
u8 t19_num_keys;
|
||||
const unsigned int *t19_keymap;
|
||||
|
@@ -1,10 +0,0 @@
|
||||
#ifndef __LINUX_I2C_S6000_H
|
||||
#define __LINUX_I2C_S6000_H
|
||||
|
||||
struct s6_i2c_platform_data {
|
||||
const char *clock; /* the clock to use */
|
||||
int bus_num; /* the bus number to register */
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@@ -1001,6 +1001,26 @@ struct ieee80211_vendor_ie {
|
||||
u8 oui_type;
|
||||
} __packed;
|
||||
|
||||
struct ieee80211_wmm_ac_param {
|
||||
u8 aci_aifsn; /* AIFSN, ACM, ACI */
|
||||
u8 cw; /* ECWmin, ECWmax (CW = 2^ECW - 1) */
|
||||
__le16 txop_limit;
|
||||
} __packed;
|
||||
|
||||
struct ieee80211_wmm_param_ie {
|
||||
u8 element_id; /* Element ID: 221 (0xdd); */
|
||||
u8 len; /* Length: 24 */
|
||||
/* required fields for WMM version 1 */
|
||||
u8 oui[3]; /* 00:50:f2 */
|
||||
u8 oui_type; /* 2 */
|
||||
u8 oui_subtype; /* 1 */
|
||||
u8 version; /* 1 for WMM version 1.0 */
|
||||
u8 qos_info; /* AP/STA specific QoS info */
|
||||
u8 reserved; /* 0 */
|
||||
/* AC_BE, AC_BK, AC_VI, AC_VO */
|
||||
struct ieee80211_wmm_ac_param ac[4];
|
||||
} __packed;
|
||||
|
||||
/* Control frames */
|
||||
struct ieee80211_rts {
|
||||
__le16 frame_control;
|
||||
@@ -1621,6 +1641,9 @@ enum ieee80211_reasoncode {
|
||||
WLAN_REASON_INVALID_RSN_IE_CAP = 22,
|
||||
WLAN_REASON_IEEE8021X_FAILED = 23,
|
||||
WLAN_REASON_CIPHER_SUITE_REJECTED = 24,
|
||||
/* TDLS (802.11z) */
|
||||
WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE = 25,
|
||||
WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED = 26,
|
||||
/* 802.11e */
|
||||
WLAN_REASON_DISASSOC_UNSPECIFIED_QOS = 32,
|
||||
WLAN_REASON_DISASSOC_QAP_NO_BANDWIDTH = 33,
|
||||
|
@@ -36,8 +36,28 @@ extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __use

typedef int br_should_route_hook_t(struct sk_buff *skb);
extern br_should_route_hook_t __rcu *br_should_route_hook;

#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list);
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto);
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto);
#else
static inline int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list)
{
return 0;
}
static inline bool br_multicast_has_querier_anywhere(struct net_device *dev,
int proto)
{
return false;
}
static inline bool br_multicast_has_querier_adjacent(struct net_device *dev,
int proto)
{
return false;
}
#endif

#endif
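A sketch of how a bridge-port driver might consult the new querier helpers; the flooding decision shown is only an illustration of the intended use, not code from this commit.

#include <linux/if_bridge.h>
#include <linux/if_ether.h>

/* If no IGMP querier has been seen anywhere on the bridge this port belongs
 * to, snooping state is unreliable and flooding IPv4 multicast is the safe
 * fallback. */
static bool example_should_flood_ipv4_mcast(struct net_device *br_port_dev)
{
	return !br_multicast_has_querier_anywhere(br_port_dev, ETH_P_IP);
}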
@@ -187,7 +187,6 @@ vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
|
||||
}
|
||||
|
||||
extern bool vlan_do_receive(struct sk_buff **skb);
|
||||
extern struct sk_buff *vlan_untag(struct sk_buff *skb);
|
||||
|
||||
extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
|
||||
extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);
|
||||
@@ -241,11 +240,6 @@ static inline bool vlan_do_receive(struct sk_buff **skb)
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline struct sk_buff *vlan_untag(struct sk_buff *skb)
|
||||
{
|
||||
return skb;
|
||||
}
|
||||
|
||||
static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
|
||||
{
|
||||
return 0;
|
||||
|
@@ -1,5 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
|
||||
* KXCJK-1013 3-axis accelerometer Interface
|
||||
* Copyright (c) 2014, Intel Corporation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
@@ -11,9 +12,11 @@
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_AHB_H__
|
||||
#define __LINUX_AHB_H__
|
||||
#ifndef __IIO_KXCJK_1013_H__
|
||||
#define __IIO_KXCJK_1013_H__
|
||||
|
||||
extern int tegra_ahb_enable_smmu(struct device_node *ahb);
|
||||
struct kxcjk_1013_platform_data {
|
||||
bool active_high_intr;
|
||||
};
|
||||
|
||||
#endif /* __LINUX_AHB_H__ */
|
||||
#endif
|
@@ -47,6 +47,7 @@
|
||||
.type = device_type, \
|
||||
.modified = mod, \
|
||||
.info_mask_separate = mask, \
|
||||
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
|
||||
.scan_index = index, \
|
||||
.channel2 = ch2, \
|
||||
.address = addr, \
|
||||
@@ -59,11 +60,6 @@
|
||||
}, \
|
||||
}
|
||||
|
||||
#define ST_SENSOR_DEV_ATTR_SAMP_FREQ() \
|
||||
IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO, \
|
||||
st_sensors_sysfs_get_sampling_frequency, \
|
||||
st_sensors_sysfs_set_sampling_frequency)
|
||||
|
||||
#define ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL() \
|
||||
IIO_DEV_ATTR_SAMP_FREQ_AVAIL( \
|
||||
st_sensors_sysfs_sampling_frequency_avail)
|
||||
@@ -285,12 +281,6 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
|
||||
int st_sensors_check_device_support(struct iio_dev *indio_dev,
|
||||
int num_sensors_list, const struct st_sensors *sensors);
|
||||
|
||||
ssize_t st_sensors_sysfs_get_sampling_frequency(struct device *dev,
|
||||
struct device_attribute *attr, char *buf);
|
||||
|
||||
ssize_t st_sensors_sysfs_set_sampling_frequency(struct device *dev,
|
||||
struct device_attribute *attr, const char *buf, size_t size);
|
||||
|
||||
ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
|
||||
struct device_attribute *attr, char *buf);
|
||||
|
||||
|
@@ -13,8 +13,19 @@
|
||||
|
||||
#include <linux/i2c.h>
|
||||
#include <linux/iio/common/st_sensors.h>
|
||||
#include <linux/of.h>
|
||||
|
||||
void st_sensors_i2c_configure(struct iio_dev *indio_dev,
|
||||
struct i2c_client *client, struct st_sensor_data *sdata);
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
void st_sensors_of_i2c_probe(struct i2c_client *client,
|
||||
const struct of_device_id *match);
|
||||
#else
|
||||
static inline void st_sensors_of_i2c_probe(struct i2c_client *client,
|
||||
const struct of_device_id *match)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* ST_SENSORS_I2C_H */
|
||||
|
@@ -277,14 +277,7 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
**/
static inline s64 iio_get_time_ns(void)
{
struct timespec ts;
/*
* calls getnstimeofday.
* If hrtimers then up to ns accurate, if not microsecond.
*/
ktime_get_real_ts(&ts);

return timespec_to_ns(&ts);
return ktime_get_real_ns();
}

/* Device operating modes */
@@ -157,13 +157,14 @@ int adis_single_conversion(struct iio_dev *indio_dev,
|
||||
const struct iio_chan_spec *chan, unsigned int error_mask,
|
||||
int *val);
|
||||
|
||||
#define ADIS_VOLTAGE_CHAN(addr, si, chan, name, bits) { \
|
||||
#define ADIS_VOLTAGE_CHAN(addr, si, chan, name, info_all, bits) { \
|
||||
.type = IIO_VOLTAGE, \
|
||||
.indexed = 1, \
|
||||
.channel = (chan), \
|
||||
.extend_name = name, \
|
||||
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
|
||||
BIT(IIO_CHAN_INFO_SCALE), \
|
||||
.info_mask_shared_by_all = info_all, \
|
||||
.address = (addr), \
|
||||
.scan_index = (si), \
|
||||
.scan_type = { \
|
||||
@@ -174,19 +175,20 @@ int adis_single_conversion(struct iio_dev *indio_dev,
|
||||
}, \
|
||||
}
|
||||
|
||||
#define ADIS_SUPPLY_CHAN(addr, si, bits) \
|
||||
ADIS_VOLTAGE_CHAN(addr, si, 0, "supply", bits)
|
||||
#define ADIS_SUPPLY_CHAN(addr, si, info_all, bits) \
|
||||
ADIS_VOLTAGE_CHAN(addr, si, 0, "supply", info_all, bits)
|
||||
|
||||
#define ADIS_AUX_ADC_CHAN(addr, si, bits) \
|
||||
ADIS_VOLTAGE_CHAN(addr, si, 1, NULL, bits)
|
||||
#define ADIS_AUX_ADC_CHAN(addr, si, info_all, bits) \
|
||||
ADIS_VOLTAGE_CHAN(addr, si, 1, NULL, info_all, bits)
|
||||
|
||||
#define ADIS_TEMP_CHAN(addr, si, bits) { \
|
||||
#define ADIS_TEMP_CHAN(addr, si, info_all, bits) { \
|
||||
.type = IIO_TEMP, \
|
||||
.indexed = 1, \
|
||||
.channel = 0, \
|
||||
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
|
||||
BIT(IIO_CHAN_INFO_SCALE) | \
|
||||
BIT(IIO_CHAN_INFO_OFFSET), \
|
||||
.info_mask_shared_by_all = info_all, \
|
||||
.address = (addr), \
|
||||
.scan_index = (si), \
|
||||
.scan_type = { \
|
||||
@@ -197,13 +199,14 @@ int adis_single_conversion(struct iio_dev *indio_dev,
|
||||
}, \
|
||||
}
|
||||
|
||||
#define ADIS_MOD_CHAN(_type, mod, addr, si, info_sep, bits) { \
|
||||
#define ADIS_MOD_CHAN(_type, mod, addr, si, info_sep, info_all, bits) { \
|
||||
.type = (_type), \
|
||||
.modified = 1, \
|
||||
.channel2 = IIO_MOD_ ## mod, \
|
||||
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
|
||||
info_sep, \
|
||||
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
|
||||
.info_mask_shared_by_all = info_all, \
|
||||
.address = (addr), \
|
||||
.scan_index = (si), \
|
||||
.scan_type = { \
|
||||
@@ -214,17 +217,17 @@ int adis_single_conversion(struct iio_dev *indio_dev,
|
||||
}, \
|
||||
}
|
||||
|
||||
#define ADIS_ACCEL_CHAN(mod, addr, si, info_sep, bits) \
|
||||
ADIS_MOD_CHAN(IIO_ACCEL, mod, addr, si, info_sep, bits)
|
||||
#define ADIS_ACCEL_CHAN(mod, addr, si, info_sep, info_all, bits) \
|
||||
ADIS_MOD_CHAN(IIO_ACCEL, mod, addr, si, info_sep, info_all, bits)
|
||||
|
||||
#define ADIS_GYRO_CHAN(mod, addr, si, info_sep, bits) \
|
||||
ADIS_MOD_CHAN(IIO_ANGL_VEL, mod, addr, si, info_sep, bits)
|
||||
#define ADIS_GYRO_CHAN(mod, addr, si, info_sep, info_all, bits) \
|
||||
ADIS_MOD_CHAN(IIO_ANGL_VEL, mod, addr, si, info_sep, info_all, bits)
|
||||
|
||||
#define ADIS_INCLI_CHAN(mod, addr, si, info_sep, bits) \
|
||||
ADIS_MOD_CHAN(IIO_INCLI, mod, addr, si, info_sep, bits)
|
||||
#define ADIS_INCLI_CHAN(mod, addr, si, info_sep, info_all, bits) \
|
||||
ADIS_MOD_CHAN(IIO_INCLI, mod, addr, si, info_sep, info_all, bits)
|
||||
|
||||
#define ADIS_ROT_CHAN(mod, addr, si, info_sep, bits) \
|
||||
ADIS_MOD_CHAN(IIO_ROT, mod, addr, si, info_sep, bits)
|
||||
#define ADIS_ROT_CHAN(mod, addr, si, info_sep, info_all, bits) \
|
||||
ADIS_MOD_CHAN(IIO_ROT, mod, addr, si, info_sep, info_all, bits)
|
||||
|
||||
#ifdef CONFIG_IIO_ADIS_LIB_BUFFER
|
||||
|
||||
|
@@ -129,12 +129,11 @@ void iio_trigger_unregister(struct iio_trigger *trig_info);
|
||||
/**
|
||||
* iio_trigger_poll() - called on a trigger occurring
|
||||
* @trig: trigger which occurred
|
||||
* @time: timestamp when trigger occurred
|
||||
*
|
||||
* Typically called in relevant hardware interrupt handler.
|
||||
**/
|
||||
void iio_trigger_poll(struct iio_trigger *trig, s64 time);
|
||||
void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time);
|
||||
void iio_trigger_poll(struct iio_trigger *trig);
|
||||
void iio_trigger_poll_chained(struct iio_trigger *trig);
|
||||
|
||||
irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private);
|
||||
|
||||
|
@@ -56,6 +56,10 @@ enum iio_modifier {
|
||||
IIO_MOD_QUATERNION,
|
||||
IIO_MOD_TEMP_AMBIENT,
|
||||
IIO_MOD_TEMP_OBJECT,
|
||||
IIO_MOD_NORTH_MAGN,
|
||||
IIO_MOD_NORTH_TRUE,
|
||||
IIO_MOD_NORTH_MAGN_TILT_COMP,
|
||||
IIO_MOD_NORTH_TRUE_TILT_COMP
|
||||
};
|
||||
|
||||
enum iio_event_type {
|
||||
@@ -70,6 +74,7 @@ enum iio_event_info {
|
||||
IIO_EV_INFO_ENABLE,
|
||||
IIO_EV_INFO_VALUE,
|
||||
IIO_EV_INFO_HYSTERESIS,
|
||||
IIO_EV_INFO_PERIOD,
|
||||
};
|
||||
|
||||
enum iio_event_direction {
|
||||
|
@@ -19,6 +19,7 @@ extern int ima_file_check(struct file *file, int mask);
|
||||
extern void ima_file_free(struct file *file);
|
||||
extern int ima_file_mmap(struct file *file, unsigned long prot);
|
||||
extern int ima_module_check(struct file *file);
|
||||
extern int ima_fw_from_file(struct file *file, char *buf, size_t size);
|
||||
|
||||
#else
|
||||
static inline int ima_bprm_check(struct linux_binprm *bprm)
|
||||
@@ -46,6 +47,11 @@ static inline int ima_module_check(struct file *file)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int ima_fw_from_file(struct file *file, char *buf, size_t size)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_IMA */
|
||||
|
||||
#ifdef CONFIG_IMA_APPRAISE
|
||||
|
@@ -102,12 +102,6 @@ extern struct group_info init_groups;
|
||||
#define INIT_IDS
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_RCU_BOOST
|
||||
#define INIT_TASK_RCU_BOOST() \
|
||||
.rcu_boost_mutex = NULL,
|
||||
#else
|
||||
#define INIT_TASK_RCU_BOOST()
|
||||
#endif
|
||||
#ifdef CONFIG_TREE_PREEMPT_RCU
|
||||
#define INIT_TASK_RCU_TREE_PREEMPT() \
|
||||
.rcu_blocked_node = NULL,
|
||||
@@ -119,8 +113,7 @@ extern struct group_info init_groups;
|
||||
.rcu_read_lock_nesting = 0, \
|
||||
.rcu_read_unlock_special = 0, \
|
||||
.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \
|
||||
INIT_TASK_RCU_TREE_PREEMPT() \
|
||||
INIT_TASK_RCU_BOOST()
|
||||
INIT_TASK_RCU_TREE_PREEMPT()
|
||||
#else
|
||||
#define INIT_TASK_RCU_PREEMPT(tsk)
|
||||
#endif
|
||||
|
@@ -105,6 +105,7 @@ void input_mt_report_slot_state(struct input_dev *dev,
|
||||
|
||||
void input_mt_report_finger_count(struct input_dev *dev, int count);
|
||||
void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count);
|
||||
void input_mt_drop_unused(struct input_dev *dev);
|
||||
|
||||
void input_mt_sync_frame(struct input_dev *dev);
|
||||
|
||||
|
@@ -43,10 +43,22 @@ enum pixcir_int_mode {
#define PIXCIR_INT_ENABLE (1UL << 3)
#define PIXCIR_INT_POL_HIGH (1UL << 2)

/**
* struct pixcir_i2c_chip_data - chip related data
* @max_fingers: Max number of fingers reported simultaneously by h/w
* @has_hw_ids: Hardware supports finger tracking IDs
*
*/
struct pixcir_i2c_chip_data {
u8 max_fingers;
bool has_hw_ids;
};

struct pixcir_ts_platform_data {
int x_max;
int y_max;
int gpio_attb; /* GPIO connected to ATTB line */
struct pixcir_i2c_chip_data chip;
};

#endif
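An example of how a board file might populate the extended platform data above; all values are invented for illustration.

#include <linux/input/pixcir_ts.h>

/* Hypothetical wiring: a 480x272 panel whose controller reports up to
 * five fingers with hardware tracking IDs, ATTB wired to GPIO 12. */
static const struct pixcir_ts_platform_data example_pixcir_pdata = {
	.x_max		= 480,
	.y_max		= 272,
	.gpio_attb	= 12,
	.chip = {
		.max_fingers	= 5,
		.has_hw_ids	= true,
	},
};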
@@ -336,6 +336,7 @@ struct intel_iommu {
|
||||
#ifdef CONFIG_IRQ_REMAP
|
||||
struct ir_table *ir_table; /* Interrupt remapping info */
|
||||
#endif
|
||||
struct device *iommu_dev; /* IOMMU-sysfs device */
|
||||
int node;
|
||||
};
|
||||
|
||||
@@ -365,4 +366,6 @@ extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
|
||||
|
||||
extern int dmar_ir_support(void);
|
||||
|
||||
extern const struct attribute_group *intel_iommu_groups[];
|
||||
|
||||
#endif
|
||||
|
@@ -58,6 +58,8 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
|
||||
}
|
||||
#endif
|
||||
|
||||
#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
|
||||
|
||||
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
|
||||
unsigned long size);
|
||||
void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
|
||||
|
@@ -50,7 +50,7 @@ struct iommu_domain_geometry {
|
||||
};
|
||||
|
||||
struct iommu_domain {
|
||||
struct iommu_ops *ops;
|
||||
const struct iommu_ops *ops;
|
||||
void *priv;
|
||||
iommu_fault_handler_t handler;
|
||||
void *handler_token;
|
||||
@@ -140,7 +140,7 @@ struct iommu_ops {
|
||||
#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5 /* Pre Driver unbind */
|
||||
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */
|
||||
|
||||
extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops);
|
||||
extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
|
||||
extern bool iommu_present(struct bus_type *bus);
|
||||
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
|
||||
extern struct iommu_group *iommu_group_get_by_id(int id);
|
||||
@@ -181,11 +181,18 @@ extern int iommu_group_register_notifier(struct iommu_group *group,
|
||||
extern int iommu_group_unregister_notifier(struct iommu_group *group,
|
||||
struct notifier_block *nb);
|
||||
extern int iommu_group_id(struct iommu_group *group);
|
||||
extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
|
||||
|
||||
extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
|
||||
void *data);
|
||||
extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
|
||||
void *data);
|
||||
struct device *iommu_device_create(struct device *parent, void *drvdata,
|
||||
const struct attribute_group **groups,
|
||||
const char *fmt, ...);
|
||||
void iommu_device_destroy(struct device *dev);
|
||||
int iommu_device_link(struct device *dev, struct device *link);
|
||||
void iommu_device_unlink(struct device *dev, struct device *link);
|
||||
|
||||
/* Window handling function prototypes */
|
||||
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
|
||||
@@ -396,6 +403,27 @@ static inline int iommu_domain_set_attr(struct iommu_domain *domain,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline struct device *iommu_device_create(struct device *parent,
|
||||
void *drvdata,
|
||||
const struct attribute_group **groups,
|
||||
const char *fmt, ...)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline void iommu_device_destroy(struct device *dev)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int iommu_device_link(struct device *dev, struct device *link)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline void iommu_device_unlink(struct device *dev, struct device *link)
|
||||
{
|
||||
}
|
||||
|
||||
#endif /* CONFIG_IOMMU_API */
|
||||
|
||||
#endif /* __LINUX_IOMMU_H */
|
||||
|
@@ -237,6 +237,12 @@ extern int iomem_is_exclusive(u64 addr);
|
||||
extern int
|
||||
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
|
||||
void *arg, int (*func)(unsigned long, unsigned long, void *));
|
||||
extern int
|
||||
walk_system_ram_res(u64 start, u64 end, void *arg,
|
||||
int (*func)(u64, u64, void *));
|
||||
extern int
|
||||
walk_iomem_res(char *name, unsigned long flags, u64 start, u64 end, void *arg,
|
||||
int (*func)(u64, u64, void *));
|
||||
|
||||
/* True if any part of r1 overlaps r2 */
|
||||
static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
|
||||
|
@@ -34,6 +34,11 @@ struct iova_domain {
|
||||
unsigned long dma_32bit_pfn;
|
||||
};
|
||||
|
||||
static inline unsigned long iova_size(struct iova *iova)
|
||||
{
|
||||
return iova->pfn_hi - iova->pfn_lo + 1;
|
||||
}
|
||||
|
||||
struct iova *alloc_iova_mem(void);
|
||||
void free_iova_mem(struct iova *iova);
|
||||
void free_iova(struct iova_domain *iovad, unsigned long pfn);
|
||||
|
@@ -39,6 +39,7 @@ struct ipv6_devconf {
|
||||
#endif
|
||||
__s32 proxy_ndp;
|
||||
__s32 accept_source_route;
|
||||
__s32 accept_ra_from_local;
|
||||
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
|
||||
__s32 optimistic_dad;
|
||||
#endif
|
||||
@@ -193,12 +194,13 @@ struct ipv6_pinfo {
|
||||
sndflow:1,
|
||||
repflow:1,
|
||||
pmtudisc:3,
|
||||
ipv6only:1,
|
||||
padding:1, /* 1 bit hole */
|
||||
srcprefs:3, /* 001: prefer temporary address
|
||||
* 010: prefer public address
|
||||
* 100: prefer care-of address
|
||||
*/
|
||||
dontfrag:1;
|
||||
dontfrag:1,
|
||||
autoflowlabel:1;
|
||||
__u8 min_hopcount;
|
||||
__u8 tclass;
|
||||
__be32 rcv_flowinfo;
|
||||
@@ -256,16 +258,6 @@ static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
|
||||
return inet_sk(__sk)->pinet6;
|
||||
}
|
||||
|
||||
static inline struct request_sock *inet6_reqsk_alloc(struct request_sock_ops *ops)
|
||||
{
|
||||
struct request_sock *req = reqsk_alloc(ops);
|
||||
|
||||
if (req)
|
||||
inet_rsk(req)->pktopts = NULL;
|
||||
|
||||
return req;
|
||||
}
|
||||
|
||||
static inline struct raw6_sock *raw6_sk(const struct sock *sk)
|
||||
{
|
||||
return (struct raw6_sock *)sk;
|
||||
@@ -282,8 +274,8 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
|
||||
__inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
|
||||
}
|
||||
|
||||
#define __ipv6_only_sock(sk) (inet6_sk(sk)->ipv6only)
|
||||
#define ipv6_only_sock(sk) ((sk)->sk_family == PF_INET6 && __ipv6_only_sock(sk))
|
||||
#define __ipv6_only_sock(sk) (sk->sk_ipv6only)
|
||||
#define ipv6_only_sock(sk) (__ipv6_only_sock(sk))
|
||||
#define ipv6_sk_rxinfo(sk) ((sk)->sk_family == PF_INET6 && \
|
||||
inet6_sk(sk)->rxopt.bits.rxinfo)
|
||||
|
||||
@@ -296,8 +288,8 @@ static inline const struct in6_addr *inet6_rcv_saddr(const struct sock *sk)
|
||||
|
||||
static inline int inet_v6_ipv6only(const struct sock *sk)
|
||||
{
|
||||
return likely(sk->sk_state != TCP_TIME_WAIT) ?
|
||||
ipv6_only_sock(sk) : inet_twsk(sk)->tw_ipv6only;
|
||||
/* ipv6only field is at same position for timewait and other sockets */
|
||||
return ipv6_only_sock(sk);
|
||||
}
|
||||
#else
|
||||
#define __ipv6_only_sock(sk) 0
|
||||
|
@@ -771,6 +771,8 @@ void irq_gc_eoi(struct irq_data *d);
|
||||
int irq_gc_set_wake(struct irq_data *d, unsigned int on);
|
||||
|
||||
/* Setup functions for irq_chip_generic */
|
||||
int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
|
||||
irq_hw_number_t hw_irq);
|
||||
struct irq_chip_generic *
|
||||
irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
|
||||
void __iomem *reg_base, irq_flow_handler_t handler);
|
||||
|
@@ -33,6 +33,11 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }

bool irq_work_queue(struct irq_work *work);

#ifdef CONFIG_SMP
bool irq_work_queue_on(struct irq_work *work, int cpu);
#endif

void irq_work_run(void);
void irq_work_sync(struct irq_work *work);
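A small sketch around the new cross-CPU queueing hook declared above; the work handler and the caller are stand-ins.

#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void example_irq_work_fn(struct irq_work *work)
{
	/* runs in the IRQ-work context of whichever CPU the work was raised on */
	pr_info("irq_work ran on cpu %d\n", smp_processor_id());
}

static DEFINE_IRQ_WORK(example_work, example_irq_work_fn);

static void example_kick(int target_cpu)
{
#ifdef CONFIG_SMP
	irq_work_queue_on(&example_work, target_cpu);	/* raise on another CPU */
#else
	irq_work_queue(&example_work);			/* single-CPU fallback */
#endif
}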
200
include/linux/irqchip/arm-gic-v3.h
Normal file
200
include/linux/irqchip/arm-gic-v3.h
Normal file
@@ -0,0 +1,200 @@
|
||||
/*
|
||||
* Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H
|
||||
#define __LINUX_IRQCHIP_ARM_GIC_V3_H
|
||||
|
||||
#include <asm/sysreg.h>
|
||||
|
||||
/*
|
||||
 * Distributor registers. We assume we're running non-secure, with ARE
 * being set. Secure-only and non-ARE registers are not described.
 */
#define GICD_CTLR 0x0000
#define GICD_TYPER 0x0004
#define GICD_IIDR 0x0008
#define GICD_STATUSR 0x0010
#define GICD_SETSPI_NSR 0x0040
#define GICD_CLRSPI_NSR 0x0048
#define GICD_SETSPI_SR 0x0050
#define GICD_CLRSPI_SR 0x0058
#define GICD_SEIR 0x0068
#define GICD_ISENABLER 0x0100
#define GICD_ICENABLER 0x0180
#define GICD_ISPENDR 0x0200
#define GICD_ICPENDR 0x0280
#define GICD_ISACTIVER 0x0300
#define GICD_ICACTIVER 0x0380
#define GICD_IPRIORITYR 0x0400
#define GICD_ICFGR 0x0C00
#define GICD_IROUTER 0x6000
#define GICD_PIDR2 0xFFE8

#define GICD_CTLR_RWP (1U << 31)
#define GICD_CTLR_ARE_NS (1U << 4)
#define GICD_CTLR_ENABLE_G1A (1U << 1)
#define GICD_CTLR_ENABLE_G1 (1U << 0)

#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)

#define GIC_PIDR2_ARCH_MASK 0xf0
#define GIC_PIDR2_ARCH_GICv3 0x30
#define GIC_PIDR2_ARCH_GICv4 0x40

/*
 * Re-Distributor registers, offsets from RD_base
 */
#define GICR_CTLR GICD_CTLR
#define GICR_IIDR 0x0004
#define GICR_TYPER 0x0008
#define GICR_STATUSR GICD_STATUSR
#define GICR_WAKER 0x0014
#define GICR_SETLPIR 0x0040
#define GICR_CLRLPIR 0x0048
#define GICR_SEIR GICD_SEIR
#define GICR_PROPBASER 0x0070
#define GICR_PENDBASER 0x0078
#define GICR_INVLPIR 0x00A0
#define GICR_INVALLR 0x00B0
#define GICR_SYNCR 0x00C0
#define GICR_MOVLPIR 0x0100
#define GICR_MOVALLR 0x0110
#define GICR_PIDR2 GICD_PIDR2

#define GICR_WAKER_ProcessorSleep (1U << 1)
#define GICR_WAKER_ChildrenAsleep (1U << 2)

/*
 * Re-Distributor registers, offsets from SGI_base
 */
#define GICR_ISENABLER0 GICD_ISENABLER
#define GICR_ICENABLER0 GICD_ICENABLER
#define GICR_ISPENDR0 GICD_ISPENDR
#define GICR_ICPENDR0 GICD_ICPENDR
#define GICR_ISACTIVER0 GICD_ISACTIVER
#define GICR_ICACTIVER0 GICD_ICACTIVER
#define GICR_IPRIORITYR0 GICD_IPRIORITYR
#define GICR_ICFGR0 GICD_ICFGR

#define GICR_TYPER_VLPIS (1U << 1)
#define GICR_TYPER_LAST (1U << 4)

/*
 * CPU interface registers
 */
#define ICC_CTLR_EL1_EOImode_drop_dir (0U << 1)
#define ICC_CTLR_EL1_EOImode_drop (1U << 1)
#define ICC_SRE_EL1_SRE (1U << 0)

/*
 * Hypervisor interface registers (SRE only)
 */
#define ICH_LR_VIRTUAL_ID_MASK ((1UL << 32) - 1)

#define ICH_LR_EOI (1UL << 41)
#define ICH_LR_GROUP (1UL << 60)
#define ICH_LR_STATE (3UL << 62)
#define ICH_LR_PENDING_BIT (1UL << 62)
#define ICH_LR_ACTIVE_BIT (1UL << 63)

#define ICH_MISR_EOI (1 << 0)
#define ICH_MISR_U (1 << 1)

#define ICH_HCR_EN (1 << 0)
#define ICH_HCR_UIE (1 << 1)

#define ICH_VMCR_CTLR_SHIFT 0
#define ICH_VMCR_CTLR_MASK (0x21f << ICH_VMCR_CTLR_SHIFT)
#define ICH_VMCR_BPR1_SHIFT 18
#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT)
#define ICH_VMCR_BPR0_SHIFT 21
#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT)
#define ICH_VMCR_PMR_SHIFT 24
#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)

#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
#define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
#define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
#define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)

#define ICC_IAR1_EL1_SPURIOUS 0x3ff

#define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)

#define ICC_SRE_EL2_SRE (1 << 0)
#define ICC_SRE_EL2_ENABLE (1 << 3)

/*
 * System register definitions
 */
#define ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4)
#define ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0)
#define ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
#define ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
#define ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
#define ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5)
#define ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)

#define __LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
#define __LR8_EL2(x) sys_reg(3, 4, 12, 13, x)

#define ICH_LR0_EL2 __LR0_EL2(0)
#define ICH_LR1_EL2 __LR0_EL2(1)
#define ICH_LR2_EL2 __LR0_EL2(2)
#define ICH_LR3_EL2 __LR0_EL2(3)
#define ICH_LR4_EL2 __LR0_EL2(4)
#define ICH_LR5_EL2 __LR0_EL2(5)
#define ICH_LR6_EL2 __LR0_EL2(6)
#define ICH_LR7_EL2 __LR0_EL2(7)
#define ICH_LR8_EL2 __LR8_EL2(0)
#define ICH_LR9_EL2 __LR8_EL2(1)
#define ICH_LR10_EL2 __LR8_EL2(2)
#define ICH_LR11_EL2 __LR8_EL2(3)
#define ICH_LR12_EL2 __LR8_EL2(4)
#define ICH_LR13_EL2 __LR8_EL2(5)
#define ICH_LR14_EL2 __LR8_EL2(6)
#define ICH_LR15_EL2 __LR8_EL2(7)

#define __AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
#define ICH_AP0R0_EL2 __AP0Rx_EL2(0)
#define ICH_AP0R1_EL2 __AP0Rx_EL2(1)
#define ICH_AP0R2_EL2 __AP0Rx_EL2(2)
#define ICH_AP0R3_EL2 __AP0Rx_EL2(3)

#define __AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x)
#define ICH_AP1R0_EL2 __AP1Rx_EL2(0)
#define ICH_AP1R1_EL2 __AP1Rx_EL2(1)
#define ICH_AP1R2_EL2 __AP1Rx_EL2(2)
#define ICH_AP1R3_EL2 __AP1Rx_EL2(3)

#ifndef __ASSEMBLY__

#include <linux/stringify.h>

static inline void gic_write_eoir(u64 irq)
{
	asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
	isb();
}

#endif

#endif
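For orientation, the EOI write defined above pairs with an acknowledge read of ICC_IAR1_EL1. The sketch below is illustrative only and not taken from this commit; gic_read_iar() and demo_handle_one_irq() are assumptions modelled on the msr_s/__stringify pattern used by gic_write_eoir().

/* Illustrative sketch, not part of this diff. */
static inline u64 gic_read_iar(void)
{
	u64 irqstat;

	asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
	return irqstat;
}

static void demo_handle_one_irq(void)
{
	u64 irqnr = gic_read_iar();			/* acknowledge */

	if ((irqnr & 0x3ff) == ICC_IAR1_EL1_SPURIOUS)
		return;					/* nothing pending */

	/* ... dispatch irqnr to the right handler here ... */

	gic_write_eoir(irqnr);				/* priority drop / EOI */
}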
@@ -1,64 +0,0 @@
/*
 * SPEAr platform shared irq layer header file
 *
 * Copyright (C) 2009-2012 ST Microelectronics
 * Viresh Kumar <viresh.linux@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#ifndef __SPEAR_SHIRQ_H
#define __SPEAR_SHIRQ_H

#include <linux/irq.h>
#include <linux/types.h>

/*
 * struct shirq_regs: shared irq register configuration
 *
 * enb_reg: enable register offset
 * reset_to_enb: when set to 1, the bit must be cleared to enable the interrupt
 * status_reg: status register offset
 * status_reg_mask: status register valid mask
 * clear_reg: clear register offset
 * reset_to_clear: when set to 1, the bit must be cleared to clear the interrupt
 */
struct shirq_regs {
	u32 enb_reg;
	u32 reset_to_enb;
	u32 status_reg;
	u32 clear_reg;
	u32 reset_to_clear;
};

/*
 * struct spear_shirq: shared irq structure
 *
 * irq: hardware irq number
 * irq_base: base irq in linux domain
 * irq_nr: no. of shared interrupts in a particular block
 * irq_bit_off: starting bit offset in the status register
 * invalid_irq: irq group is currently disabled
 * base: base address of shared irq register
 * regs: register configuration for shared irq block
 */
struct spear_shirq {
	u32 irq;
	u32 irq_base;
	u32 irq_nr;
	u32 irq_bit_off;
	int invalid_irq;
	void __iomem *base;
	struct shirq_regs regs;
};

int __init spear300_shirq_of_init(struct device_node *np,
				  struct device_node *parent);
int __init spear310_shirq_of_init(struct device_node *np,
				  struct device_node *parent);
int __init spear320_shirq_of_init(struct device_node *np,
				  struct device_node *parent);

#endif /* __SPEAR_SHIRQ_H */
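For context on the header being removed here, a block of shared interrupts was described with the two structures above. The initializer below is a hypothetical example; every offset and count is invented, purely to show how the removed structures fit together.

/* Hypothetical example only -- all values invented. */
static struct spear_shirq demo_shirq = {
	.irq		= 28,
	.irq_nr		= 4,
	.irq_bit_off	= 0,
	.regs = {
		.enb_reg	= 0x50,
		.reset_to_enb	= 1,
		.status_reg	= 0x54,
		.clear_reg	= 0x58,
		.reset_to_clear	= 1,
	},
};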
@@ -172,6 +172,8 @@ extern int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
extern void irq_domain_associate_many(struct irq_domain *domain,
				      unsigned int irq_base,
				      irq_hw_number_t hwirq_base, int count);
extern void irq_domain_disassociate(struct irq_domain *domain,
				    unsigned int irq);

extern unsigned int irq_create_mapping(struct irq_domain *host,
				       irq_hw_number_t hwirq);
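The newly exported irq_domain_disassociate() is the counterpart of irq_domain_associate(); most drivers still only need irq_create_mapping(). A minimal sketch of a caller follows (the demo_* name is a placeholder, not from this diff).

/* Sketch only: turning a hardware irq number into a Linux irq. */
static unsigned int demo_map_hwirq(struct irq_domain *domain, irq_hw_number_t hwirq)
{
	unsigned int virq = irq_create_mapping(domain, hwirq);

	if (!virq)
		pr_err("failed to map hwirq %lu\n", (unsigned long)hwirq);

	return virq;	/* 0 means no mapping could be created */
}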
@@ -180,8 +180,8 @@ struct ippp_struct {
  struct slcompress *slcomp;
#endif
#ifdef CONFIG_IPPP_FILTER
  struct sk_filter *pass_filter;   /* filter for packets to pass */
  struct sk_filter *active_filter; /* filter for pkts to reset idle */
  struct bpf_prog *pass_filter;   /* filter for packets to pass */
  struct bpf_prog *active_filter; /* filter for pkts to reset idle */
#endif
  unsigned long debug;
  struct isdn_ppp_compressor *compressor,*decompressor;
@@ -470,6 +470,7 @@ extern enum system_states {
#define TAINT_FIRMWARE_WORKAROUND 11
#define TAINT_OOT_MODULE 12
#define TAINT_UNSIGNED_MODULE 13
#define TAINT_SOFTLOCKUP 14

extern const char hex_asc[];
#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
@@ -493,15 +494,10 @@ static inline char *hex_byte_pack_upper(char *buf, u8 byte)
	return buf;
}

static inline char * __deprecated pack_hex_byte(char *buf, u8 byte)
{
	return hex_byte_pack(buf, byte);
}

extern int hex_to_bin(char ch);
extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);

int mac_pton(const char *s, u8 *mac);
bool mac_pton(const char *s, u8 *mac);

/*
 * General tracing related utility functions - trace_printk(),
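Two caller-visible changes fall out of this hunk: pack_hex_byte() is now only a deprecated wrapper around hex_byte_pack(), and mac_pton() returns bool instead of int. A hedged sketch of an updated caller follows; the demo_* name and the values are invented.

/* Sketch: what callers look like after this hunk. */
static int demo_hex_and_mac(const char *mac_str)
{
	u8 mac[6];
	char hex[3];
	char *p = hex_byte_pack(hex, 0xab);	/* writes "ab", returns hex + 2 */

	*p = '\0';

	if (!mac_pton(mac_str, mac))		/* now tested as a boolean */
		return -EINVAL;

	return 0;
}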
@@ -849,5 +845,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
	/* User perms >= group perms >= other perms */ \
	BUILD_BUG_ON_ZERO(((perms) >> 6) < (((perms) >> 3) & 7)) + \
	BUILD_BUG_ON_ZERO((((perms) >> 3) & 7) < ((perms) & 7)) + \
	/* Other writable? Generally considered a bad idea. */ \
	BUILD_BUG_ON_ZERO((perms) & 2) + \
	(perms))
#endif
@@ -10,6 +10,7 @@
#include <linux/ioport.h>
#include <linux/elfcore.h>
#include <linux/elf.h>
#include <linux/module.h>
#include <asm/kexec.h>

/* Verify architecture specific macros are defined */
@@ -69,7 +70,18 @@ typedef unsigned long kimage_entry_t;
#define IND_SOURCE 0x8

struct kexec_segment {
	void __user *buf;
	/*
	 * This pointer can point to user memory if kexec_load() system
	 * call is used or will point to kernel memory if
	 * kexec_file_load() system call is used.
	 *
	 * Use ->buf when expecting to deal with user memory and use ->kbuf
	 * when expecting to deal with kernel memory.
	 */
	union {
		void __user *buf;
		void *kbuf;
	};
	size_t bufsz;
	unsigned long mem;
	size_t memsz;
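The comment above spells out the contract for the new union. A short sketch of a consumer honoring it follows; demo_copy_segment() is hypothetical, and image->file_mode is the flag added later in this diff.

/* Sketch: pick ->kbuf or ->buf depending on how the image was loaded. */
static int demo_copy_segment(struct kimage *image,
			     struct kexec_segment *seg, void *dst)
{
	if (image->file_mode) {
		memcpy(dst, seg->kbuf, seg->bufsz);		/* kernel memory */
		return 0;
	}
	if (copy_from_user(dst, seg->buf, seg->bufsz))		/* user memory */
		return -EFAULT;
	return 0;
}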
@@ -84,6 +96,27 @@ struct compat_kexec_segment {
};
#endif

struct kexec_sha_region {
	unsigned long start;
	unsigned long len;
};

struct purgatory_info {
	/* Pointer to elf header of read only purgatory */
	Elf_Ehdr *ehdr;

	/* Pointer to purgatory sechdrs which are modifiable */
	Elf_Shdr *sechdrs;
	/*
	 * Temporary buffer location where purgatory is loaded and relocated
	 * This memory can be freed post image load
	 */
	void *purgatory_buf;

	/* Address where purgatory is finally loaded and is executed from */
	unsigned long purgatory_load_addr;
};

struct kimage {
	kimage_entry_t head;
	kimage_entry_t *entry;
@@ -100,7 +133,7 @@ struct kimage {

	struct list_head control_pages;
	struct list_head dest_pages;
	struct list_head unuseable_pages;
	struct list_head unusable_pages;

	/* Address of next control page to allocate for crash kernels. */
	unsigned long control_page;
@@ -110,13 +143,63 @@ struct kimage {
#define KEXEC_TYPE_DEFAULT 0
#define KEXEC_TYPE_CRASH 1
	unsigned int preserve_context : 1;
	/* If set, we are using file mode kexec syscall */
	unsigned int file_mode:1;

#ifdef ARCH_HAS_KIMAGE_ARCH
	struct kimage_arch arch;
#endif

	/* Additional fields for file based kexec syscall */
	void *kernel_buf;
	unsigned long kernel_buf_len;

	void *initrd_buf;
	unsigned long initrd_buf_len;

	char *cmdline_buf;
	unsigned long cmdline_buf_len;

	/* File operations provided by image loader */
	struct kexec_file_ops *fops;

	/* Image loader handling the kernel can store a pointer here */
	void *image_loader_data;

	/* Information for loading purgatory */
	struct purgatory_info purgatory_info;
};

/*
 * Keeps track of buffer parameters as provided by caller for requesting
 * memory placement of buffer.
 */
struct kexec_buf {
	struct kimage *image;
	char *buffer;
	unsigned long bufsz;
	unsigned long memsz;
	unsigned long buf_align;
	unsigned long buf_min;
	unsigned long buf_max;
	bool top_down; /* allocate from top of memory hole */
};

typedef int (kexec_probe_t)(const char *kernel_buf, unsigned long kernel_size);
typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf,
			     unsigned long kernel_len, char *initrd,
			     unsigned long initrd_len, char *cmdline,
			     unsigned long cmdline_len);
typedef int (kexec_cleanup_t)(void *loader_data);
typedef int (kexec_verify_sig_t)(const char *kernel_buf,
				 unsigned long kernel_len);

struct kexec_file_ops {
	kexec_probe_t *probe;
	kexec_load_t *load;
	kexec_cleanup_t *cleanup;
	kexec_verify_sig_t *verify_sig;
};

/* kexec interface functions */
extern void machine_kexec(struct kimage *image);
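The kexec_file_ops table above is what an architecture's image loader registers for the new file-based syscall; the loader's private data ends up in image_loader_data. Below is a purely illustrative wiring with stub hooks (the demo_* names are not part of this diff).

/* Illustrative only: minimal hooks for the file-based kexec path. */
static int demo_probe(const char *kernel_buf, unsigned long kernel_len)
{
	return 0;	/* a real loader would check magic bytes here */
}

static void *demo_load(struct kimage *image, char *kernel_buf,
		       unsigned long kernel_len, char *initrd,
		       unsigned long initrd_len, char *cmdline,
		       unsigned long cmdline_len)
{
	return NULL;	/* loader-private data, kept in image_loader_data */
}

static int demo_cleanup(void *loader_data)
{
	return 0;
}

static struct kexec_file_ops demo_kexec_ops = {
	.probe		= demo_probe,
	.load		= demo_load,
	.cleanup	= demo_cleanup,
	/* .verify_sig left unset in this sketch */
};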
@@ -127,8 +210,21 @@ extern asmlinkage long sys_kexec_load(unsigned long entry,
					      struct kexec_segment __user *segments,
					      unsigned long flags);
extern int kernel_kexec(void);
extern int kexec_add_buffer(struct kimage *image, char *buffer,
			    unsigned long bufsz, unsigned long memsz,
			    unsigned long buf_align, unsigned long buf_min,
			    unsigned long buf_max, bool top_down,
			    unsigned long *load_addr);
extern struct page *kimage_alloc_control_pages(struct kimage *image,
					       unsigned int order);
extern int kexec_load_purgatory(struct kimage *image, unsigned long min,
				unsigned long max, int top_down,
				unsigned long *load_addr);
extern int kexec_purgatory_get_set_symbol(struct kimage *image,
					  const char *name, void *buf,
					  unsigned int size, bool get_value);
extern void *kexec_purgatory_get_symbol_addr(struct kimage *image,
					     const char *name);
extern void crash_kexec(struct pt_regs *);
int kexec_should_crash(struct task_struct *);
void crash_save_cpu(struct pt_regs *regs, int cpu);
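To show how the two new helpers above are meant to be combined, here is a rough sketch of a loader's ->load() hook placing the kernel image and then purgatory. Error handling is trimmed, and the alignment and placement-range values are assumptions, not taken from this commit.

/* Rough sketch only: place the kernel buffer, then purgatory. */
static void *demo_load_hook(struct kimage *image, char *kernel,
			    unsigned long kernel_len, char *initrd,
			    unsigned long initrd_len, char *cmdline,
			    unsigned long cmdline_len)
{
	unsigned long kernel_load_addr, purgatory_load_addr;
	int ret;

	ret = kexec_add_buffer(image, kernel, kernel_len, kernel_len,
			       PAGE_SIZE, 0, ULONG_MAX, false,
			       &kernel_load_addr);
	if (ret)
		return ERR_PTR(ret);

	ret = kexec_load_purgatory(image, 0, ULONG_MAX, 0,
				   &purgatory_load_addr);
	if (ret)
		return ERR_PTR(ret);

	return NULL;	/* or a pointer to loader-private state */
}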
@@ -177,6 +273,10 @@ extern int kexec_load_disabled;
#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT)
#endif

/* List of defined/legal kexec file flags */
#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \
			  KEXEC_FILE_NO_INITRAMFS)

#define VMCOREINFO_BYTES (4096)
#define VMCOREINFO_NOTE_NAME "VMCOREINFO"
#define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4)
Some files were not shown because too many files have changed in this diff