Merge tag 'v4.3-rc3' into next

Merge with Linux 4.3-rc3 to bring in the MFD DA9062 changes needed to merge
the DA9062 OnKey driver.
Dmitry Torokhov
2015-09-29 16:28:52 -07:00
10159 changed files with 562719 additions and 255386 deletions

View File

@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
@@ -221,7 +217,7 @@ struct pci_dev;
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
void acpi_pci_irq_disable (struct pci_dev *dev);
extern int ec_read(u8 addr, u8 *val);

View File

@@ -45,23 +45,27 @@ enum asn1_opcode {
ASN1_OP_MATCH_JUMP = 0x04,
ASN1_OP_MATCH_JUMP_OR_SKIP = 0x05,
ASN1_OP_MATCH_ANY = 0x08,
ASN1_OP_MATCH_ANY_OR_SKIP = 0x09,
ASN1_OP_MATCH_ANY_ACT = 0x0a,
ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 0x0b,
/* Everything before here matches unconditionally */
ASN1_OP_COND_MATCH_OR_SKIP = 0x11,
ASN1_OP_COND_MATCH_ACT_OR_SKIP = 0x13,
ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 0x15,
ASN1_OP_COND_MATCH_ANY = 0x18,
ASN1_OP_COND_MATCH_ANY_OR_SKIP = 0x19,
ASN1_OP_COND_MATCH_ANY_ACT = 0x1a,
ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 0x1b,
/* Everything before here will want a tag from the data */
#define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT
#define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP
/* These are here to help fill up space */
ASN1_OP_COND_FAIL = 0x1b,
ASN1_OP_COMPLETE = 0x1c,
ASN1_OP_ACT = 0x1d,
ASN1_OP_RETURN = 0x1e,
ASN1_OP_COND_FAIL = 0x1c,
ASN1_OP_COMPLETE = 0x1d,
ASN1_OP_ACT = 0x1e,
ASN1_OP_MAYBE_ACT = 0x1f,
/* The following eight have bit 0 -> SET, 1 -> OF, 2 -> ACT */
ASN1_OP_END_SEQ = 0x20,
@@ -76,6 +80,8 @@ enum asn1_opcode {
#define ASN1_OP_END__OF 0x02
#define ASN1_OP_END__ACT 0x04
ASN1_OP_RETURN = 0x28,
ASN1_OP__NR
};
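
For context, a minimal sketch (helper name hypothetical, not part of this diff) showing how the END-opcode modifier bits described above compose on top of the ASN1_OP_END_SEQ base:

/* bit 0 -> SET, bit 1 -> OF, bit 2 -> ACT, per the comment above */
static inline unsigned char asn1_end_op_variant(bool of, bool act)
{
	unsigned char op = ASN1_OP_END_SEQ;	/* 0x20 base */

	if (of)
		op |= ASN1_OP_END__OF;		/* ..._OF variants */
	if (act)
		op |= ASN1_OP_END__ACT;		/* ..._ACT variants */
	return op;
}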

View File

@@ -45,6 +45,7 @@ enum {
ATA_SECT_SIZE = 512,
ATA_MAX_SECTORS_128 = 128,
ATA_MAX_SECTORS = 256,
ATA_MAX_SECTORS_1024 = 1024,
ATA_MAX_SECTORS_LBA48 = 65535, /* TODO: 65536? */
ATA_MAX_SECTORS_TAPE = 65535,
@@ -384,8 +385,6 @@ enum {
SATA_SSP = 0x06, /* Software Settings Preservation */
SATA_DEVSLP = 0x09, /* Device Sleep */
SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
/* feature values for SET_MAX */
ATA_SET_MAX_ADDR = 0x00,
ATA_SET_MAX_PASSWD = 0x01,
@@ -529,8 +528,6 @@ struct ata_bmdma_prd {
#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
#define ata_id_has_ncq_autosense(id) \
((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
static inline bool ata_id_has_hipm(const u16 *id)
{
@@ -719,20 +716,6 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
return false;
}
static inline bool ata_id_has_sense_reporting(const u16 *id)
{
if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
return false;
return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
}
static inline bool ata_id_sense_reporting_enabled(const u16 *id)
{
if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
return false;
return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
}
/**
* ata_id_major_version - get ATA level of drive
* @id: Identify data

View File

@@ -16,115 +16,151 @@
#ifndef ATMEL_SERIAL_H
#define ATMEL_SERIAL_H
#define ATMEL_US_CR 0x00 /* Control Register */
#define ATMEL_US_RSTRX (1 << 2) /* Reset Receiver */
#define ATMEL_US_RSTTX (1 << 3) /* Reset Transmitter */
#define ATMEL_US_RXEN (1 << 4) /* Receiver Enable */
#define ATMEL_US_RXDIS (1 << 5) /* Receiver Disable */
#define ATMEL_US_TXEN (1 << 6) /* Transmitter Enable */
#define ATMEL_US_TXDIS (1 << 7) /* Transmitter Disable */
#define ATMEL_US_RSTSTA (1 << 8) /* Reset Status Bits */
#define ATMEL_US_STTBRK (1 << 9) /* Start Break */
#define ATMEL_US_STPBRK (1 << 10) /* Stop Break */
#define ATMEL_US_STTTO (1 << 11) /* Start Time-out */
#define ATMEL_US_SENDA (1 << 12) /* Send Address */
#define ATMEL_US_RSTIT (1 << 13) /* Reset Iterations */
#define ATMEL_US_RSTNACK (1 << 14) /* Reset Non Acknowledge */
#define ATMEL_US_RETTO (1 << 15) /* Rearm Time-out */
#define ATMEL_US_DTREN (1 << 16) /* Data Terminal Ready Enable [AT91RM9200 only] */
#define ATMEL_US_DTRDIS (1 << 17) /* Data Terminal Ready Disable [AT91RM9200 only] */
#define ATMEL_US_RTSEN (1 << 18) /* Request To Send Enable */
#define ATMEL_US_RTSDIS (1 << 19) /* Request To Send Disable */
#define ATMEL_US_CR 0x00 /* Control Register */
#define ATMEL_US_RSTRX BIT(2) /* Reset Receiver */
#define ATMEL_US_RSTTX BIT(3) /* Reset Transmitter */
#define ATMEL_US_RXEN BIT(4) /* Receiver Enable */
#define ATMEL_US_RXDIS BIT(5) /* Receiver Disable */
#define ATMEL_US_TXEN BIT(6) /* Transmitter Enable */
#define ATMEL_US_TXDIS BIT(7) /* Transmitter Disable */
#define ATMEL_US_RSTSTA BIT(8) /* Reset Status Bits */
#define ATMEL_US_STTBRK BIT(9) /* Start Break */
#define ATMEL_US_STPBRK BIT(10) /* Stop Break */
#define ATMEL_US_STTTO BIT(11) /* Start Time-out */
#define ATMEL_US_SENDA BIT(12) /* Send Address */
#define ATMEL_US_RSTIT BIT(13) /* Reset Iterations */
#define ATMEL_US_RSTNACK BIT(14) /* Reset Non Acknowledge */
#define ATMEL_US_RETTO BIT(15) /* Rearm Time-out */
#define ATMEL_US_DTREN BIT(16) /* Data Terminal Ready Enable */
#define ATMEL_US_DTRDIS BIT(17) /* Data Terminal Ready Disable */
#define ATMEL_US_RTSEN BIT(18) /* Request To Send Enable */
#define ATMEL_US_RTSDIS BIT(19) /* Request To Send Disable */
#define ATMEL_US_TXFCLR BIT(24) /* Transmit FIFO Clear */
#define ATMEL_US_RXFCLR BIT(25) /* Receive FIFO Clear */
#define ATMEL_US_TXFLCLR BIT(26) /* Transmit FIFO Lock Clear */
#define ATMEL_US_FIFOEN BIT(30) /* FIFO enable */
#define ATMEL_US_FIFODIS BIT(31) /* FIFO disable */
#define ATMEL_US_MR 0x04 /* Mode Register */
#define ATMEL_US_USMODE (0xf << 0) /* Mode of the USART */
#define ATMEL_US_USMODE_NORMAL 0
#define ATMEL_US_USMODE_RS485 1
#define ATMEL_US_USMODE_HWHS 2
#define ATMEL_US_USMODE_MODEM 3
#define ATMEL_US_USMODE_ISO7816_T0 4
#define ATMEL_US_USMODE_ISO7816_T1 6
#define ATMEL_US_USMODE_IRDA 8
#define ATMEL_US_USCLKS (3 << 4) /* Clock Selection */
#define ATMEL_US_USCLKS_MCK (0 << 4)
#define ATMEL_US_USCLKS_MCK_DIV8 (1 << 4)
#define ATMEL_US_USCLKS_SCK (3 << 4)
#define ATMEL_US_CHRL (3 << 6) /* Character Length */
#define ATMEL_US_CHRL_5 (0 << 6)
#define ATMEL_US_CHRL_6 (1 << 6)
#define ATMEL_US_CHRL_7 (2 << 6)
#define ATMEL_US_CHRL_8 (3 << 6)
#define ATMEL_US_SYNC (1 << 8) /* Synchronous Mode Select */
#define ATMEL_US_PAR (7 << 9) /* Parity Type */
#define ATMEL_US_PAR_EVEN (0 << 9)
#define ATMEL_US_PAR_ODD (1 << 9)
#define ATMEL_US_PAR_SPACE (2 << 9)
#define ATMEL_US_PAR_MARK (3 << 9)
#define ATMEL_US_PAR_NONE (4 << 9)
#define ATMEL_US_PAR_MULTI_DROP (6 << 9)
#define ATMEL_US_NBSTOP (3 << 12) /* Number of Stop Bits */
#define ATMEL_US_NBSTOP_1 (0 << 12)
#define ATMEL_US_NBSTOP_1_5 (1 << 12)
#define ATMEL_US_NBSTOP_2 (2 << 12)
#define ATMEL_US_CHMODE (3 << 14) /* Channel Mode */
#define ATMEL_US_CHMODE_NORMAL (0 << 14)
#define ATMEL_US_CHMODE_ECHO (1 << 14)
#define ATMEL_US_CHMODE_LOC_LOOP (2 << 14)
#define ATMEL_US_CHMODE_REM_LOOP (3 << 14)
#define ATMEL_US_MSBF (1 << 16) /* Bit Order */
#define ATMEL_US_MODE9 (1 << 17) /* 9-bit Character Length */
#define ATMEL_US_CLKO (1 << 18) /* Clock Output Select */
#define ATMEL_US_OVER (1 << 19) /* Oversampling Mode */
#define ATMEL_US_INACK (1 << 20) /* Inhibit Non Acknowledge */
#define ATMEL_US_DSNACK (1 << 21) /* Disable Successive NACK */
#define ATMEL_US_MAX_ITER (7 << 24) /* Max Iterations */
#define ATMEL_US_FILTER (1 << 28) /* Infrared Receive Line Filter */
#define ATMEL_US_MR 0x04 /* Mode Register */
#define ATMEL_US_USMODE GENMASK(3, 0) /* Mode of the USART */
#define ATMEL_US_USMODE_NORMAL 0
#define ATMEL_US_USMODE_RS485 1
#define ATMEL_US_USMODE_HWHS 2
#define ATMEL_US_USMODE_MODEM 3
#define ATMEL_US_USMODE_ISO7816_T0 4
#define ATMEL_US_USMODE_ISO7816_T1 6
#define ATMEL_US_USMODE_IRDA 8
#define ATMEL_US_USCLKS GENMASK(5, 4) /* Clock Selection */
#define ATMEL_US_USCLKS_MCK (0 << 4)
#define ATMEL_US_USCLKS_MCK_DIV8 (1 << 4)
#define ATMEL_US_USCLKS_SCK (3 << 4)
#define ATMEL_US_CHRL GENMASK(7, 6) /* Character Length */
#define ATMEL_US_CHRL_5 (0 << 6)
#define ATMEL_US_CHRL_6 (1 << 6)
#define ATMEL_US_CHRL_7 (2 << 6)
#define ATMEL_US_CHRL_8 (3 << 6)
#define ATMEL_US_SYNC BIT(8) /* Synchronous Mode Select */
#define ATMEL_US_PAR GENMASK(11, 9) /* Parity Type */
#define ATMEL_US_PAR_EVEN (0 << 9)
#define ATMEL_US_PAR_ODD (1 << 9)
#define ATMEL_US_PAR_SPACE (2 << 9)
#define ATMEL_US_PAR_MARK (3 << 9)
#define ATMEL_US_PAR_NONE (4 << 9)
#define ATMEL_US_PAR_MULTI_DROP (6 << 9)
#define ATMEL_US_NBSTOP GENMASK(13, 12) /* Number of Stop Bits */
#define ATMEL_US_NBSTOP_1 (0 << 12)
#define ATMEL_US_NBSTOP_1_5 (1 << 12)
#define ATMEL_US_NBSTOP_2 (2 << 12)
#define ATMEL_US_CHMODE GENMASK(15, 14) /* Channel Mode */
#define ATMEL_US_CHMODE_NORMAL (0 << 14)
#define ATMEL_US_CHMODE_ECHO (1 << 14)
#define ATMEL_US_CHMODE_LOC_LOOP (2 << 14)
#define ATMEL_US_CHMODE_REM_LOOP (3 << 14)
#define ATMEL_US_MSBF BIT(16) /* Bit Order */
#define ATMEL_US_MODE9 BIT(17) /* 9-bit Character Length */
#define ATMEL_US_CLKO BIT(18) /* Clock Output Select */
#define ATMEL_US_OVER BIT(19) /* Oversampling Mode */
#define ATMEL_US_INACK BIT(20) /* Inhibit Non Acknowledge */
#define ATMEL_US_DSNACK BIT(21) /* Disable Successive NACK */
#define ATMEL_US_MAX_ITER GENMASK(26, 24) /* Max Iterations */
#define ATMEL_US_FILTER BIT(28) /* Infrared Receive Line Filter */
#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */
#define ATMEL_US_RXRDY (1 << 0) /* Receiver Ready */
#define ATMEL_US_TXRDY (1 << 1) /* Transmitter Ready */
#define ATMEL_US_RXBRK (1 << 2) /* Break Received / End of Break */
#define ATMEL_US_ENDRX (1 << 3) /* End of Receiver Transfer */
#define ATMEL_US_ENDTX (1 << 4) /* End of Transmitter Transfer */
#define ATMEL_US_OVRE (1 << 5) /* Overrun Error */
#define ATMEL_US_FRAME (1 << 6) /* Framing Error */
#define ATMEL_US_PARE (1 << 7) /* Parity Error */
#define ATMEL_US_TIMEOUT (1 << 8) /* Receiver Time-out */
#define ATMEL_US_TXEMPTY (1 << 9) /* Transmitter Empty */
#define ATMEL_US_ITERATION (1 << 10) /* Max number of Repetitions Reached */
#define ATMEL_US_TXBUFE (1 << 11) /* Transmission Buffer Empty */
#define ATMEL_US_RXBUFF (1 << 12) /* Reception Buffer Full */
#define ATMEL_US_NACK (1 << 13) /* Non Acknowledge */
#define ATMEL_US_RIIC (1 << 16) /* Ring Indicator Input Change [AT91RM9200 only] */
#define ATMEL_US_DSRIC (1 << 17) /* Data Set Ready Input Change [AT91RM9200 only] */
#define ATMEL_US_DCDIC (1 << 18) /* Data Carrier Detect Input Change [AT91RM9200 only] */
#define ATMEL_US_CTSIC (1 << 19) /* Clear to Send Input Change */
#define ATMEL_US_RI (1 << 20) /* RI */
#define ATMEL_US_DSR (1 << 21) /* DSR */
#define ATMEL_US_DCD (1 << 22) /* DCD */
#define ATMEL_US_CTS (1 << 23) /* CTS */
#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */
#define ATMEL_US_RXRDY BIT(0) /* Receiver Ready */
#define ATMEL_US_TXRDY BIT(1) /* Transmitter Ready */
#define ATMEL_US_RXBRK BIT(2) /* Break Received / End of Break */
#define ATMEL_US_ENDRX BIT(3) /* End of Receiver Transfer */
#define ATMEL_US_ENDTX BIT(4) /* End of Transmitter Transfer */
#define ATMEL_US_OVRE BIT(5) /* Overrun Error */
#define ATMEL_US_FRAME BIT(6) /* Framing Error */
#define ATMEL_US_PARE BIT(7) /* Parity Error */
#define ATMEL_US_TIMEOUT BIT(8) /* Receiver Time-out */
#define ATMEL_US_TXEMPTY BIT(9) /* Transmitter Empty */
#define ATMEL_US_ITERATION BIT(10) /* Max number of Repetitions Reached */
#define ATMEL_US_TXBUFE BIT(11) /* Transmission Buffer Empty */
#define ATMEL_US_RXBUFF BIT(12) /* Reception Buffer Full */
#define ATMEL_US_NACK BIT(13) /* Non Acknowledge */
#define ATMEL_US_RIIC BIT(16) /* Ring Indicator Input Change */
#define ATMEL_US_DSRIC BIT(17) /* Data Set Ready Input Change */
#define ATMEL_US_DCDIC BIT(18) /* Data Carrier Detect Input Change */
#define ATMEL_US_CTSIC BIT(19) /* Clear to Send Input Change */
#define ATMEL_US_RI BIT(20) /* RI */
#define ATMEL_US_DSR BIT(21) /* DSR */
#define ATMEL_US_DCD BIT(22) /* DCD */
#define ATMEL_US_CTS BIT(23) /* CTS */
#define ATMEL_US_IDR 0x0c /* Interrupt Disable Register */
#define ATMEL_US_IMR 0x10 /* Interrupt Mask Register */
#define ATMEL_US_CSR 0x14 /* Channel Status Register */
#define ATMEL_US_RHR 0x18 /* Receiver Holding Register */
#define ATMEL_US_THR 0x1c /* Transmitter Holding Register */
#define ATMEL_US_SYNH (1 << 15) /* Transmit/Receive Sync [AT91SAM9261 only] */
#define ATMEL_US_IDR 0x0c /* Interrupt Disable Register */
#define ATMEL_US_IMR 0x10 /* Interrupt Mask Register */
#define ATMEL_US_CSR 0x14 /* Channel Status Register */
#define ATMEL_US_RHR 0x18 /* Receiver Holding Register */
#define ATMEL_US_THR 0x1c /* Transmitter Holding Register */
#define ATMEL_US_SYNH BIT(15) /* Transmit/Receive Sync */
#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */
#define ATMEL_US_CD (0xffff << 0) /* Clock Divider */
#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */
#define ATMEL_US_CD GENMASK(15, 0) /* Clock Divider */
#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register */
#define ATMEL_US_TO (0xffff << 0) /* Time-out Value */
#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register */
#define ATMEL_US_TO GENMASK(15, 0) /* Time-out Value */
#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */
#define ATMEL_US_TG (0xff << 0) /* Timeguard Value */
#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */
#define ATMEL_US_TG GENMASK(7, 0) /* Timeguard Value */
#define ATMEL_US_FIDI 0x40 /* FI DI Ratio Register */
#define ATMEL_US_NER 0x44 /* Number of Errors Register */
#define ATMEL_US_IF 0x4c /* IrDA Filter Register */
#define ATMEL_US_FIDI 0x40 /* FI DI Ratio Register */
#define ATMEL_US_NER 0x44 /* Number of Errors Register */
#define ATMEL_US_IF 0x4c /* IrDA Filter Register */
#define ATMEL_US_NAME 0xf0 /* Ip Name */
#define ATMEL_US_VERSION 0xfc /* Ip Version */
#define ATMEL_US_CMPR 0x90 /* Comparison Register */
#define ATMEL_US_FMR 0xa0 /* FIFO Mode Register */
#define ATMEL_US_TXRDYM(data) (((data) & 0x3) << 0) /* TX Ready Mode */
#define ATMEL_US_RXRDYM(data) (((data) & 0x3) << 4) /* RX Ready Mode */
#define ATMEL_US_ONE_DATA 0x0
#define ATMEL_US_TWO_DATA 0x1
#define ATMEL_US_FOUR_DATA 0x2
#define ATMEL_US_FRTSC BIT(7) /* FIFO RTS pin Control */
#define ATMEL_US_TXFTHRES(thr) (((thr) & 0x3f) << 8) /* TX FIFO Threshold */
#define ATMEL_US_RXFTHRES(thr) (((thr) & 0x3f) << 16) /* RX FIFO Threshold */
#define ATMEL_US_RXFTHRES2(thr) (((thr) & 0x3f) << 24) /* RX FIFO Threshold2 */
#define ATMEL_US_FLR 0xa4 /* FIFO Level Register */
#define ATMEL_US_TXFL(reg) (((reg) >> 0) & 0x3f) /* TX FIFO Level */
#define ATMEL_US_RXFL(reg) (((reg) >> 16) & 0x3f) /* RX FIFO Level */
#define ATMEL_US_FIER 0xa8 /* FIFO Interrupt Enable Register */
#define ATMEL_US_FIDR 0xac /* FIFO Interrupt Disable Register */
#define ATMEL_US_FIMR 0xb0 /* FIFO Interrupt Mask Register */
#define ATMEL_US_FESR 0xb4 /* FIFO Event Status Register */
#define ATMEL_US_TXFEF BIT(0) /* Transmit FIFO Empty Flag */
#define ATMEL_US_TXFFF BIT(1) /* Transmit FIFO Full Flag */
#define ATMEL_US_TXFTHF BIT(2) /* Transmit FIFO Threshold Flag */
#define ATMEL_US_RXFEF BIT(3) /* Receive FIFO Empty Flag */
#define ATMEL_US_RXFFF BIT(4) /* Receive FIFO Full Flag */
#define ATMEL_US_RXFTHF BIT(5) /* Receive FIFO Threshold Flag */
#define ATMEL_US_TXFPTEF BIT(6) /* Transmit FIFO Pointer Error Flag */
#define ATMEL_US_RXFPTEF BIT(7) /* Receive FIFO Pointer Error Flag */
#define ATMEL_US_TXFLOCK BIT(8) /* Transmit FIFO Lock (FESR only) */
#define ATMEL_US_RXFTHF2 BIT(9) /* Receive FIFO Threshold Flag 2 */
#define ATMEL_US_NAME 0xf0 /* Ip Name */
#define ATMEL_US_VERSION 0xfc /* Ip Version */
#endif
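
The block above is a mechanical conversion from open-coded shifts to the <linux/bitops.h> helpers. A compile-time sanity sketch (function name hypothetical; BUILD_BUG_ON() comes from <linux/bug.h>) showing the new spellings are bit-for-bit identical to the old ones:

static inline void atmel_serial_mask_check(void)
{
	BUILD_BUG_ON(ATMEL_US_RXEN != (1 << 4));	/* BIT(4) */
	BUILD_BUG_ON(ATMEL_US_PAR != (7 << 9));		/* GENMASK(11, 9) */
	BUILD_BUG_ON(ATMEL_US_CD != 0xffff);		/* GENMASK(15, 0) */
}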

View File

@@ -2,6 +2,329 @@
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
#include <asm/barrier.h>
/*
* Relaxed variants of xchg, cmpxchg and some atomic operations.
*
* We support four variants:
*
* - Fully ordered: The default implementation, no suffix required.
* - Acquire: Provides ACQUIRE semantics, _acquire suffix.
* - Release: Provides RELEASE semantics, _release suffix.
* - Relaxed: No ordering guarantees, _relaxed suffix.
*
* For compound atomics performing both a load and a store, ACQUIRE
* semantics apply only to the load and RELEASE semantics only to the
* store portion of the operation. Note that a failed cmpxchg_acquire
* does -not- imply any memory ordering constraints.
*
* See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
*/
#ifndef atomic_read_acquire
#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter)
#endif
#ifndef atomic_set_release
#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i))
#endif
/*
* The idea here is to build acquire/release variants by adding explicit
* barriers on top of the relaxed variant. In the case where the relaxed
* variant is already fully ordered, no additional barriers are needed.
*/
#define __atomic_op_acquire(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
smp_mb__after_atomic(); \
__ret; \
})
#define __atomic_op_release(op, args...) \
({ \
smp_mb__before_atomic(); \
op##_relaxed(args); \
})
#define __atomic_op_fence(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret; \
smp_mb__before_atomic(); \
__ret = op##_relaxed(args); \
smp_mb__after_atomic(); \
__ret; \
})
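
Concretely, when an architecture supplies only the _relaxed primitive, the macros above synthesize the ordered forms. A sketch (function name hypothetical) of what atomic_xchg_acquire() reduces to via __atomic_op_acquire() in that case:

static inline int sketch_atomic_xchg_acquire(atomic_t *v, int new)
{
	int old = atomic_xchg_relaxed(v, new);	/* unordered exchange */

	smp_mb__after_atomic();			/* upgrade to ACQUIRE */
	return old;
}
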
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
#define atomic_add_return_relaxed atomic_add_return
#define atomic_add_return_acquire atomic_add_return
#define atomic_add_return_release atomic_add_return
#else /* atomic_add_return_relaxed */
#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...) \
__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif
#ifndef atomic_add_return_release
#define atomic_add_return_release(...) \
__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif
#ifndef atomic_add_return
#define atomic_add_return(...) \
__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */
/* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return
#define atomic_sub_return_acquire atomic_sub_return
#define atomic_sub_return_release atomic_sub_return
#else /* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_acquire
#define atomic_sub_return_acquire(...) \
__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
#endif
#ifndef atomic_sub_return_release
#define atomic_sub_return_release(...) \
__atomic_op_release(atomic_sub_return, __VA_ARGS__)
#endif
#ifndef atomic_sub_return
#define atomic_sub_return(...) \
__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
#endif
#endif /* atomic_sub_return_relaxed */
/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define atomic_xchg_relaxed atomic_xchg
#define atomic_xchg_acquire atomic_xchg
#define atomic_xchg_release atomic_xchg
#else /* atomic_xchg_relaxed */
#ifndef atomic_xchg_acquire
#define atomic_xchg_acquire(...) \
__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif
#ifndef atomic_xchg_release
#define atomic_xchg_release(...) \
__atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif
#ifndef atomic_xchg
#define atomic_xchg(...) \
__atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */
/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_relaxed atomic_cmpxchg
#define atomic_cmpxchg_acquire atomic_cmpxchg
#define atomic_cmpxchg_release atomic_cmpxchg
#else /* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_acquire
#define atomic_cmpxchg_acquire(...) \
__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif
#ifndef atomic_cmpxchg_release
#define atomic_cmpxchg_release(...) \
__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif
#ifndef atomic_cmpxchg
#define atomic_cmpxchg(...) \
__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */
#ifndef atomic64_read_acquire
#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter)
#endif
#ifndef atomic64_set_release
#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i))
#endif
/* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_relaxed
#define atomic64_add_return_relaxed atomic64_add_return
#define atomic64_add_return_acquire atomic64_add_return
#define atomic64_add_return_release atomic64_add_return
#else /* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_acquire
#define atomic64_add_return_acquire(...) \
__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif
#ifndef atomic64_add_return_release
#define atomic64_add_return_release(...) \
__atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif
#ifndef atomic64_add_return
#define atomic64_add_return(...) \
__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif /* atomic64_add_return_relaxed */
/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return
#define atomic64_sub_return_acquire atomic64_sub_return
#define atomic64_sub_return_release atomic64_sub_return
#else /* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_acquire
#define atomic64_sub_return_acquire(...) \
__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif
#ifndef atomic64_sub_return_release
#define atomic64_sub_return_release(...) \
__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif
#ifndef atomic64_sub_return
#define atomic64_sub_return(...) \
__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */
/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define atomic64_xchg_relaxed atomic64_xchg
#define atomic64_xchg_acquire atomic64_xchg
#define atomic64_xchg_release atomic64_xchg
#else /* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_acquire
#define atomic64_xchg_acquire(...) \
__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif
#ifndef atomic64_xchg_release
#define atomic64_xchg_release(...) \
__atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif
#ifndef atomic64_xchg
#define atomic64_xchg(...) \
__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */
/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_relaxed
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
#define atomic64_cmpxchg_acquire atomic64_cmpxchg
#define atomic64_cmpxchg_release atomic64_cmpxchg
#else /* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_acquire
#define atomic64_cmpxchg_acquire(...) \
__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif
#ifndef atomic64_cmpxchg_release
#define atomic64_cmpxchg_release(...) \
__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif
#ifndef atomic64_cmpxchg
#define atomic64_cmpxchg(...) \
__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */
/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed cmpxchg
#define cmpxchg_acquire cmpxchg
#define cmpxchg_release cmpxchg
#else /* cmpxchg_relaxed */
#ifndef cmpxchg_acquire
#define cmpxchg_acquire(...) \
__atomic_op_acquire(cmpxchg, __VA_ARGS__)
#endif
#ifndef cmpxchg_release
#define cmpxchg_release(...) \
__atomic_op_release(cmpxchg, __VA_ARGS__)
#endif
#ifndef cmpxchg
#define cmpxchg(...) \
__atomic_op_fence(cmpxchg, __VA_ARGS__)
#endif
#endif /* cmpxchg_relaxed */
/* cmpxchg64_relaxed */
#ifndef cmpxchg64_relaxed
#define cmpxchg64_relaxed cmpxchg64
#define cmpxchg64_acquire cmpxchg64
#define cmpxchg64_release cmpxchg64
#else /* cmpxchg64_relaxed */
#ifndef cmpxchg64_acquire
#define cmpxchg64_acquire(...) \
__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
#endif
#ifndef cmpxchg64_release
#define cmpxchg64_release(...) \
__atomic_op_release(cmpxchg64, __VA_ARGS__)
#endif
#ifndef cmpxchg64
#define cmpxchg64(...) \
__atomic_op_fence(cmpxchg64, __VA_ARGS__)
#endif
#endif /* cmpxchg64_relaxed */
/* xchg_relaxed */
#ifndef xchg_relaxed
#define xchg_relaxed xchg
#define xchg_acquire xchg
#define xchg_release xchg
#else /* xchg_relaxed */
#ifndef xchg_acquire
#define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__)
#endif
#ifndef xchg_release
#define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__)
#endif
#ifndef xchg
#define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__)
#endif
#endif /* xchg_relaxed */
/**
* atomic_add_unless - add unless the number is already a given value
@@ -28,6 +351,23 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#endif
#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
atomic_and(~i, v);
}
#endif
static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
atomic_andnot(mask, v);
}
static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
{
atomic_or(mask, v);
}
/**
* atomic_inc_not_zero_hint - increment if not zero
* @v: pointer of type atomic_t
@@ -111,21 +451,16 @@ static inline int atomic_dec_if_positive(atomic_t *v)
}
#endif
#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
static inline void atomic_or(int i, atomic_t *v)
{
int old;
int new;
do {
old = atomic_read(v);
new = old | i;
} while (atomic_cmpxchg(v, old, new) != old);
}
#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
#include <asm-generic/atomic-long.h>
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
atomic64_and(~i, v);
}
#endif
#endif /* _LINUX_ATOMIC_H */

View File

@@ -27,6 +27,9 @@
#include <linux/ptrace.h>
#include <uapi/linux/audit.h>
#define AUDIT_INO_UNSET ((unsigned long)-1)
#define AUDIT_DEV_UNSET ((dev_t)-1)
struct audit_sig_info {
uid_t uid;
pid_t pid;
@@ -59,6 +62,7 @@ struct audit_krule {
struct audit_field *inode_f; /* quick access to an inode field */
struct audit_watch *watch; /* associated watch */
struct audit_tree *tree; /* associated watched tree */
struct audit_fsnotify_mark *exe;
struct list_head rlist; /* entry in audit_{watch,tree}.rules list */
struct list_head list; /* for AUDIT_LIST* purposes only */
u64 prio;

View File

@@ -3,28 +3,43 @@
/* Exponentially weighted moving average (EWMA) */
/* For more documentation see lib/average.c */
struct ewma {
unsigned long internal;
unsigned long factor;
unsigned long weight;
};
extern void ewma_init(struct ewma *avg, unsigned long factor,
unsigned long weight);
extern struct ewma *ewma_add(struct ewma *avg, unsigned long val);
/**
* ewma_read() - Get average value
* @avg: Average structure
*
* Returns the average value held in @avg.
*/
static inline unsigned long ewma_read(const struct ewma *avg)
{
return avg->internal >> avg->factor;
}
#define DECLARE_EWMA(name, _factor, _weight) \
struct ewma_##name { \
unsigned long internal; \
}; \
static inline void ewma_##name##_init(struct ewma_##name *e) \
{ \
BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
e->internal = 0; \
} \
static inline unsigned long \
ewma_##name##_read(struct ewma_##name *e) \
{ \
BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
return e->internal >> ilog2(_factor); \
} \
static inline void ewma_##name##_add(struct ewma_##name *e, \
unsigned long val) \
{ \
unsigned long internal = ACCESS_ONCE(e->internal); \
unsigned long weight = ilog2(_weight); \
unsigned long factor = ilog2(_factor); \
\
BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
\
ACCESS_ONCE(e->internal) = internal ? \
(((internal << weight) - internal) + \
(val << factor)) >> weight : \
(val << factor); \
}
#endif /* _LINUX_AVERAGE_H */
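
In use, DECLARE_EWMA() stamps out one type and one set of accessors per averaged quantity. A usage sketch (all names hypothetical) with factor 16 and weight 8, so each add computes internal = (internal * 7 + (val << 4)) / 8 and each read scales back by 16:

DECLARE_EWMA(rssi, 16, 8)	/* emits struct ewma_rssi + accessors */

static struct ewma_rssi rssi_avg;

static void rssi_setup(void)
{
	ewma_rssi_init(&rssi_avg);		/* zero the state first */
}

static void rssi_update(unsigned long sample)
{
	ewma_rssi_add(&rssi_avg, sample);	/* fold in one sample */
}

static unsigned long rssi_level(void)
{
	return ewma_rssi_read(&rssi_avg);	/* scaled back by factor */
}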

View File

@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>
@@ -252,13 +253,19 @@ int inode_congested(struct inode *inode, int cong_bits);
* @inode: inode of interest
*
* cgroup writeback requires support from both the bdi and filesystem.
* Test whether @inode has both.
* Also, both memcg and iocg have to be on the default hierarchy. Test
* whether all conditions are met.
*
* Note that the test result may change dynamically on the same inode
* depending on how memcg and iocg are configured.
*/
static inline bool inode_cgwb_enabled(struct inode *inode)
{
struct backing_dev_info *bdi = inode_to_bdi(inode);
return bdi_cap_account_dirty(bdi) &&
return cgroup_on_dfl(mem_cgroup_root_css->cgroup) &&
cgroup_on_dfl(blkcg_root_css->cgroup) &&
bdi_cap_account_dirty(bdi) &&
(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}
@@ -286,7 +293,7 @@ static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi
* %current's blkcg equals the effective blkcg of its memcg. No
* need to use the relatively expensive cgroup_get_e_css().
*/
if (likely(wb && wb->blkcg_css == task_css(current, blkio_cgrp_id)))
if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
return wb;
return NULL;
}
@@ -402,7 +409,7 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
}
struct wb_iter {
int start_blkcg_id;
int start_memcg_id;
struct radix_tree_iter tree_iter;
void **slot;
};
@@ -414,9 +421,9 @@ static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
WARN_ON_ONCE(!rcu_read_lock_held());
if (iter->start_blkcg_id >= 0) {
iter->slot = radix_tree_iter_init(titer, iter->start_blkcg_id);
iter->start_blkcg_id = -1;
if (iter->start_memcg_id >= 0) {
iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id);
iter->start_memcg_id = -1;
} else {
iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
}
@@ -430,30 +437,30 @@ static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
struct backing_dev_info *bdi,
int start_blkcg_id)
int start_memcg_id)
{
iter->start_blkcg_id = start_blkcg_id;
iter->start_memcg_id = start_memcg_id;
if (start_blkcg_id)
if (start_memcg_id)
return __wb_iter_next(iter, bdi);
else
return &bdi->wb;
}
/**
* bdi_for_each_wb - walk all wb's of a bdi in ascending blkcg ID order
* bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order
* @wb_cur: cursor struct bdi_writeback pointer
* @bdi: bdi to walk wb's of
* @iter: pointer to struct wb_iter to be used as iteration buffer
* @start_blkcg_id: blkcg ID to start iteration from
* @start_memcg_id: memcg ID to start iteration from
*
* Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
* blkcg ID order starting from @start_blkcg_id. @iter is struct wb_iter
* memcg ID order starting from @start_memcg_id. @iter is struct wb_iter
* to be used as temp storage during iteration. rcu_read_lock() must be
* held throughout iteration.
*/
#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \
for ((wb_cur) = __wb_iter_init(iter, bdi, start_blkcg_id); \
#define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id) \
for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id); \
(wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))
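
A caller-side sketch (function name hypothetical) of the contract spelled out above: the whole walk sits under rcu_read_lock() and starts from memcg ID 0:

static void count_wbs(struct backing_dev_info *bdi)
{
	struct bdi_writeback *wb;
	struct wb_iter iter;
	int nr = 0;

	rcu_read_lock();			/* held across the walk */
	bdi_for_each_wb(wb, bdi, &iter, 0)	/* ascending memcg IDs */
		nr++;
	rcu_read_unlock();

	pr_info("%d writeback(s)\n", nr);
}
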
#else /* CONFIG_CGROUP_WRITEBACK */

View File

@@ -75,5 +75,6 @@ int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
#define BGPIOF_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */
#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3)
#define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */
#define BGPIOF_NO_OUTPUT BIT(5) /* only input */
#endif /* __BASIC_MMIO_GPIO_H */

View File

@@ -640,7 +640,6 @@ struct bcma_drv_cc {
spinlock_t gpio_lock;
#ifdef CONFIG_BCMA_DRIVER_GPIO
struct gpio_chip gpio;
struct irq_domain *irq_domain;
#endif
};

View File

@@ -186,17 +186,6 @@ static inline void *bio_data(struct bio *bio)
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
/*
* Check if adding a bio_vec after bprv with offset would create a gap in
* the SG list. Most drivers don't care about this, but some do.
*/
static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
{
return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
}
#define bio_io_error(bio) bio_endio((bio), -EIO)
/*
* drivers should _never_ use the all version - the bio may have been split
* before it got to the driver and the driver won't own all of it
@@ -306,6 +295,21 @@ static inline void bio_cnt_set(struct bio *bio, unsigned int count)
atomic_set(&bio->__bi_cnt, count);
}
static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
return (bio->bi_flags & (1U << bit)) != 0;
}
static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
bio->bi_flags |= (1U << bit);
}
static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
bio->bi_flags &= ~(1U << bit);
}
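
A usage sketch (function name hypothetical) of the new accessors, which replace the open-coded bi_flags tests removed above:

static void example_mark_cloned(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_CLONED))	/* not yet marked? */
		bio_set_flag(bio, BIO_CLONED);	/* bio doesn't own data */
}
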
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
@@ -426,7 +430,14 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
}
extern void bio_endio(struct bio *, int);
extern void bio_endio(struct bio *);
static inline void bio_io_error(struct bio *bio)
{
bio->bi_error = -EIO;
bio_endio(bio);
}
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
@@ -440,7 +451,6 @@ void bio_chain(struct bio *, struct bio *);
extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
const struct iov_iter *, gfp_t);
@@ -717,7 +727,7 @@ extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_endio(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);

View File

@@ -295,7 +295,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
return find_first_zero_bit(src, nbits) == nbits;
}
static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));

View File

@@ -57,7 +57,7 @@ extern unsigned long __sw_hweight64(__u64 w);
(bit) < (size); \
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
static __inline__ int get_bitmask_order(unsigned int count)
static inline int get_bitmask_order(unsigned int count)
{
int order;
@@ -65,7 +65,7 @@ static __inline__ int get_bitmask_order(unsigned int count)
return order; /* We could be slightly more clever with -1 here... */
}
static __inline__ int get_count_order(unsigned int count)
static inline int get_count_order(unsigned int count)
{
int order;
@@ -75,7 +75,7 @@ static __inline__ int get_count_order(unsigned int count)
return order;
}
static inline unsigned long hweight_long(unsigned long w)
static __always_inline unsigned long hweight_long(unsigned long w)
{
return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}

View File

@@ -14,12 +14,15 @@
*/
#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH (INT_MAX / 2)
/* Max limits for throttle policy */
#define THROTL_IOPS_MAX UINT_MAX
@@ -45,7 +48,7 @@ struct blkcg {
struct blkcg_gq *blkg_hint;
struct hlist_head blkg_list;
struct blkcg_policy_data *pd[BLKCG_MAX_POLS];
struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];
struct list_head all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -53,14 +56,19 @@ struct blkcg {
#endif
};
/*
* blkg_[rw]stat->aux_cnt is excluded for local stats but included for
* recursive. Used to carry stats of dead children, and, for blkg_rwstat,
* to carry result values from read and sum operations.
*/
struct blkg_stat {
struct u64_stats_sync syncp;
uint64_t cnt;
struct percpu_counter cpu_cnt;
atomic64_t aux_cnt;
};
struct blkg_rwstat {
struct u64_stats_sync syncp;
uint64_t cnt[BLKG_RWSTAT_NR];
struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR];
atomic64_t aux_cnt[BLKG_RWSTAT_NR];
};
/*
@@ -68,32 +76,28 @@ struct blkg_rwstat {
* request_queue (q). This is used by blkcg policies which need to track
* information per blkcg - q pair.
*
* There can be multiple active blkcg policies and each has its private
* data on each blkg, the size of which is determined by
* blkcg_policy->pd_size. blkcg core allocates and frees such areas
* together with blkg and invokes pd_init/exit_fn() methods.
*
* Such private data must embed struct blkg_policy_data (pd) at the
* beginning and pd_size can't be smaller than pd.
* There can be multiple active blkcg policies and each blkg:policy pair is
* represented by a blkg_policy_data which is allocated and freed by each
* policy's pd_alloc/free_fn() methods. A policy can allocate private data
* area by allocating larger data structure which embeds blkg_policy_data
* at the beginning.
*/
struct blkg_policy_data {
/* the blkg and policy id this per-policy data belongs to */
struct blkcg_gq *blkg;
int plid;
/* used during policy activation */
struct list_head alloc_node;
};
/*
* Policies that need to keep per-blkcg data which is independent
* from any request_queue associated to it must specify its size
* with the cpd_size field of the blkcg_policy structure and
* embed a blkcg_policy_data in it. cpd_init() is invoked to let
* each policy handle per-blkcg data.
* Policies that need to keep per-blkcg data which is independent from any
* request_queue associated to it should implement cpd_alloc/free_fn()
* methods. A policy can allocate private data area by allocating larger
* data structure which embeds blkcg_policy_data at the beginning.
* cpd_init() is invoked to let each policy handle per-blkcg data.
*/
struct blkcg_policy_data {
/* the policy id this per-policy data belongs to */
/* the blkcg and policy id this per-policy data belongs to */
struct blkcg *blkcg;
int plid;
};
@@ -123,40 +127,50 @@ struct blkcg_gq {
/* is this blkg online? protected by both blkcg and q locks */
bool online;
struct blkg_rwstat stat_bytes;
struct blkg_rwstat stat_ios;
struct blkg_policy_data *pd[BLKCG_MAX_POLS];
struct rcu_head rcu_head;
};
typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
struct blkcg_policy {
int plid;
/* policy specific private data size */
size_t pd_size;
/* policy specific per-blkcg data size */
size_t cpd_size;
/* cgroup files for the policy */
struct cftype *cftypes;
struct cftype *dfl_cftypes;
struct cftype *legacy_cftypes;
/* operations */
blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
blkcg_pol_init_cpd_fn *cpd_init_fn;
blkcg_pol_free_cpd_fn *cpd_free_fn;
blkcg_pol_bind_cpd_fn *cpd_bind_fn;
blkcg_pol_alloc_pd_fn *pd_alloc_fn;
blkcg_pol_init_pd_fn *pd_init_fn;
blkcg_pol_online_pd_fn *pd_online_fn;
blkcg_pol_offline_pd_fn *pd_offline_fn;
blkcg_pol_exit_pd_fn *pd_exit_fn;
blkcg_pol_free_pd_fn *pd_free_fn;
blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
};
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
@@ -171,6 +185,7 @@ int blkcg_activate_policy(struct request_queue *q,
void blkcg_deactivate_policy(struct request_queue *q,
const struct blkcg_policy *pol);
const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
u64 (*prfill)(struct seq_file *,
struct blkg_policy_data *, int),
@@ -182,19 +197,24 @@ u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);
u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
int off);
u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
struct blkcg_policy *pol, int off);
struct blkg_conf_ctx {
struct gendisk *disk;
struct blkcg_gq *blkg;
u64 v;
char *body;
};
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
const char *input, struct blkg_conf_ctx *ctx);
char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
@@ -205,7 +225,7 @@ static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
return css_to_blkcg(task_css(tsk, io_cgrp_id));
}
static inline struct blkcg *bio_blkcg(struct bio *bio)
@@ -218,7 +238,7 @@ static inline struct blkcg *bio_blkcg(struct bio *bio)
static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
return task_get_css(task, blkio_cgrp_id);
return task_get_css(task, io_cgrp_id);
}
/**
@@ -232,6 +252,52 @@ static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
return css_to_blkcg(blkcg->css.parent);
}
/**
* __blkg_lookup - internal version of blkg_lookup()
* @blkcg: blkcg of interest
* @q: request_queue of interest
* @update_hint: whether to update lookup hint with the result or not
*
* This is the internal version and shouldn't be used by policy
* implementations. Looks up blkgs for the @blkcg - @q pair regardless of
* @q's bypass state. If @update_hint is %true, the caller should be
* holding @q->queue_lock and the lookup hint is updated on success.
*/
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
struct request_queue *q,
bool update_hint)
{
struct blkcg_gq *blkg;
if (blkcg == &blkcg_root)
return q->root_blkg;
blkg = rcu_dereference(blkcg->blkg_hint);
if (blkg && blkg->q == q)
return blkg;
return blkg_lookup_slowpath(blkcg, q, update_hint);
}
/**
* blkg_lookup - lookup blkg for the specified blkcg - q pair
* @blkcg: blkcg of interest
* @q: request_queue of interest
*
* Lookup blkg for the @blkcg - @q pair. This function should be called
* under RCU read lock and is guaranteed to return %NULL if @q is bypassing
* - see blk_queue_bypass_start() for details.
*/
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
struct request_queue *q)
{
WARN_ON_ONCE(!rcu_read_lock_held());
if (unlikely(blk_queue_bypass(q)))
return NULL;
return __blkg_lookup(blkcg, q, false);
}
/**
* blkg_to_pdata - get policy private data
* @blkg: blkg of interest
@@ -248,7 +314,7 @@ static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
struct blkcg_policy *pol)
{
return blkcg ? blkcg->pd[pol->plid] : NULL;
return blkcg ? blkcg->cpd[pol->plid] : NULL;
}
/**
@@ -262,6 +328,11 @@ static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
return pd ? pd->blkg : NULL;
}
static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
return cpd ? cpd->blkcg : NULL;
}
/**
* blkg_path - format cgroup path of blkg
* @blkg: blkg of interest
@@ -309,9 +380,6 @@ static inline void blkg_put(struct blkcg_gq *blkg)
call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}
struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
bool update_hint);
/**
* blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
* @d_blkg: loop cursor pointing to the current descendant
@@ -373,8 +441,8 @@ static inline struct request_list *blk_get_rl(struct request_queue *q,
* or if either the blkcg or queue is going away. Fall back to
* root_rl in such cases.
*/
blkg = blkg_lookup_create(blkcg, q);
if (unlikely(IS_ERR(blkg)))
blkg = blkg_lookup(blkcg, q);
if (unlikely(!blkg))
goto root_rl;
blkg_get(blkg);
@@ -394,8 +462,7 @@ root_rl:
*/
static inline void blk_put_rl(struct request_list *rl)
{
/* root_rl may not have blkg set */
if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
if (rl->blkg->blkcg != &blkcg_root)
blkg_put(rl->blkg);
}
@@ -433,9 +500,21 @@ struct request_list *__blk_queue_next_rl(struct request_list *rl,
#define blk_queue_for_each_rl(rl, q) \
for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
static inline void blkg_stat_init(struct blkg_stat *stat)
static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
u64_stats_init(&stat->syncp);
int ret;
ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
if (ret)
return ret;
atomic64_set(&stat->aux_cnt, 0);
return 0;
}
static inline void blkg_stat_exit(struct blkg_stat *stat)
{
percpu_counter_destroy(&stat->cpu_cnt);
}
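
Because blkg_stat is now backed by a percpu_counter, initialization can fail and must be paired with an exit. A lifecycle sketch (function name hypothetical):

static int example_stat_lifecycle(struct blkg_stat *stat)
{
	int ret = blkg_stat_init(stat, GFP_KERNEL); /* percpu alloc may fail */

	if (ret)
		return ret;

	blkg_stat_add(stat, 1);			/* batched per-cpu update */
	pr_info("stat = %llu\n",
		(unsigned long long)blkg_stat_read(stat));

	blkg_stat_exit(stat);			/* frees the percpu counter */
	return 0;
}
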
/**
@@ -443,34 +522,21 @@ static inline void blkg_stat_init(struct blkg_stat *stat)
* @stat: target blkg_stat
* @val: value to add
*
* Add @val to @stat. The caller is responsible for synchronizing calls to
* this function.
* Add @val to @stat. The caller must ensure that IRQs on the same CPU
* don't re-enter this function for the same counter.
*/
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
u64_stats_update_begin(&stat->syncp);
stat->cnt += val;
u64_stats_update_end(&stat->syncp);
__percpu_counter_add(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}
/**
* blkg_stat_read - read the current value of a blkg_stat
* @stat: blkg_stat to read
*
* Read the current value of @stat. This function can be called without
synchronization and takes care of u64 atomicity.
*/
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
unsigned int start;
uint64_t v;
do {
start = u64_stats_fetch_begin_irq(&stat->syncp);
v = stat->cnt;
} while (u64_stats_fetch_retry_irq(&stat->syncp, start));
return v;
return percpu_counter_sum_positive(&stat->cpu_cnt);
}
/**
@@ -479,24 +545,46 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
*/
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
stat->cnt = 0;
percpu_counter_set(&stat->cpu_cnt, 0);
atomic64_set(&stat->aux_cnt, 0);
}
/**
* blkg_stat_merge - merge a blkg_stat into another
* blkg_stat_add_aux - add a blkg_stat into another's aux count
* @to: the destination blkg_stat
* @from: the source
*
* Add @from's count to @to.
* Add @from's count including the aux one to @to's aux count.
*/
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
static inline void blkg_stat_add_aux(struct blkg_stat *to,
struct blkg_stat *from)
{
blkg_stat_add(to, blkg_stat_read(from));
atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
&to->aux_cnt);
}
static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
u64_stats_init(&rwstat->syncp);
int i, ret;
for (i = 0; i < BLKG_RWSTAT_NR; i++) {
ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
if (ret) {
while (--i >= 0)
percpu_counter_destroy(&rwstat->cpu_cnt[i]);
return ret;
}
atomic64_set(&rwstat->aux_cnt[i], 0);
}
return 0;
}
static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
int i;
for (i = 0; i < BLKG_RWSTAT_NR; i++)
percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}
/**
@@ -511,39 +599,38 @@ static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
int rw, uint64_t val)
{
u64_stats_update_begin(&rwstat->syncp);
struct percpu_counter *cnt;
if (rw & REQ_WRITE)
rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
else
rwstat->cnt[BLKG_RWSTAT_READ] += val;
if (rw & REQ_SYNC)
rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
else
rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
u64_stats_update_end(&rwstat->syncp);
__percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
if (rw & REQ_SYNC)
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
else
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
__percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
}
/**
* blkg_rwstat_read - read the current values of a blkg_rwstat
* @rwstat: blkg_rwstat to read
*
* Read the current snapshot of @rwstat and return it as the return value.
* This function can be called without synchronization and takes care of
* u64 atomicity.
* Read the current snapshot of @rwstat and return it in the aux counts.
*/
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
unsigned int start;
struct blkg_rwstat tmp;
struct blkg_rwstat result;
int i;
do {
start = u64_stats_fetch_begin_irq(&rwstat->syncp);
tmp = *rwstat;
} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));
return tmp;
for (i = 0; i < BLKG_RWSTAT_NR; i++)
atomic64_set(&result.aux_cnt[i],
percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
return result;
}
/**
@@ -558,7 +645,8 @@ static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}
/**
@@ -567,26 +655,71 @@ static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
*/
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
int i;
for (i = 0; i < BLKG_RWSTAT_NR; i++) {
percpu_counter_set(&rwstat->cpu_cnt[i], 0);
atomic64_set(&rwstat->aux_cnt[i], 0);
}
}
/**
* blkg_rwstat_merge - merge a blkg_rwstat into another
* blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
* @to: the destination blkg_rwstat
* @from: the source
*
* Add @from's counts to @to.
* Add @from's count including the aux one to @to's aux count.
*/
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
struct blkg_rwstat *from)
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
struct blkg_rwstat *from)
{
struct blkg_rwstat v = blkg_rwstat_read(from);
int i;
u64_stats_update_begin(&to->syncp);
for (i = 0; i < BLKG_RWSTAT_NR; i++)
to->cnt[i] += v.cnt[i];
u64_stats_update_end(&to->syncp);
atomic64_add(atomic64_read(&v.aux_cnt[i]) +
atomic64_read(&from->aux_cnt[i]),
&to->aux_cnt[i]);
}
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
struct bio *bio) { return false; }
#endif
static inline bool blkcg_bio_issue_check(struct request_queue *q,
struct bio *bio)
{
struct blkcg *blkcg;
struct blkcg_gq *blkg;
bool throtl = false;
rcu_read_lock();
blkcg = bio_blkcg(bio);
blkg = blkg_lookup(blkcg, q);
if (unlikely(!blkg)) {
spin_lock_irq(q->queue_lock);
blkg = blkg_lookup_create(blkcg, q);
if (IS_ERR(blkg))
blkg = NULL;
spin_unlock_irq(q->queue_lock);
}
throtl = blk_throtl_bio(q, blkg, bio);
if (!throtl) {
blkg = blkg ?: q->root_blkg;
blkg_rwstat_add(&blkg->stat_bytes, bio->bi_flags,
bio->bi_iter.bi_size);
blkg_rwstat_add(&blkg->stat_ios, bio->bi_flags, 1);
}
rcu_read_unlock();
return !throtl;
}
#else /* CONFIG_BLK_CGROUP */
@@ -642,6 +775,9 @@ static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
static inline bool blkcg_bio_issue_check(struct request_queue *q,
struct bio *bio) { return true; }
#define blk_queue_for_each_rl(rl, q) \
for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

View File

@@ -14,7 +14,7 @@ struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_end_io_t) (struct bio *);
typedef void (bio_destructor_t) (struct bio *);
/*
@@ -46,7 +46,8 @@ struct bvec_iter {
struct bio {
struct bio *bi_next; /* request queue link */
struct block_device *bi_bdev;
unsigned long bi_flags; /* status, command, etc */
unsigned int bi_flags; /* status, command, etc */
int bi_error;
unsigned long bi_rw; /* bottom bits READ/WRITE,
* top bits priority
*/
@@ -111,16 +112,14 @@ struct bio {
/*
* bio flags
*/
#define BIO_UPTODATE 0 /* ok after I/O completion */
#define BIO_SEG_VALID 1 /* bi_phys_segments valid */
#define BIO_CLONED 2 /* doesn't own data */
#define BIO_BOUNCED 3 /* bio is a bounce bio */
#define BIO_USER_MAPPED 4 /* contains user pages */
#define BIO_NULL_MAPPED 5 /* contains invalid user pages */
#define BIO_QUIET 6 /* Make BIO Quiet */
#define BIO_SNAP_STABLE 7 /* bio data must be snapshotted during write */
#define BIO_CHAIN 8 /* chained bio, ->bi_remaining in effect */
#define BIO_REFFED 9 /* bio has elevated ->bi_cnt */
#define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */
#define BIO_REFFED 8 /* bio has elevated ->bi_cnt */
/*
* Flags starting here get preserved by bio_reset() - this includes
@@ -129,14 +128,12 @@ struct bio {
#define BIO_RESET_BITS 13
#define BIO_OWNS_VEC 13 /* bio_free() should free bvec */
#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
/*
* top 4 bits of bio flags indicate the pool this bio came from
*/
#define BIO_POOL_BITS (4)
#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1)
#define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_OFFSET (32 - BIO_POOL_BITS)
#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET)
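
With bi_flags narrowed to 32 bits, the pool index still occupies the top BIO_POOL_BITS, now at a fixed offset of 28 regardless of BITS_PER_LONG. A sketch (function name hypothetical) of the recovery BIO_POOL_IDX() performs:

static inline unsigned int example_pool_idx(const struct bio *bio)
{
	return bio->bi_flags >> BIO_POOL_OFFSET;	/* top 4 bits */
}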

View File

@@ -213,14 +213,6 @@ typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
struct bio_vec;
struct bvec_merge_data {
struct block_device *bi_bdev;
sector_t bi_sector;
unsigned bi_size;
unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
struct bio_vec *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
@@ -258,6 +250,7 @@ struct blk_queue_tag {
struct queue_limits {
unsigned long bounce_pfn;
unsigned long seg_boundary_mask;
unsigned long virt_boundary_mask;
unsigned int max_hw_sectors;
unsigned int chunk_sectors;
@@ -268,6 +261,7 @@ struct queue_limits {
unsigned int io_min;
unsigned int io_opt;
unsigned int max_discard_sectors;
unsigned int max_hw_discard_sectors;
unsigned int max_write_same_sectors;
unsigned int discard_granularity;
unsigned int discard_alignment;
@@ -305,7 +299,6 @@ struct request_queue {
make_request_fn *make_request_fn;
prep_rq_fn *prep_rq_fn;
unprep_rq_fn *unprep_rq_fn;
merge_bvec_fn *merge_bvec_fn;
softirq_done_fn *softirq_done_fn;
rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
@@ -462,6 +455,7 @@ struct request_queue {
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set *bio_split;
};
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
@@ -486,7 +480,6 @@ struct request_queue {
#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
#define QUEUE_FLAG_SG_GAPS 22 /* queue doesn't support SG gaps */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -591,7 +584,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
#define rq_data_dir(rq) (((rq)->cmd_flags & 1) != 0)
#define rq_data_dir(rq) ((int)((rq)->cmd_flags & 1))
/*
* Driver can handle struct request, if it either has an old style
@@ -782,6 +775,8 @@ extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **,
struct bio_set *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
@@ -986,9 +981,9 @@ extern int blk_queue_dma_drain(struct request_queue *q,
void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
@@ -1138,6 +1133,7 @@ extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
enum blk_default_limits {
BLK_MAX_SEGMENTS = 128,
BLK_SAFE_MAX_SECTORS = 255,
BLK_DEF_MAX_SECTORS = 2560,
BLK_MAX_SEGMENT_SIZE = 65536,
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
@@ -1154,6 +1150,11 @@ static inline unsigned long queue_segment_boundary(struct request_queue *q)
return q->limits.seg_boundary_mask;
}
static inline unsigned long queue_virt_boundary(struct request_queue *q)
{
return q->limits.virt_boundary_mask;
}
static inline unsigned int queue_max_sectors(struct request_queue *q)
{
return q->limits.max_sectors;
@@ -1354,6 +1355,39 @@ static inline void put_dev_sector(Sector p)
page_cache_release(p.v);
}
/*
* Check if adding a bio_vec after bprv with offset would create a gap in
* the SG list. Most drivers don't care about this, but some do.
*/
static inline bool bvec_gap_to_prev(struct request_queue *q,
struct bio_vec *bprv, unsigned int offset)
{
if (!queue_virt_boundary(q))
return false;
return offset ||
((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}
static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
struct bio *next)
{
if (!bio_has_data(prev))
return false;
return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
next->bi_io_vec[0].bv_offset);
}
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
return bio_will_gap(req->q, req->biotail, bio);
}
static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
return bio_will_gap(req->q, bio, req->bio);
}
struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
@@ -1480,6 +1514,26 @@ queue_max_integrity_segments(struct request_queue *q)
return q->limits.max_integrity_segments;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
struct bio *next)
{
struct bio_integrity_payload *bip = bio_integrity(req->bio);
struct bio_integrity_payload *bip_next = bio_integrity(next);
return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
bip_next->bip_vec[0].bv_offset);
}
static inline bool integrity_req_gap_front_merge(struct request *req,
struct bio *bio)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
bip_next->bip_vec[0].bv_offset);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
struct bio;
@@ -1546,6 +1600,16 @@ static inline bool blk_integrity_is_initialized(struct gendisk *g)
{
return 0;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
struct bio *next)
{
return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
struct bio *bio)
{
return false;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
@@ -1555,8 +1619,8 @@ struct block_device_operations {
int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
long (*direct_access)(struct block_device *, sector_t,
void **, unsigned long *pfn, long size);
long (*direct_access)(struct block_device *, sector_t, void __pmem **,
unsigned long *pfn);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
/* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1574,8 +1638,8 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
struct writeback_control *);
extern long bdev_direct_access(struct block_device *, sector_t, void **addr,
unsigned long *pfn, long size);
extern long bdev_direct_access(struct block_device *, sector_t,
void __pmem **addr, unsigned long *pfn, long size);
#else /* CONFIG_BLOCK */
struct block_device;


@@ -10,6 +10,7 @@
#include <uapi/linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/perf_event.h>
struct bpf_map;
@@ -24,6 +25,10 @@ struct bpf_map_ops {
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
int (*map_delete_elem)(struct bpf_map *map, void *key);
/* funcs called by prog_array and perf_event_array map */
void *(*map_fd_get_ptr) (struct bpf_map *map, int fd);
void (*map_fd_put_ptr) (void *ptr);
};
struct bpf_map {
@@ -142,13 +147,13 @@ struct bpf_array {
bool owner_jited;
union {
char value[0] __aligned(8);
struct bpf_prog *prog[0] __aligned(8);
void *ptrs[0] __aligned(8);
};
};
#define MAX_TAIL_CALL_CNT 32
u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
void bpf_prog_array_map_clear(struct bpf_map *map);
void bpf_fd_array_map_clear(struct bpf_map *map);
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
@@ -185,6 +190,7 @@ extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_perf_event_read_proto;
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
@@ -192,5 +198,7 @@ extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
#endif /* _LINUX_BPF_H */


@@ -107,6 +107,7 @@ static inline u64 ceph_sanitize_features(u64 features)
CEPH_FEATURE_OSDMAP_ENC | \
CEPH_FEATURE_CRUSH_TUNABLES3 | \
CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
CEPH_FEATURE_MSGR_KEEPALIVE2 | \
CEPH_FEATURE_CRUSH_V4)
#define CEPH_FEATURES_REQUIRED_DEFAULT \


@@ -46,6 +46,7 @@ struct ceph_options {
unsigned long mount_timeout; /* jiffies */
unsigned long osd_idle_ttl; /* jiffies */
unsigned long osd_keepalive_timeout; /* jiffies */
unsigned long monc_ping_timeout; /* jiffies */
/*
* any type that can't be simply compared or doesn't need
@@ -66,6 +67,7 @@ struct ceph_options {
#define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000)
#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000)
#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000)
#define CEPH_MONC_PING_TIMEOUT_DEFAULT msecs_to_jiffies(30 * 1000)
#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)


@@ -238,6 +238,8 @@ struct ceph_connection {
bool out_kvec_is_msg; /* kvec refers to out_msg */
int out_more; /* there is more data after the kvecs */
__le64 out_temp_ack; /* for writing an ack */
struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
stamp */
/* message in temps */
struct ceph_msg_header in_hdr;
@@ -248,6 +250,8 @@ struct ceph_connection {
int in_base_pos; /* bytes read */
__le64 in_temp_ack; /* for reading an ack */
struct timespec last_keepalive_ack; /* keepalive2 ack stamp */
struct delayed_work work; /* send|recv work */
unsigned long delay; /* current delay interval */
};
@@ -285,6 +289,8 @@ extern void ceph_msg_revoke(struct ceph_msg *msg);
extern void ceph_msg_revoke_incoming(struct ceph_msg *msg);
extern void ceph_con_keepalive(struct ceph_connection *con);
extern bool ceph_con_keepalive_expired(struct ceph_connection *con,
unsigned long interval);
extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
size_t length, size_t alignment);


@@ -84,10 +84,12 @@ struct ceph_entity_inst {
#define CEPH_MSGR_TAG_MSG 7 /* message */
#define CEPH_MSGR_TAG_ACK 8 /* message ack */
#define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */
#define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */
#define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */
#define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */
#define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */
#define CEPH_MSGR_TAG_KEEPALIVE2 14 /* keepalive2 byte + ceph_timespec */
#define CEPH_MSGR_TAG_KEEPALIVE2_ACK 15 /* keepalive2 reply */
/*


@@ -34,12 +34,17 @@ struct seq_file;
/* define the enumeration of all cgroup subsystems */
#define SUBSYS(_x) _x ## _cgrp_id,
#define SUBSYS_TAG(_t) CGROUP_ ## _t, \
__unused_tag_ ## _t = CGROUP_ ## _t - 1,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS_TAG
#undef SUBSYS
#define CGROUP_CANFORK_COUNT (CGROUP_CANFORK_END - CGROUP_CANFORK_START)
/* bits in struct cgroup_subsys_state flags field */
enum {
CSS_NO_REF = (1 << 0), /* no reference counting for this css */
@@ -318,7 +323,7 @@ struct cftype {
* end of cftype array.
*/
char name[MAX_CFTYPE_NAME];
int private;
unsigned long private;
/*
* If not 0, file mode is set to this value, otherwise it will
* be figured out automatically
@@ -406,7 +411,9 @@ struct cgroup_subsys {
struct cgroup_taskset *tset);
void (*attach)(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset);
void (*fork)(struct task_struct *task);
int (*can_fork)(struct task_struct *task, void **priv_p);
void (*cancel_fork)(struct task_struct *task, void *priv);
void (*fork)(struct task_struct *task, void *priv);
void (*exit)(struct cgroup_subsys_state *css,
struct cgroup_subsys_state *old_css,
struct task_struct *task);
@@ -434,6 +441,9 @@ struct cgroup_subsys {
int id;
const char *name;
/* optional, initialized automatically during boot if not set */
const char *legacy_name;
/* link to parent, protected by cgroup_lock() */
struct cgroup_root *root;
@@ -463,34 +473,12 @@ struct cgroup_subsys {
unsigned int depends_on;
};
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
/**
* cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
* @tsk: target task
*
* Called from threadgroup_change_begin() and allows cgroup operations to
* synchronize against threadgroup changes using a percpu_rw_semaphore.
*/
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
percpu_down_read(&cgroup_threadgroup_rwsem);
}
/**
* cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
* @tsk: target task
*
* Called from threadgroup_change_end(). Counterpart of
* cgroup_threadgroup_change_begin().
*/
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
percpu_up_read(&cgroup_threadgroup_rwsem);
}
void cgroup_threadgroup_change_begin(struct task_struct *tsk);
void cgroup_threadgroup_change_end(struct task_struct *tsk);
#else /* CONFIG_CGROUPS */
#define CGROUP_CANFORK_COUNT 0
#define CGROUP_SUBSYS_COUNT 0
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {}


@@ -22,6 +22,15 @@
#ifdef CONFIG_CGROUPS
/*
* All weight knobs on the default hierarchy should use the following min,
* default and max values. The default value is the logarithmic center of
* MIN and MAX and allows 100x to be expressed in both directions.
*/
#define CGROUP_WEIGHT_MIN 1
#define CGROUP_WEIGHT_DFL 100
#define CGROUP_WEIGHT_MAX 10000
/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
struct cgroup_subsys *ss;
@@ -62,7 +71,12 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);
void cgroup_fork(struct task_struct *p);
void cgroup_post_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p,
void *ss_priv[CGROUP_CANFORK_COUNT]);
extern void cgroup_cancel_fork(struct task_struct *p,
void *ss_priv[CGROUP_CANFORK_COUNT]);
extern void cgroup_post_fork(struct task_struct *p,
void *old_ss_priv[CGROUP_CANFORK_COUNT]);
void cgroup_exit(struct task_struct *p);
int cgroup_init_early(void);
@@ -524,7 +538,13 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
struct dentry *dentry) { return -EINVAL; }
static inline void cgroup_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p,
void *ss_priv[CGROUP_CANFORK_COUNT])
{ return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p,
void *ss_priv[CGROUP_CANFORK_COUNT]) {}
static inline void cgroup_post_fork(struct task_struct *p,
void *ss_priv[CGROUP_CANFORK_COUNT]) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline int cgroup_init_early(void) { return 0; }
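For orientation, a minimal sketch of how the new three-phase fork callbacks fit together, loosely modeled on a pids-style controller: can_fork() may veto the fork, cancel_fork() undoes its effects if the fork later fails, and fork() runs once the fork has committed. All example_* names, including the charge helpers, are hypothetical:

static int example_can_fork(struct task_struct *task, void **priv_p)
{
	/* charge one task; returning nonzero vetoes the fork */
	return example_try_charge() ? -EAGAIN : 0;
}

static void example_cancel_fork(struct task_struct *task, void *priv)
{
	example_uncharge();	/* fork failed after can_fork(); roll back */
}

static void example_fork(struct task_struct *task, void *priv)
{
	/* fork committed; nothing further to do in this sketch */
}

struct cgroup_subsys example_cgrp_subsys = {
	.can_fork	= example_can_fork,
	.cancel_fork	= example_cancel_fork,
	.fork		= example_fork,
};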


@@ -3,6 +3,17 @@
*
* DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
*/
/*
* This file *must* be included with SUBSYS() defined.
* SUBSYS_TAG() is a noop if undefined.
*/
#ifndef SUBSYS_TAG
#define __TMP_SUBSYS_TAG
#define SUBSYS_TAG(_x)
#endif
#if IS_ENABLED(CONFIG_CPUSETS)
SUBSYS(cpuset)
#endif
@@ -16,7 +27,7 @@ SUBSYS(cpuacct)
#endif
#if IS_ENABLED(CONFIG_BLK_CGROUP)
SUBSYS(blkio)
SUBSYS(io)
#endif
#if IS_ENABLED(CONFIG_MEMCG)
@@ -47,12 +58,29 @@ SUBSYS(net_prio)
SUBSYS(hugetlb)
#endif
/*
* Subsystems that implement the can_fork() family of callbacks.
*/
SUBSYS_TAG(CANFORK_START)
#if IS_ENABLED(CONFIG_CGROUP_PIDS)
SUBSYS(pids)
#endif
SUBSYS_TAG(CANFORK_END)
/*
* The following subsystems are not supported on the default hierarchy.
*/
#if IS_ENABLED(CONFIG_CGROUP_DEBUG)
SUBSYS(debug)
#endif
#ifdef __TMP_SUBSYS_TAG
#undef __TMP_SUBSYS_TAG
#undef SUBSYS_TAG
#endif
/*
* DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
*/


@@ -11,7 +11,6 @@
#ifndef __LINUX_CLK_PROVIDER_H
#define __LINUX_CLK_PROVIDER_H
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -33,10 +32,33 @@
#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
#define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */
struct clk;
struct clk_hw;
struct clk_core;
struct dentry;
/**
* struct clk_rate_request - Structure encoding the clk constraints that
* a clock user might require.
*
* @rate: Requested clock rate. This field will be adjusted by
* clock drivers according to hardware capabilities.
* @min_rate: Minimum rate imposed by clk users.
* @max_rate: Maximum rate imposed by clk users.
* @best_parent_rate: The best parent rate a parent can provide to fulfill the
* requested constraints.
* @best_parent_hw: The most appropriate parent clock that fulfills the
* requested constraints.
*
*/
struct clk_rate_request {
unsigned long rate;
unsigned long min_rate;
unsigned long max_rate;
unsigned long best_parent_rate;
struct clk_hw *best_parent_hw;
};
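A hedged sketch of what a driver-side .determine_rate looks like against this struct (the callback name is illustrative): the driver adjusts req->rate in place within the user-imposed bounds and returns a status code instead of a rate:

static int example_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	/* clamp the requested rate to the min/max imposed by clk users */
	req->rate = clamp(req->rate, req->min_rate, req->max_rate);
	return 0;
}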
/**
* struct clk_ops - Callback operations for hardware clocks; these are to
* be provided by the clock implementation, and will be called by drivers
@@ -176,12 +198,8 @@ struct clk_ops {
unsigned long parent_rate);
long (*round_rate)(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate);
long (*determine_rate)(struct clk_hw *hw,
unsigned long rate,
unsigned long min_rate,
unsigned long max_rate,
unsigned long *best_parent_rate,
struct clk_hw **best_parent_hw);
int (*determine_rate)(struct clk_hw *hw,
struct clk_rate_request *req);
int (*set_parent)(struct clk_hw *hw, u8 index);
u8 (*get_parent)(struct clk_hw *hw);
int (*set_rate)(struct clk_hw *hw, unsigned long rate,
@@ -343,6 +361,9 @@ struct clk_div_table {
* to the closest integer instead of the up one.
* CLK_DIVIDER_READ_ONLY - The divider settings are preconfigured and should
* not be changed by the clock framework.
* CLK_DIVIDER_MAX_AT_ZERO - For dividers which are like CLK_DIVIDER_ONE_BASED
* except when the value read from the register is zero, the divisor is
* 2^width of the field.
*/
struct clk_divider {
struct clk_hw hw;
@@ -360,6 +381,7 @@ struct clk_divider {
#define CLK_DIVIDER_HIWORD_MASK BIT(3)
#define CLK_DIVIDER_ROUND_CLOSEST BIT(4)
#define CLK_DIVIDER_READ_ONLY BIT(5)
#define CLK_DIVIDER_MAX_AT_ZERO BIT(6)
extern const struct clk_ops clk_divider_ops;
@@ -549,6 +571,23 @@ struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
void of_gpio_clk_gate_setup(struct device_node *node);
/**
* struct clk_gpio_mux - gpio controlled clock multiplexer
*
* @hw: see struct clk_gpio
* @gpiod: gpio descriptor to select the parent of this clock multiplexer
*
* Clock with a gpio control for selecting the parent clock.
* Implements .get_parent, .set_parent and .determine_rate
*/
extern const struct clk_ops clk_gpio_mux_ops;
struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents, unsigned gpio,
bool active_low, unsigned long flags);
void of_gpio_mux_clk_setup(struct device_node *node);
/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
@@ -568,31 +607,27 @@ void devm_clk_unregister(struct device *dev, struct clk *clk);
/* helper functions */
const char *__clk_get_name(struct clk *clk);
const char *clk_hw_get_name(const struct clk_hw *hw);
struct clk_hw *__clk_get_hw(struct clk *clk);
u8 __clk_get_num_parents(struct clk *clk);
struct clk *__clk_get_parent(struct clk *clk);
struct clk *clk_get_parent_by_index(struct clk *clk, u8 index);
unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw,
unsigned int index);
unsigned int __clk_get_enable_count(struct clk *clk);
unsigned long __clk_get_rate(struct clk *clk);
unsigned long clk_hw_get_rate(const struct clk_hw *hw);
unsigned long __clk_get_flags(struct clk *clk);
bool __clk_is_prepared(struct clk *clk);
unsigned long clk_hw_get_flags(const struct clk_hw *hw);
bool clk_hw_is_prepared(const struct clk_hw *hw);
bool __clk_is_enabled(struct clk *clk);
struct clk *__clk_lookup(const char *name);
long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
unsigned long min_rate,
unsigned long max_rate,
unsigned long *best_parent_rate,
struct clk_hw **best_parent_p);
unsigned long __clk_determine_rate(struct clk_hw *core,
unsigned long rate,
unsigned long min_rate,
unsigned long max_rate);
long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
unsigned long min_rate,
unsigned long max_rate,
unsigned long *best_parent_rate,
struct clk_hw **best_parent_p);
int __clk_mux_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req);
int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req);
int __clk_mux_determine_rate_closest(struct clk_hw *hw,
struct clk_rate_request *req);
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
unsigned long max_rate);
static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
{
@@ -603,7 +638,7 @@ static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
/*
* FIXME clock api without lock protection
*/
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate);
unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate);
struct of_device_id;


@@ -7,6 +7,8 @@
* published by the Free Software Foundation.
*/
#include <linux/types.h>
struct device_node;
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)


@@ -16,8 +16,20 @@
#include <linux/types.h>
struct device;
struct device_node;
struct generic_pm_domain;
void r8a7778_clocks_init(u32 mode);
void r8a7779_clocks_init(u32 mode);
void rcar_gen2_clocks_init(u32 mode);
#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
void cpg_mstp_add_clk_domain(struct device_node *np);
int cpg_mstp_attach_dev(struct generic_pm_domain *domain, struct device *dev);
void cpg_mstp_detach_dev(struct generic_pm_domain *domain, struct device *dev);
#else
static inline void cpg_mstp_add_clk_domain(struct device_node *np) {}
#endif
#endif


@@ -17,7 +17,8 @@
#ifndef __LINUX_CLK_TEGRA_H_
#define __LINUX_CLK_TEGRA_H_
#include <linux/clk.h>
#include <linux/types.h>
#include <linux/bug.h>
/*
* Tegra CPU clock and reset control ops


@@ -188,33 +188,6 @@ struct clk_hw_omap {
/* DPLL Type and DCO Selection Flags */
#define DPLL_J_TYPE 0x1
/* Composite clock component types */
enum {
CLK_COMPONENT_TYPE_GATE = 0,
CLK_COMPONENT_TYPE_DIVIDER,
CLK_COMPONENT_TYPE_MUX,
CLK_COMPONENT_TYPE_MAX,
};
/**
* struct ti_dt_clk - OMAP DT clock alias declarations
* @lk: clock lookup definition
* @node_name: clock DT node to map to
*/
struct ti_dt_clk {
struct clk_lookup lk;
char *node_name;
};
#define DT_CLK(dev, con, name) \
{ \
.lk = { \
.dev_id = dev, \
.con_id = con, \
}, \
.node_name = name, \
}
/* Static memmap indices */
enum {
TI_CLKM_CM = 0,
@@ -225,8 +198,6 @@ enum {
CLK_MAX_MEMMAPS
};
typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);
/**
* struct clk_omap_reg - OMAP register declaration
* @offset: offset from the master IP module base address
@@ -238,98 +209,62 @@ struct clk_omap_reg {
};
/**
* struct ti_clk_ll_ops - low-level register access ops for a clock
* struct ti_clk_ll_ops - low-level ops for clocks
* @clk_readl: pointer to register read function
* @clk_writel: pointer to register write function
* @clkdm_clk_enable: pointer to clockdomain enable function
* @clkdm_clk_disable: pointer to clockdomain disable function
* @cm_wait_module_ready: pointer to CM module wait ready function
* @cm_split_idlest_reg: pointer to CM module function to split idlest reg
*
* Low-level register access ops are generally used by the basic clock types
* (clk-gate, clk-mux, clk-divider etc.) to provide support for various
* low-level hardware interfaces (direct MMIO, regmap etc.), but can also be
* used by other hardware-specific clock drivers if needed.
* Low-level ops are generally used by the basic clock types (clk-gate,
* clk-mux, clk-divider etc.) to provide support for various low-level
* hardware interfaces (direct MMIO, regmap etc.), and are initialized
* by board code. Low-level ops also contain some other platform specific
* operations not provided directly by clock drivers.
*/
struct ti_clk_ll_ops {
u32 (*clk_readl)(void __iomem *reg);
void (*clk_writel)(u32 val, void __iomem *reg);
int (*clkdm_clk_enable)(struct clockdomain *clkdm, struct clk *clk);
int (*clkdm_clk_disable)(struct clockdomain *clkdm,
struct clk *clk);
int (*cm_wait_module_ready)(u8 part, s16 prcm_mod, u16 idlest_reg,
u8 idlest_shift);
int (*cm_split_idlest_reg)(void __iomem *idlest_reg, s16 *prcm_inst,
u8 *idlest_reg_id);
};
extern struct ti_clk_ll_ops *ti_clk_ll_ops;
extern const struct clk_ops ti_clk_divider_ops;
extern const struct clk_ops ti_clk_mux_ops;
#define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw)
void omap2_init_clk_hw_omap_clocks(struct clk *clk);
int omap3_noncore_dpll_enable(struct clk_hw *hw);
void omap3_noncore_dpll_disable(struct clk_hw *hw);
int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index);
int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate);
int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
unsigned long rate,
unsigned long parent_rate,
u8 index);
long omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long min_rate,
unsigned long max_rate,
unsigned long *best_parent_rate,
struct clk_hw **best_parent_clk);
unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
unsigned long parent_rate);
long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
unsigned long target_rate,
unsigned long *parent_rate);
long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long min_rate,
unsigned long max_rate,
unsigned long *best_parent_rate,
struct clk_hw **best_parent_clk);
u8 omap2_init_dpll_parent(struct clk_hw *hw);
unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
unsigned long *parent_rate);
void omap2_init_clk_clkdm(struct clk_hw *clk);
unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
unsigned long parent_rate);
int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate);
long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate);
int omap2_clkops_enable_clkdm(struct clk_hw *hw);
void omap2_clkops_disable_clkdm(struct clk_hw *hw);
int omap2_clk_disable_autoidle_all(void);
void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate,
unsigned long parent_rate);
int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate, u8 index);
int omap2_dflt_clk_enable(struct clk_hw *hw);
void omap2_dflt_clk_disable(struct clk_hw *hw);
int omap2_dflt_clk_is_enabled(struct clk_hw *hw);
void omap3_clk_lock_dpll5(void);
int omap2_clk_enable_autoidle_all(void);
int omap2_clk_allow_idle(struct clk *clk);
int omap2_clk_deny_idle(struct clk *clk);
unsigned long omap2_dpllcore_recalc(struct clk_hw *hw,
unsigned long parent_rate);
int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate,
unsigned long parent_rate);
void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw);
void omap2xxx_clkt_vps_init(void);
unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk);
void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index);
void ti_dt_clocks_register(struct ti_dt_clk *oclks);
void ti_dt_clk_init_provider(struct device_node *np, int index);
void ti_dt_clk_init_retry_clks(void);
void ti_dt_clockdomains_setup(void);
int ti_clk_retry_init(struct device_node *node, struct clk_hw *hw,
ti_of_clk_init_cb_t func);
int of_ti_clk_autoidle_setup(struct device_node *node);
int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type);
int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops);
struct regmap;
int omap2_clk_provider_init(struct device_node *parent, int index,
struct regmap *syscon, void __iomem *mem);
void omap2_clk_legacy_provider_init(int index, void __iomem *mem);
int omap3430_dt_clk_init(void);
int omap3630_dt_clk_init(void);
int am35xx_dt_clk_init(void);
int ti81xx_dt_clk_init(void);
int dm814x_dt_clk_init(void);
int dm816x_dt_clk_init(void);
int omap4xxx_dt_clk_init(void);
int omap5xxx_dt_clk_init(void);
int dra7xx_dt_clk_init(void);
@@ -338,27 +273,24 @@ int am43xx_dt_clk_init(void);
int omap2420_dt_clk_init(void);
int omap2430_dt_clk_init(void);
#ifdef CONFIG_OF
void of_ti_clk_allow_autoidle_all(void);
void of_ti_clk_deny_autoidle_all(void);
#else
static inline void of_ti_clk_allow_autoidle_all(void) { }
static inline void of_ti_clk_deny_autoidle_all(void) { }
#endif
struct ti_clk_features {
u32 flags;
long fint_min;
long fint_max;
long fint_band1_max;
long fint_band2_min;
u8 dpll_bypass_vals;
u8 cm_idlest_val;
};
#define TI_CLK_DPLL_HAS_FREQSEL BIT(0)
#define TI_CLK_DPLL4_DENY_REPROGRAM BIT(1)
#define TI_CLK_DISABLE_CLKDM_CONTROL BIT(2)
void ti_clk_setup_features(struct ti_clk_features *features);
const struct ti_clk_features *ti_clk_get_features(void);
extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll;
extern const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait;
extern const struct clk_hw_omap_ops clkhwops_omap3_dpll;
extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx;
extern const struct clk_hw_omap_ops clkhwops_wait;
extern const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait;
extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait;
extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait;
extern const struct clk_hw_omap_ops clkhwops_iclk;
extern const struct clk_hw_omap_ops clkhwops_iclk_wait;
extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait;
extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait;
extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait;
#ifdef CONFIG_ATAGS
int omap3430_clk_legacy_init(void);


@@ -18,15 +18,6 @@
struct clock_event_device;
struct module;
/* Clock event mode commands for legacy ->set_mode(): OBSOLETE */
enum clock_event_mode {
CLOCK_EVT_MODE_UNUSED,
CLOCK_EVT_MODE_SHUTDOWN,
CLOCK_EVT_MODE_PERIODIC,
CLOCK_EVT_MODE_ONESHOT,
CLOCK_EVT_MODE_RESUME,
};
/*
* Possible states of a clock event device.
*
@@ -86,16 +77,14 @@ enum clock_event_state {
* @min_delta_ns: minimum delta value in ns
* @mult: nanosecond to cycles multiplier
* @shift: nanoseconds to cycles divisor (power of two)
* @mode: operating mode, relevant only to ->set_mode(), OBSOLETE
* @state_use_accessors:current state of the device, assigned by the core code
* @features: features
* @retries: number of forced programming retries
* @set_mode: legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME.
* @set_state_periodic: switch state to periodic, if !set_mode
* @set_state_oneshot: switch state to oneshot, if !set_mode
* @set_state_oneshot_stopped: switch state to oneshot_stopped, if !set_mode
* @set_state_shutdown: switch state to shutdown, if !set_mode
* @tick_resume: resume clkevt device, if !set_mode
* @set_state_periodic: switch state to periodic
* @set_state_oneshot: switch state to oneshot
* @set_state_oneshot_stopped: switch state to oneshot_stopped
* @set_state_shutdown: switch state to shutdown
* @tick_resume: resume clkevt device
* @broadcast: function to broadcast events
* @min_delta_ticks: minimum delta value in ticks stored for reconfiguration
* @max_delta_ticks: maximum delta value in ticks stored for reconfiguration
@@ -116,18 +105,10 @@ struct clock_event_device {
u64 min_delta_ns;
u32 mult;
u32 shift;
enum clock_event_mode mode;
enum clock_event_state state_use_accessors;
unsigned int features;
unsigned long retries;
/*
* State transition callback(s): Only one of the two groups should be
* defined:
* - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME.
* - set_state_{shutdown|periodic|oneshot|oneshot_stopped}(), tick_resume().
*/
void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *);
int (*set_state_periodic)(struct clock_event_device *);
int (*set_state_oneshot)(struct clock_event_device *);
int (*set_state_oneshot_stopped)(struct clock_event_device *);
@@ -234,13 +215,10 @@ static inline int tick_check_broadcast_expired(void) { return 0; }
static inline void tick_setup_hrtimer_broadcast(void) { }
# endif
extern int clockevents_notify(unsigned long reason, void *arg);
#else /* !CONFIG_GENERIC_CLOCKEVENTS: */
static inline void clockevents_suspend(void) { }
static inline void clockevents_resume(void) { }
static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; }
static inline int tick_check_broadcast_expired(void) { return 0; }
static inline void tick_setup_hrtimer_broadcast(void) { }


@@ -252,7 +252,12 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
#define WRITE_ONCE(x, val) \
({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
({ \
union { typeof(x) __val; char __c[1]; } __u = \
{ .__val = (__force typeof(x)) (val) }; \
__write_once_size(&(x), __u.__c, sizeof(x)); \
__u.__val; \
})
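A hedged usage sketch (variable and function names hypothetical): paired READ_ONCE()/WRITE_ONCE() stop the compiler from tearing, fusing, or caching accesses to data shared with another context:

static int example_ready;	/* shared with an interrupt handler */

void example_publish(void)
{
	WRITE_ONCE(example_ready, 1);	/* single, untorn store */
}

int example_poll(void)
{
	return READ_ONCE(example_ready);	/* re-read on every call */
}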
/**
* READ_ONCE_CTRL - Read a value heading a control dependency


@@ -49,13 +49,28 @@ static inline void exception_exit(enum ctx_state prev_ctx)
}
}
/**
* ct_state() - return the current context tracking state if known
*
* Returns the current cpu's context tracking state if context tracking
* is enabled. If context tracking is disabled, returns
* CONTEXT_DISABLED. This should be used primarily for debugging.
*/
static inline enum ctx_state ct_state(void)
{
return context_tracking_is_enabled() ?
this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
}
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
#endif /* !CONFIG_CONTEXT_TRACKING */
#define CT_WARN_ON(cond) WARN_ON(context_tracking_is_enabled() && (cond))
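A hedged sketch of the intended use (the helper name is hypothetical, but this mirrors how kernel entry code can assert the tracked context):

static inline void example_enter_from_user_mode(void)
{
	/* trip a warning if context tracking thinks we were in the kernel */
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit();
}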
#ifdef CONFIG_CONTEXT_TRACKING_FORCE
extern void context_tracking_init(void);


@@ -14,6 +14,7 @@ struct context_tracking {
bool active;
int recursion;
enum ctx_state {
CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
CONTEXT_KERNEL = 0,
CONTEXT_USER,
CONTEXT_GUEST,


@@ -14,6 +14,7 @@
#define _LINUX_CORESIGHT_H
#include <linux/device.h>
#include <linux/sched.h>
/* Peripheral id registers (0xFD0-0xFEC) */
#define CORESIGHT_PERIPHIDR4 0xfd0
@@ -248,4 +249,24 @@ static inline struct coresight_platform_data *of_get_coresight_platform_data(
struct device *dev, struct device_node *node) { return NULL; }
#endif
#ifdef CONFIG_PID_NS
static inline unsigned long
coresight_vpid_to_pid(unsigned long vpid)
{
struct task_struct *task = NULL;
unsigned long pid = 0;
rcu_read_lock();
task = find_task_by_vpid(vpid);
if (task)
pid = task_pid_nr(task);
rcu_read_unlock();
return pid;
}
#else
static inline unsigned long
coresight_vpid_to_pid(unsigned long vpid) { return vpid; }
#endif
#endif


@@ -340,7 +340,27 @@ struct cper_ia_proc_ctx {
__u64 mm_reg_addr;
};
/* Memory Error Section */
/* Old Memory Error Section UEFI 2.1, 2.2 */
struct cper_sec_mem_err_old {
__u64 validation_bits;
__u64 error_status;
__u64 physical_addr;
__u64 physical_addr_mask;
__u16 node;
__u16 card;
__u16 module;
__u16 bank;
__u16 device;
__u16 row;
__u16 column;
__u16 bit_pos;
__u64 requestor_id;
__u64 responder_id;
__u64 target_id;
__u8 error_type;
};
/* Memory Error Section UEFI >= 2.3 */
struct cper_sec_mem_err {
__u64 validation_bits;
__u64 error_status;


@@ -11,6 +11,7 @@
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <asm/cpufeature.h>
@@ -43,16 +44,16 @@
* For a list of legal values for 'feature', please consult the file
* 'asm/cpufeature.h' of your favorite architecture.
*/
#define module_cpu_feature_match(x, __init) \
#define module_cpu_feature_match(x, __initfunc) \
static struct cpu_feature const cpu_feature_match_ ## x[] = \
{ { .feature = cpu_feature(x) }, { } }; \
MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
\
static int cpu_feature_match_ ## x ## _init(void) \
static int __init cpu_feature_match_ ## x ## _init(void) \
{ \
if (!cpu_have_feature(cpu_feature(x))) \
return -ENODEV; \
return __init(); \
return __initfunc(); \
} \
module_init(cpu_feature_match_ ## x ## _init)
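A hedged usage sketch: a module that should auto-load only on CPUs advertising a given feature passes its init function to the macro. The AES feature and the init body here are illustrative:

static int __init example_aes_init(void)
{
	return 0;	/* register algorithms, etc. */
}
module_cpu_feature_match(AES, example_aes_init);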


@@ -51,17 +51,16 @@ struct cpufreq_cpuinfo {
unsigned int transition_latency;
};
struct cpufreq_real_policy {
struct cpufreq_user_policy {
unsigned int min; /* in kHz */
unsigned int max; /* in kHz */
unsigned int policy; /* see above */
struct cpufreq_governor *governor; /* see below */
};
struct cpufreq_policy {
/* CPUs sharing clock, require sw coordination */
cpumask_var_t cpus; /* Online CPUs only */
cpumask_var_t related_cpus; /* Online + Offline CPUs */
cpumask_var_t real_cpus; /* Related and present */
unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
should set cpufreq */
@@ -87,7 +86,7 @@ struct cpufreq_policy {
struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */
struct cpufreq_real_policy user_policy;
struct cpufreq_user_policy user_policy;
struct cpufreq_frequency_table *freq_table;
struct list_head policy_list;
@@ -128,9 +127,14 @@ struct cpufreq_policy {
#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *policy);
#else
static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
return NULL;
}
static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
return NULL;
@@ -368,11 +372,10 @@ static inline void cpufreq_resume(void) {}
/* Policy Notifiers */
#define CPUFREQ_ADJUST (0)
#define CPUFREQ_INCOMPATIBLE (1)
#define CPUFREQ_NOTIFY (2)
#define CPUFREQ_START (3)
#define CPUFREQ_CREATE_POLICY (4)
#define CPUFREQ_REMOVE_POLICY (5)
#define CPUFREQ_NOTIFY (1)
#define CPUFREQ_START (2)
#define CPUFREQ_CREATE_POLICY (3)
#define CPUFREQ_REMOVE_POLICY (4)
#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
@@ -577,6 +580,8 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
int cpufreq_boost_trigger_state(int state);
int cpufreq_boost_supported(void);
int cpufreq_boost_enabled(void);
int cpufreq_enable_boost_support(void);
bool policy_has_boost_freq(struct cpufreq_policy *policy);
#else
static inline int cpufreq_boost_trigger_state(int state)
{
@@ -590,12 +595,23 @@ static inline int cpufreq_boost_enabled(void)
{
return 0;
}
static inline int cpufreq_enable_boost_support(void)
{
return -EINVAL;
}
static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
return false;
}
#endif
/* the following function is for cpufreq core use only */
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
extern struct freq_attr *cpufreq_generic_attr[];
int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);


@@ -84,7 +84,6 @@ struct cpuidle_device {
struct list_head device_list;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
int safe_state_index;
cpumask_t coupled_cpus;
struct cpuidle_coupled *coupled;
#endif


@@ -137,6 +137,7 @@ struct cred {
kernel_cap_t cap_permitted; /* caps we're permitted */
kernel_cap_t cap_effective; /* caps we can actually use */
kernel_cap_t cap_bset; /* capability bounding set */
kernel_cap_t cap_ambient; /* Ambient capability set */
#ifdef CONFIG_KEYS
unsigned char jit_keyring; /* default keyring to attach requested
* keys to */
@@ -212,6 +213,13 @@ static inline void validate_process_creds(void)
}
#endif
static inline bool cap_ambient_invariant_ok(const struct cred *cred)
{
return cap_issubset(cred->cap_ambient,
cap_intersect(cred->cap_permitted,
cred->cap_inheritable));
}
/**
* get_new_cred - Get a reference on a new set of credentials
* @cred: The new credentials to reference


@@ -101,12 +101,6 @@
*/
#define CRYPTO_ALG_INTERNAL 0x00002000
/*
* Temporary flag used to prevent legacy AEAD implementations from
* being used by user-space.
*/
#define CRYPTO_ALG_AEAD_NEW 0x00004000
/*
* Transform masks and values (for crt_flags).
*/
@@ -142,13 +136,10 @@
struct scatterlist;
struct crypto_ablkcipher;
struct crypto_async_request;
struct crypto_aead;
struct crypto_blkcipher;
struct crypto_hash;
struct crypto_tfm;
struct crypto_type;
struct aead_request;
struct aead_givcrypt_request;
struct skcipher_givcrypt_request;
typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
@@ -274,47 +265,6 @@ struct ablkcipher_alg {
unsigned int ivsize;
};
/**
* struct old_aead_alg - AEAD cipher definition
* @maxauthsize: Set the maximum authentication tag size supported by the
* transformation. A transformation may support smaller tag sizes.
* As the authentication tag is a message digest to ensure the
* integrity of the encrypted data, a consumer typically wants the
* largest authentication tag possible as defined by this
* variable.
* @setauthsize: Set authentication size for the AEAD transformation. This
* function is used to specify the consumer requested size of the
* authentication tag to be either generated by the transformation
* during encryption or the size of the authentication tag to be
* supplied during the decryption operation. This function is also
* responsible for checking the authentication tag size for
* validity.
* @setkey: see struct ablkcipher_alg
* @encrypt: see struct ablkcipher_alg
* @decrypt: see struct ablkcipher_alg
* @givencrypt: see struct ablkcipher_alg
* @givdecrypt: see struct ablkcipher_alg
* @geniv: see struct ablkcipher_alg
* @ivsize: see struct ablkcipher_alg
*
* All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
* mandatory and must be filled.
*/
struct old_aead_alg {
int (*setkey)(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen);
int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
int (*encrypt)(struct aead_request *req);
int (*decrypt)(struct aead_request *req);
int (*givencrypt)(struct aead_givcrypt_request *req);
int (*givdecrypt)(struct aead_givcrypt_request *req);
const char *geniv;
unsigned int ivsize;
unsigned int maxauthsize;
};
/**
* struct blkcipher_alg - synchronous block cipher definition
* @min_keysize: see struct ablkcipher_alg
@@ -409,7 +359,6 @@ struct compress_alg {
#define cra_ablkcipher cra_u.ablkcipher
#define cra_aead cra_u.aead
#define cra_blkcipher cra_u.blkcipher
#define cra_cipher cra_u.cipher
#define cra_compress cra_u.compress
@@ -460,7 +409,7 @@ struct compress_alg {
* struct crypto_type, which implements callbacks common for all
* transformation types. There are multiple options:
* &crypto_blkcipher_type, &crypto_ablkcipher_type,
* &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
* &crypto_ahash_type, &crypto_rng_type.
* This field might be empty. In that case, there are no common
* callbacks. This is the case for: cipher, compress, shash.
* @cra_u: Callbacks implementing the transformation. This is a union of
@@ -508,7 +457,6 @@ struct crypto_alg {
union {
struct ablkcipher_alg ablkcipher;
struct old_aead_alg aead;
struct blkcipher_alg blkcipher;
struct cipher_alg cipher;
struct compress_alg compress;

include/linux/dax.h Normal file

@@ -0,0 +1,39 @@
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H
#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
get_block_t, dio_iodone_t, int flags);
int dax_clear_blocks(struct inode *, sector_t block, long size);
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
int dax_truncate_page(struct inode *, loff_t from, get_block_t);
int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
dax_iodone_t);
int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
dax_iodone_t);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
unsigned int flags, get_block_t, dax_iodone_t);
int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
unsigned int flags, get_block_t, dax_iodone_t);
#else
static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, unsigned int flags, get_block_t gb,
dax_iodone_t di)
{
return VM_FAULT_FALLBACK;
}
#define __dax_pmd_fault dax_pmd_fault
#endif
int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
#define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod)
#define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod)
static inline bool vma_is_dax(struct vm_area_struct *vma)
{
return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
}
#endif
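A hedged sketch of how a filesystem would wire these entry points into its vm_operations; example_get_block is a hypothetical stand-in for the filesystem's get_block_t, and the iodone callback may be NULL:

static int example_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* forward the fault to the DAX core with our block mapper */
	return dax_fault(vma, vmf, example_get_block, NULL);
}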


@@ -116,6 +116,12 @@ struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name,
bool debugfs_initialized(void);
ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos);
ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos);
#else
#include <linux/err.h>
@@ -282,6 +288,20 @@ static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev,
return ERR_PTR(-ENODEV);
}
static inline ssize_t debugfs_read_file_bool(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
return -ENODEV;
}
static inline ssize_t debugfs_write_file_bool(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
return -ENODEV;
}
#endif
#endif
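The exported bool helpers exist so a driver can build custom fops around a boolean, e.g. to react when the value changes. A hedged sketch follows (all example_* names hypothetical); the bool's address would be passed as the data argument of debugfs_create_file():

static bool example_enabled;

static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	ssize_t ret = debugfs_write_file_bool(file, buf, count, ppos);

	/* example_enabled now holds the parsed value; react to it here */
	return ret;
}

static const struct file_operations example_fops = {
	.read	= debugfs_read_file_bool,
	.write	= example_write,
	.open	= simple_open,
	.llseek	= default_llseek,
};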


@@ -65,7 +65,10 @@ struct devfreq_dev_status {
* The "flags" parameter's possible values are
* explained above with "DEVFREQ_FLAG_*" macros.
* @get_dev_status: The device should provide the current performance
* status to devfreq, which is used by governors.
* status to devfreq. Governors are recommended not to
* use this directly; instead, they should call
* devfreq_update_stats() and read devfreq.last_status.
* @get_cur_freq: The device should provide the current frequency
* at which it is operating.
* @exit: An optional callback that is called when devfreq
@@ -161,6 +164,7 @@ struct devfreq {
struct delayed_work work;
unsigned long previous_freq;
struct devfreq_dev_status last_status;
void *data; /* private data for governors */
@@ -204,6 +208,19 @@ extern int devm_devfreq_register_opp_notifier(struct device *dev,
extern void devm_devfreq_unregister_opp_notifier(struct device *dev,
struct devfreq *devfreq);
/**
* devfreq_update_stats() - update the last_status pointer in struct devfreq
* @df: the devfreq instance whose status needs updating
*
* Governors are recommended to use this function along with last_status,
* which allows other entities to reuse the last_status without affecting
* the values fetched later by governors.
*/
static inline int devfreq_update_stats(struct devfreq *df)
{
return df->profile->get_dev_status(df->dev.parent, &df->last_status);
}
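A hedged sketch of the governor-side pattern this enables (the policy is deliberately crude and the function name hypothetical): refresh last_status once per decision, then read the cached copy rather than calling get_dev_status() again:

static int example_get_target_freq(struct devfreq *df, unsigned long *freq)
{
	int err = devfreq_update_stats(df);	/* refresh df->last_status */

	if (err)
		return err;
	/* stay at the current frequency while the device is >50% busy */
	if (2 * df->last_status.busy_time > df->last_status.total_time)
		*freq = df->last_status.current_frequency;
	return 0;
}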
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
/**
* struct devfreq_simple_ondemand_data - void *data fed to struct devfreq
@@ -289,6 +306,11 @@ static inline void devm_devfreq_unregister_opp_notifier(struct device *dev,
struct devfreq *devfreq)
{
}
static inline int devfreq_update_stats(struct devfreq *df)
{
return -EINVAL;
}
#endif /* CONFIG_PM_DEVFREQ */
#endif /* __LINUX_DEVFREQ_H__ */


@@ -82,9 +82,6 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
unsigned long arg);
typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
struct bio_vec *biovec, int max_size);
/*
* These iteration functions are typically used to check (and combine)
* properties of underlying devices.
@@ -160,7 +157,6 @@ struct target_type {
dm_status_fn status;
dm_message_fn message;
dm_ioctl_fn ioctl;
dm_merge_fn merge;
dm_busy_fn busy;
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;


@@ -341,7 +341,7 @@ struct subsys_interface {
struct bus_type *subsys;
struct list_head node;
int (*add_dev)(struct device *dev, struct subsys_interface *sif);
int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
};
int subsys_interface_register(struct subsys_interface *sif);
@@ -714,6 +714,8 @@ struct device_dma_parameters {
* along with subsystem-level and driver-level callbacks.
* @pins: For device pin management.
* See Documentation/pinctrl.txt for details.
* @msi_list: Hosts MSI descriptors
* @msi_domain: The generic MSI domain this device is using.
* @numa_node: NUMA node this device is close to.
* @dma_mask: Dma mask (if dma'ble device).
* @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
@@ -774,9 +776,15 @@ struct device {
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
struct irq_domain *msi_domain;
#endif
#ifdef CONFIG_PINCTRL
struct dev_pin_info *pins;
#endif
#ifdef CONFIG_GENERIC_MSI_IRQ
struct list_head msi_list;
#endif
#ifdef CONFIG_NUMA
int numa_node; /* NUMA node this device is close to */
@@ -861,6 +869,22 @@ static inline void set_dev_node(struct device *dev, int node)
}
#endif
static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
return dev->msi_domain;
#else
return NULL;
#endif
}
static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
dev->msi_domain = d;
#endif
}
static inline void *dev_get_drvdata(const struct device *dev)
{
return dev->driver_data;
@@ -959,6 +983,8 @@ extern int __must_check device_add(struct device *dev);
extern void device_del(struct device *dev);
extern int device_for_each_child(struct device *dev, void *data,
int (*fn)(struct device *dev, void *data));
extern int device_for_each_child_reverse(struct device *dev, void *data,
int (*fn)(struct device *dev, void *data));
extern struct device *device_find_child(struct device *dev, void *data,
int (*match)(struct device *dev, void *data));
extern int device_rename(struct device *dev, const char *new_name);


@@ -66,6 +66,7 @@ enum dma_transaction_type {
DMA_XOR_VAL,
DMA_PQ_VAL,
DMA_MEMSET,
DMA_MEMSET_SG,
DMA_INTERRUPT,
DMA_SG,
DMA_PRIVATE,
@@ -183,6 +184,8 @@ struct dma_interleaved_template {
* operation it continues the calculation with new sources
* @DMA_PREP_FENCE - tell the driver that subsequent operations depend
* on the result of this operation
* @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
* cleared or freed
*/
enum dma_ctrl_flags {
DMA_PREP_INTERRUPT = (1 << 0),
@@ -191,6 +194,7 @@ enum dma_ctrl_flags {
DMA_PREP_PQ_DISABLE_Q = (1 << 3),
DMA_PREP_CONTINUE = (1 << 4),
DMA_PREP_FENCE = (1 << 5),
DMA_CTRL_REUSE = (1 << 6),
};
/**
@@ -400,6 +404,8 @@ enum dma_residue_granularity {
* @cmd_pause: true, if pause and thereby resume is supported
* @cmd_terminate: true, if terminate cmd is supported
* @residue_granularity: granularity of the reported transfer residue
* @descriptor_reuse: if a descriptor can be reused by client and
* resubmitted multiple times
*/
struct dma_slave_caps {
u32 src_addr_widths;
@@ -408,6 +414,7 @@ struct dma_slave_caps {
bool cmd_pause;
bool cmd_terminate;
enum dma_residue_granularity residue_granularity;
bool descriptor_reuse;
};
static inline const char *dma_chan_name(struct dma_chan *chan)
@@ -467,6 +474,7 @@ struct dma_async_tx_descriptor {
dma_addr_t phys;
struct dma_chan *chan;
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
int (*desc_free)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback;
void *callback_param;
struct dmaengine_unmap_data *unmap;
@@ -584,6 +592,20 @@ struct dma_tx_state {
u32 residue;
};
/**
* enum dmaengine_alignment - defines alignment of the DMA async tx
* buffers
*/
enum dmaengine_alignment {
DMAENGINE_ALIGN_1_BYTE = 0,
DMAENGINE_ALIGN_2_BYTES = 1,
DMAENGINE_ALIGN_4_BYTES = 2,
DMAENGINE_ALIGN_8_BYTES = 3,
DMAENGINE_ALIGN_16_BYTES = 4,
DMAENGINE_ALIGN_32_BYTES = 5,
DMAENGINE_ALIGN_64_BYTES = 6,
};
/**
* struct dma_device - info on the entity supplying DMA services
* @chancnt: how many DMA channels are supported
@@ -616,6 +638,7 @@ struct dma_tx_state {
* @device_prep_dma_pq: prepares a pq operation
* @device_prep_dma_pq_val: prepares a pqzero_sum operation
* @device_prep_dma_memset: prepares a memset operation
* @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
* @device_prep_dma_interrupt: prepares an end of chain interrupt operation
* @device_prep_slave_sg: prepares a slave dma operation
* @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
@@ -645,10 +668,10 @@ struct dma_device {
dma_cap_mask_t cap_mask;
unsigned short max_xor;
unsigned short max_pq;
u8 copy_align;
u8 xor_align;
u8 pq_align;
u8 fill_align;
enum dmaengine_alignment copy_align;
enum dmaengine_alignment xor_align;
enum dmaengine_alignment pq_align;
enum dmaengine_alignment fill_align;
#define DMA_HAS_PQ_CONTINUE (1 << 15)
int dev_id;
@@ -682,6 +705,9 @@ struct dma_device {
struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
struct dma_chan *chan, struct scatterlist *sg,
unsigned int nents, int value, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
@@ -833,7 +859,8 @@ static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc
return desc->tx_submit(desc);
}
static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
static inline bool dmaengine_check_align(enum dmaengine_alignment align,
size_t off1, size_t off2, size_t len)
{
size_t mask;
@@ -1155,6 +1182,39 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
}
#endif
static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
struct dma_slave_caps caps;
dma_get_slave_caps(tx->chan, &caps);
if (caps.descriptor_reuse) {
tx->flags |= DMA_CTRL_REUSE;
return 0;
} else {
return -EPERM;
}
}
static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
{
tx->flags &= ~DMA_CTRL_REUSE;
}
static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
{
return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
}
static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
{
/* this is supported for reusable desc, so check that */
if (dmaengine_desc_test_reuse(desc))
return desc->desc_free(desc);
else
return -EPERM;
}
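A hedged sketch of the client-side flow these helpers enable (the wrapper name is hypothetical; the dmaengine calls themselves are existing API): mark a descriptor reusable before first submission, resubmit it as needed, and free it explicitly when done:

static void example_submit_reusable(struct dma_chan *chan,
				    struct dma_async_tx_descriptor *txd)
{
	if (dmaengine_desc_set_reuse(txd))
		return;		/* -EPERM: channel cannot reuse descriptors */

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);

	/* ... after completion the same txd may be resubmitted ... */

	dmaengine_desc_free(txd);	/* explicit release when finished */
}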
/* --- DMA device --- */
int dma_async_device_register(struct dma_device *device);
@@ -1169,7 +1229,7 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
static inline struct dma_chan
*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
dma_filter_fn fn, void *fn_param,
struct device *dev, char *name)
struct device *dev, const char *name)
{
struct dma_chan *chan;
@@ -1177,6 +1237,9 @@ static inline struct dma_chan
if (chan)
return chan;
if (!fn || !fn_param)
return NULL;
return __dma_request_channel(mask, fn, fn_param);
}
#endif /* DMAENGINE_H */

View File

@@ -24,6 +24,12 @@ void dma_pool_destroy(struct dma_pool *pool);
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle);
static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle)
{
return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);
}
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
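Note: a minimal caller sketch for the new zeroing wrapper (pool creation and error paths elided):

	dma_addr_t dma;
	void *vaddr;

	vaddr = dma_pool_zalloc(pool, GFP_KERNEL, &dma);	/* zeroed on success */
	if (!vaddr)
		return -ENOMEM;
	/* ... hand 'dma' to the device, use 'vaddr' from the CPU ... */
	dma_pool_free(pool, vaddr, dma);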
/*

View File

@@ -76,7 +76,7 @@ static inline bool is_link_local_ether_addr(const u8 *addr)
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
((a[2] ^ b[2]) & m)) == 0;
(__force int)((a[2] ^ b[2]) & m)) == 0;
#else
return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
#endif

View File

@@ -27,8 +27,6 @@
#define __LINUX_EXTCON_H__
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/sysfs.h>
/*
* Define the unique id of supported external connectors
@@ -77,8 +75,6 @@ struct extcon_cable;
* be attached simultaneously. {0x7, 0} is equivalent to
* {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
* can be no simultaneous connections.
* @print_state: An optional callback to override the method to print the
* status of the extcon device.
* @dev: Device of this extcon.
* @state: Attach/detach state of this extcon. Do not provide at
* register-time.
@@ -102,9 +98,6 @@ struct extcon_dev {
const unsigned int *supported_cable;
const u32 *mutually_exclusive;
/* Optional callbacks to override class functions */
ssize_t (*print_state)(struct extcon_dev *edev, char *buf);
/* Internal data. Please do not set. */
struct device dev;
struct raw_notifier_head *nh;

View File

@@ -417,15 +417,25 @@ typedef __le32 f2fs_hash_t;
#define GET_DENTRY_SLOTS(x) ((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)
/* the number of dentries in a block */
#define NR_DENTRY_IN_BLOCK 214
/* MAX level for dir lookup */
#define MAX_DIR_HASH_DEPTH 63
/* MAX buckets in one level of dir */
#define MAX_DIR_BUCKETS (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1))
/*
* space utilization of regular dentry and inline dentry
*              regular dentry          inline dentry
* bitmap       1 * 27 = 27             1 * 23 = 23
* reserved     1 * 3 = 3               1 * 7 = 7
* dentry       11 * 214 = 2354         11 * 182 = 2002
* filename     8 * 214 = 1712          8 * 182 = 1456
* total        4096                    3488
*
* Note: there is more reserved space in an inline dentry than in a regular
* dentry; when converting an inline dentry we should handle this carefully.
*/
#define NR_DENTRY_IN_BLOCK 214 /* the number of dentries in a block */
#define SIZE_OF_DIR_ENTRY 11 /* by byte */
#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
BITS_PER_BYTE)
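Note: the space-utilization figures in the comment above check out mechanically; a build-time sanity sketch, placeable in any function (the inline-dentry constants 23, 7 and 182 are taken from the comment, not from this hunk; F2FS_SLOT_LEN is the 8-byte filename slot):

	BUILD_BUG_ON(27 + 3 + (SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * NR_DENTRY_IN_BLOCK != 4096);
	BUILD_BUG_ON(23 + 7 + (SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * 182 != 3488);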

View File

@@ -788,7 +788,7 @@ struct dmt_videomode {
extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
extern const struct fb_videomode cea_modes[64];
extern const struct fb_videomode cea_modes[65];
extern const struct dmt_videomode dmt_modes[];
struct fb_modelist {

View File

@@ -86,8 +86,8 @@ static inline struct file *__fcheck_files(struct files_struct *files, unsigned i
static inline struct file *fcheck_files(struct files_struct *files, unsigned int fd)
{
rcu_lockdep_assert(rcu_read_lock_held() ||
lockdep_is_held(&files->file_lock),
RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
!lockdep_is_held(&files->file_lock),
"suspicious rcu_dereference_check() usage");
return __fcheck_files(files, fd);
}
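Note: a minimal caller sketch satisfying the converted RCU_LOCKDEP_WARN condition (error handling elided):

	struct file *file;

	rcu_read_lock();
	file = fcheck_files(current->files, fd);	/* lockdep: rcu_read_lock_held() */
	if (file && !atomic_long_inc_not_zero(&file->f_count))
		file = NULL;				/* lost a race with the final fput() */
	rcu_read_unlock();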

View File

@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <asm/cacheflush.h>
@@ -354,6 +355,16 @@ static inline unsigned int bpf_prog_size(unsigned int proglen)
offsetof(struct bpf_prog, insns[proglen]));
}
static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
{
/* When classic BPF programs have been loaded and the arch
* does not have a classic BPF JIT (anymore), they have been
* converted via bpf_migrate_filter() to eBPF and thus always
* have an unspec program type.
*/
return prog->type == BPF_PROG_TYPE_UNSPEC;
}
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
@@ -411,6 +422,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
void bpf_int_jit_compile(struct bpf_prog *fp);
bool bpf_helper_changes_skb_data(void *func);
#ifdef CONFIG_BPF_JIT
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
@@ -427,8 +439,9 @@ void bpf_jit_free(struct bpf_prog *fp);
static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
u32 pass, void *image)
{
pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
flen, proglen, pass, image);
pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
proglen, pass, image, current->comm, task_pid_nr(current));
if (image)
print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
16, 1, image, proglen, false);

View File

@@ -1,7 +1,6 @@
#ifndef _LINUX_FS_H
#define _LINUX_FS_H
#include <linux/linkage.h>
#include <linux/wait.h>
#include <linux/kdev_t.h>
@@ -30,6 +29,8 @@
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/blk_types.h>
#include <linux/workqueue.h>
#include <linux/percpu-rwsem.h>
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
@@ -51,11 +52,11 @@ struct swap_info_struct;
struct seq_file;
struct workqueue_struct;
struct iov_iter;
struct vm_fault;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
extern void __init files_init(unsigned long);
extern void __init files_init(void);
extern void __init files_maxfiles_init(void);
extern struct files_stat_struct files_stat;
extern unsigned long get_max_files(void);
@@ -635,7 +636,7 @@ struct inode {
unsigned long dirtied_time_when;
struct hlist_node i_hash;
struct list_head i_wb_list; /* backing dev IO list */
struct list_head i_io_list; /* backing dev IO list */
#ifdef CONFIG_CGROUP_WRITEBACK
struct bdi_writeback *i_wb; /* the associated cgroup wb */
@@ -942,12 +943,18 @@ struct lock_manager_operations {
struct lock_manager {
struct list_head list;
/*
* NFSv4 and up also want opens blocked during the grace period;
* NLM doesn't care:
*/
bool block_opens;
};
struct net;
void locks_start_grace(struct net *, struct lock_manager *);
void locks_end_grace(struct lock_manager *);
int locks_in_grace(struct net *);
int opens_in_grace(struct net *);
/* that will die - we need it for nfs_lock_info */
#include <linux/nfs_fs_i.h>
@@ -1259,6 +1266,7 @@ struct mm_struct;
/* sb->s_iflags */
#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
/* Possible states of 'frozen' field */
enum {
@@ -1273,16 +1281,9 @@ enum {
#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
struct sb_writers {
/* Counters for counting writers at each level */
struct percpu_counter counter[SB_FREEZE_LEVELS];
wait_queue_head_t wait; /* queue for waiting for
writers / faults to finish */
int frozen; /* Is sb frozen? */
wait_queue_head_t wait_unfrozen; /* queue for waiting for
sb to be thawed */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map lock_map[SB_FREEZE_LEVELS];
#endif
int frozen; /* Is sb frozen? */
wait_queue_head_t wait_unfrozen; /* for get_super_thawed() */
struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
};
struct super_block {
@@ -1308,7 +1309,6 @@ struct super_block {
#endif
const struct xattr_handler **s_xattr;
struct list_head s_inodes; /* all inodes */
struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
@@ -1374,11 +1374,18 @@ struct super_block {
struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
struct rcu_head rcu;
struct work_struct destroy_work;
struct mutex s_sync_lock; /* sync serialisation lock */
/*
* Indicates how deep in a filesystem stack this SB is
*/
int s_stack_depth;
/* s_inode_list_lock protects s_inodes */
spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp;
struct list_head s_inodes; /* all inodes */
};
extern struct timespec current_fs_time(struct super_block *sb);
@@ -1390,6 +1397,11 @@ extern struct timespec current_fs_time(struct super_block *sb);
void __sb_end_write(struct super_block *sb, int level);
int __sb_start_write(struct super_block *sb, int level, bool wait);
#define __sb_writers_acquired(sb, lev) \
percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
#define __sb_writers_release(sb, lev) \
percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
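Note: the two new macros only transfer lockdep ownership of the freeze-protection rwsem when write access is handed to another context (e.g. a journal thread); the lev-1 indexing matches SB_FREEZE_WRITE == 1. Ordinary users keep the simple pairing, sketched here:

	sb_start_write(sb);	/* read-acquires s_writers.rw_sem[SB_FREEZE_WRITE-1] */
	/* ... dirty the filesystem ... */
	sb_end_write(sb);	/* releases it; a freezer blocks in percpu_down_write() */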
/**
* sb_end_write - drop write access to a superblock
* @sb: the super we wrote to
@@ -1610,7 +1622,6 @@ struct file_operations {
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
int (*mremap)(struct file *, struct vm_area_struct *);
int (*open) (struct inode *, struct file *);
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
@@ -2245,7 +2256,7 @@ extern int ioctl_preallocate(struct file *filp, void __user *argp);
/* fs/dcache.c */
extern void __init vfs_caches_init_early(void);
extern void __init vfs_caches_init(unsigned long);
extern void __init vfs_caches_init(void);
extern struct kmem_cache *names_cachep;
@@ -2607,7 +2618,7 @@ static inline void insert_inode_hash(struct inode *inode)
extern void __remove_inode_hash(struct inode *);
static inline void remove_inode_hash(struct inode *inode)
{
if (!inode_unhashed(inode))
if (!inode_unhashed(inode) && !hlist_fake(&inode->i_hash))
__remove_inode_hash(inode);
}
@@ -2666,19 +2677,6 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
get_block_t, dio_iodone_t, int flags);
int dax_clear_blocks(struct inode *, sector_t block, long size);
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
int dax_truncate_page(struct inode *, loff_t from, get_block_t);
int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
dax_iodone_t);
int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
dax_iodone_t);
int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
#define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod)
#define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod)
#ifdef CONFIG_BLOCK
typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
loff_t file_offset);
@@ -3040,4 +3038,6 @@ static inline bool dir_relax(struct inode *inode)
return !IS_DEADDIR(inode);
}
extern bool path_noexec(const struct path *path);
#endif /* _LINUX_FS_H */

View File

@@ -20,11 +20,6 @@
#define FSL_UTMI_PHY_DLY 10 /* As per P1010RM, delay for UTMI
PHY CLK to become stable - 10ms */
#define FSL_USB_PHY_CLK_TIMEOUT 10000 /* uSec */
#define FSL_USB_VER_OLD 0
#define FSL_USB_VER_1_6 1
#define FSL_USB_VER_2_2 2
#define FSL_USB_VER_2_4 3
#define FSL_USB_VER_2_5 4
#include <linux/types.h>
@@ -52,6 +47,15 @@
*
*/
enum fsl_usb2_controller_ver {
FSL_USB_VER_NONE = -1,
FSL_USB_VER_OLD = 0,
FSL_USB_VER_1_6 = 1,
FSL_USB_VER_2_2 = 2,
FSL_USB_VER_2_4 = 3,
FSL_USB_VER_2_5 = 4,
};
enum fsl_usb2_operating_modes {
FSL_USB2_MPH_HOST,
FSL_USB2_DR_HOST,
@@ -65,6 +69,7 @@ enum fsl_usb2_phy_modes {
FSL_USB2_PHY_UTMI,
FSL_USB2_PHY_UTMI_WIDE,
FSL_USB2_PHY_SERIAL,
FSL_USB2_PHY_UTMI_DUAL,
};
struct clk;
@@ -72,7 +77,7 @@ struct platform_device;
struct fsl_usb2_platform_data {
/* board specific information */
int controller_ver;
enum fsl_usb2_controller_ver controller_ver;
enum fsl_usb2_operating_modes operating_mode;
enum fsl_usb2_phy_modes phy_mode;
unsigned int port_enables;
@@ -93,6 +98,9 @@ struct fsl_usb2_platform_data {
unsigned suspended:1;
unsigned already_suspended:1;
unsigned has_fsl_erratum_a007792:1;
unsigned has_fsl_erratum_a005275:1;
unsigned check_phy_clk_valid:1;
/* register save area for suspend/resume */
u32 pm_command;

View File

@@ -841,9 +841,59 @@ struct fsl_ifc_ctrl {
u32 nand_stat;
wait_queue_head_t nand_wait;
bool little_endian;
};
extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
static inline u32 ifc_in32(void __iomem *addr)
{
u32 val;
if (fsl_ifc_ctrl_dev->little_endian)
val = ioread32(addr);
else
val = ioread32be(addr);
return val;
}
static inline u16 ifc_in16(void __iomem *addr)
{
u16 val;
if (fsl_ifc_ctrl_dev->little_endian)
val = ioread16(addr);
else
val = ioread16be(addr);
return val;
}
static inline u8 ifc_in8(void __iomem *addr)
{
return ioread8(addr);
}
static inline void ifc_out32(u32 val, void __iomem *addr)
{
if (fsl_ifc_ctrl_dev->little_endian)
iowrite32(val, addr);
else
iowrite32be(val, addr);
}
static inline void ifc_out16(u16 val, void __iomem *addr)
{
if (fsl_ifc_ctrl_dev->little_endian)
iowrite16(val, addr);
else
iowrite16be(val, addr);
}
static inline void ifc_out8(u8 val, void __iomem *addr)
{
iowrite8(val, addr);
}
#endif /* __ASM_FSL_IFC_H */
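Note: a usage sketch for the new endian-aware accessors; the register picked here (the NAND event status register) is illustrative:

	struct fsl_ifc_regs __iomem *regs = fsl_ifc_ctrl_dev->regs;
	u32 stat;

	/* ifc_in32()/ifc_out32() honor the little_endian flag set at probe */
	stat = ifc_in32(&regs->ifc_nand.nand_evter_stat);
	ifc_out32(stat, &regs->ifc_nand.nand_evter_stat);	/* write-1-to-clear */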

View File

@@ -195,40 +195,49 @@ struct fsnotify_group {
#define FSNOTIFY_EVENT_INODE 2
/*
* a mark is simply an object attached to an in core inode which allows an
* A mark is simply an object attached to an in core inode which allows an
* fsnotify listener to indicate they are either no longer interested in events
* of a type matching mask or only interested in those events.
*
* these are flushed when an inode is evicted from core and may be flushed
* when the inode is modified (as seen by fsnotify_access). Some fsnotify users
* (such as dnotify) will flush these when the open fd is closed and not at
* inode eviction or modification.
* These are flushed when an inode is evicted from core and may be flushed
* when the inode is modified (as seen by fsnotify_access). Some fsnotify
* users (such as dnotify) will flush these when the open fd is closed and not
* at inode eviction or modification.
*
* Text in brackets is showing the lock(s) protecting modifications of a
* particular entry. obj_lock means either inode->i_lock or
* mnt->mnt_root->d_lock depending on the mark type.
*/
struct fsnotify_mark {
__u32 mask; /* mask this mark is for */
/* we hold ref for each i_list and g_list. also one ref for each 'thing'
/* Mask this mark is for [mark->lock, group->mark_mutex] */
__u32 mask;
/* We hold one for presence in g_list. Also one ref for each 'thing'
* in kernel that found and may be using this mark. */
atomic_t refcnt; /* active things looking at this mark */
struct fsnotify_group *group; /* group this mark is for */
struct list_head g_list; /* list of marks by group->i_fsnotify_marks
* Also reused for queueing mark into
* destroy_list when it's waiting for
* the end of SRCU period before it can
* be freed */
spinlock_t lock; /* protect group and inode */
struct hlist_node obj_list; /* list of marks for inode / vfsmount */
struct list_head free_list; /* tmp list used when freeing this mark */
union {
atomic_t refcnt;
/* Group this mark is for. Set on mark creation, stable until last ref
* is dropped */
struct fsnotify_group *group;
/* List of marks by group->i_fsnotify_marks. Also reused for queueing
* mark into destroy_list when it's waiting for the end of SRCU period
* before it can be freed. [group->mark_mutex] */
struct list_head g_list;
/* Protects inode / mnt pointers, flags, masks */
spinlock_t lock;
/* List of marks for inode / vfsmount [obj_lock] */
struct hlist_node obj_list;
union { /* Object pointer [mark->lock, group->mark_mutex] */
struct inode *inode; /* inode this mark is associated with */
struct vfsmount *mnt; /* vfsmount this mark is associated with */
};
__u32 ignored_mask; /* events types to ignore */
/* Events types to ignore [mark->lock, group->mark_mutex] */
__u32 ignored_mask;
#define FSNOTIFY_MARK_FLAG_INODE 0x01
#define FSNOTIFY_MARK_FLAG_VFSMOUNT 0x02
#define FSNOTIFY_MARK_FLAG_OBJECT_PINNED 0x04
#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08
#define FSNOTIFY_MARK_FLAG_ALIVE 0x10
unsigned int flags; /* vfsmount or inode mark? */
#define FSNOTIFY_MARK_FLAG_ATTACHED 0x20
unsigned int flags; /* flags [mark->lock] */
void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */
};
@@ -345,8 +354,10 @@ extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, struct fsnotify_
/* given a group and a mark, flag mark to be freed when all references are dropped */
extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group);
extern void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
struct fsnotify_group *group);
/* detach mark from inode / mount list, group list, drop inode reference */
extern void fsnotify_detach_mark(struct fsnotify_mark *mark);
/* free mark */
extern void fsnotify_free_mark(struct fsnotify_mark *mark);
/* run all the marks in a group, and clear all of the vfsmount marks */
extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group);
/* run all the marks in a group, and clear all of the inode marks */
@@ -357,7 +368,7 @@ extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, un
extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_unmount_inodes(struct list_head *list);
extern void fsnotify_unmount_inodes(struct super_block *sb);
/* put here because inotify does some weird stuff when destroying watches */
extern void fsnotify_init_event(struct fsnotify_event *event,
@@ -393,7 +404,7 @@ static inline u32 fsnotify_get_cookie(void)
return 0;
}
static inline void fsnotify_unmount_inodes(struct list_head *list)
static inline void fsnotify_unmount_inodes(struct super_block *sb)
{}
#endif /* CONFIG_FSNOTIFY */

View File

@@ -116,6 +116,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* SAVE_REGS. If another ops with this flag set is already registered
* for any of the functions that this ops will be registered for, then
* this ops will fail to register or set_filter_ip.
* PID - Is affected by set_ftrace_pid (allows filtering on those pids)
*/
enum {
FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -132,6 +133,7 @@ enum {
FTRACE_OPS_FL_MODIFYING = 1 << 11,
FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
FTRACE_OPS_FL_IPMODIFY = 1 << 13,
FTRACE_OPS_FL_PID = 1 << 14,
};
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -159,6 +161,7 @@ struct ftrace_ops {
struct ftrace_ops *next;
unsigned long flags;
void *private;
ftrace_func_t saved_func;
int __percpu *disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
int nr_trampolines;

View File

@@ -59,6 +59,8 @@ struct gen_pool {
genpool_algo_t algo; /* allocation function */
void *data;
const char *name;
};
/*
@@ -118,8 +120,8 @@ extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data);
extern struct gen_pool *devm_gen_pool_create(struct device *dev,
int min_alloc_order, int nid);
extern struct gen_pool *gen_pool_get(struct device *dev);
int min_alloc_order, int nid, const char *name);
extern struct gen_pool *gen_pool_get(struct device *dev, const char *name);
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
size_t size);

View File

@@ -13,6 +13,7 @@
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#ifdef CONFIG_BLOCK
@@ -124,7 +125,7 @@ struct hd_struct {
#else
struct disk_stats dkstats;
#endif
atomic_t ref;
struct percpu_ref ref;
struct rcu_head rcu_head;
};
@@ -611,7 +612,7 @@ extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
sector_t len, int flags,
struct partition_meta_info
*info);
extern void __delete_partition(struct hd_struct *);
extern void __delete_partition(struct percpu_ref *);
extern void delete_partition(struct gendisk *, int);
extern void printk_all_partitions(void);
@@ -640,27 +641,39 @@ extern ssize_t part_fail_store(struct device *dev,
const char *buf, size_t count);
#endif /* CONFIG_FAIL_MAKE_REQUEST */
static inline void hd_ref_init(struct hd_struct *part)
static inline int hd_ref_init(struct hd_struct *part)
{
atomic_set(&part->ref, 1);
smp_mb();
if (percpu_ref_init(&part->ref, __delete_partition, 0,
GFP_KERNEL))
return -ENOMEM;
return 0;
}
static inline void hd_struct_get(struct hd_struct *part)
{
atomic_inc(&part->ref);
smp_mb__after_atomic();
percpu_ref_get(&part->ref);
}
static inline int hd_struct_try_get(struct hd_struct *part)
{
return atomic_inc_not_zero(&part->ref);
return percpu_ref_tryget_live(&part->ref);
}
static inline void hd_struct_put(struct hd_struct *part)
{
if (atomic_dec_and_test(&part->ref))
__delete_partition(part);
percpu_ref_put(&part->ref);
}
static inline void hd_struct_kill(struct hd_struct *part)
{
percpu_ref_kill(&part->ref);
}
static inline void hd_free_part(struct hd_struct *part)
{
free_part_stats(part);
free_part_info(part);
percpu_ref_exit(&part->ref);
}
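Note: the conversion changes the lifecycle: instead of a final atomic_dec_and_test() freeing the partition, the ref must now be explicitly killed. A sketch of the sequencing this implies (simplified):

	hd_ref_init(part);	/* percpu_ref_init(..., __delete_partition, ...) */
	/* ... */
	hd_struct_get(part);	/* per-CPU fast path, no shared atomics */
	hd_struct_put(part);
	/* ... on partition removal ... */
	hd_struct_kill(part);	/* switch to atomic mode; last put -> __delete_partition() */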
/*

View File

@@ -63,7 +63,10 @@ struct vm_area_struct;
* but it is definitely preferable to use the flag rather than opencode endless
* loop around allocator.
*
* __GFP_NORETRY: The VM implementation must not retry indefinitely.
* __GFP_NORETRY: The VM implementation must not retry indefinitely and will
* return NULL when direct reclaim and memory compaction have failed to allow
* the allocation to succeed. The OOM killer is not called with the current
* implementation.
*
* __GFP_MOVABLE: Flag that this page will be movable by the page migration
* mechanism or reclaimed
@@ -300,22 +303,31 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order,
return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
unsigned int order)
/*
* Allocate pages, preferring the node given as nid. The node must be valid and
* online. For a more general interface, see alloc_pages_node().
*/
static inline struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
/* Unknown node is current node */
if (nid < 0)
nid = numa_node_id();
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
VM_WARN_ON(!node_online(nid));
return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
/*
* Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
* prefer the current CPU's closest node. Otherwise node must be valid and
* online.
*/
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
unsigned int order)
{
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid));
if (nid == NUMA_NO_NODE)
nid = numa_mem_id();
return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
return __alloc_pages_node(nid, gfp_mask, order);
}
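Note: a caller sketch distinguishing the two entry points after this change:

	/* Known-valid, online node: skip the NUMA_NO_NODE check */
	page = __alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);

	/* Possibly NUMA_NO_NODE: falls back to the closest memory node */
	page = alloc_pages_node(dev_to_node(dev), GFP_KERNEL, order);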
#ifdef CONFIG_NUMA
@@ -354,7 +366,6 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
/* This is different from alloc_pages_exact_node !!! */
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
#define __get_free_page(gfp_mask) \

View File

@@ -47,17 +47,17 @@ enum gpiod_flags {
int gpiod_count(struct device *dev, const char *con_id);
/* Acquire and dispose GPIOs */
struct gpio_desc *__must_check __gpiod_get(struct device *dev,
struct gpio_desc *__must_check gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags);
struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev,
struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
const char *con_id,
unsigned int index,
enum gpiod_flags flags);
@@ -70,18 +70,18 @@ struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev,
void gpiod_put(struct gpio_desc *desc);
void gpiod_put_array(struct gpio_descs *descs);
struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags);
struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_desc *__must_check
__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags);
struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
const char *con_id,
@@ -146,31 +146,31 @@ static inline int gpiod_count(struct device *dev, const char *con_id)
return 0;
}
static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
static inline struct gpio_desc *__must_check gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
__gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags)
gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
__gpiod_get_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
gpiod_get_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
__gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags)
gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
@@ -206,7 +206,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs)
}
static inline struct gpio_desc *__must_check
__devm_gpiod_get(struct device *dev,
devm_gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
{
@@ -214,7 +214,7 @@ __devm_gpiod_get(struct device *dev,
}
static inline
struct gpio_desc *__must_check
__devm_gpiod_get_index(struct device *dev,
devm_gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags)
@@ -223,14 +223,14 @@ __devm_gpiod_get_index(struct device *dev,
}
static inline struct gpio_desc *__must_check
__devm_gpiod_get_optional(struct device *dev, const char *con_id,
devm_gpiod_get_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
@@ -424,42 +424,6 @@ static inline struct gpio_desc *devm_get_gpiod_from_child(
#endif /* CONFIG_GPIOLIB */
/*
* Vararg-hacks! This is done to transition the kernel to always pass
* the options flags argument to the below functions. During a transition
* phase these vararg macros make both old-and-newstyle code compile,
* but when all calls to the elder API are removed, these should go away
* and the __gpiod_get() etc functions above be renamed just gpiod_get()
* etc.
*/
#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
#define gpiod_get(varargs...) __gpiod_get(varargs, GPIOD_ASIS)
#define __gpiod_get_index(dev, con_id, index, flags, ...) \
__gpiod_get_index(dev, con_id, index, flags)
#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, GPIOD_ASIS)
#define __gpiod_get_optional(dev, con_id, flags, ...) \
__gpiod_get_optional(dev, con_id, flags)
#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, GPIOD_ASIS)
#define __gpiod_get_index_optional(dev, con_id, index, flags, ...) \
__gpiod_get_index_optional(dev, con_id, index, flags)
#define gpiod_get_index_optional(varargs...) \
__gpiod_get_index_optional(varargs, GPIOD_ASIS)
#define __devm_gpiod_get(dev, con_id, flags, ...) \
__devm_gpiod_get(dev, con_id, flags)
#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, GPIOD_ASIS)
#define __devm_gpiod_get_index(dev, con_id, index, flags, ...) \
__devm_gpiod_get_index(dev, con_id, index, flags)
#define devm_gpiod_get_index(varargs...) \
__devm_gpiod_get_index(varargs, GPIOD_ASIS)
#define __devm_gpiod_get_optional(dev, con_id, flags, ...) \
__devm_gpiod_get_optional(dev, con_id, flags)
#define devm_gpiod_get_optional(varargs...) \
__devm_gpiod_get_optional(varargs, GPIOD_ASIS)
#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...) \
__devm_gpiod_get_index_optional(dev, con_id, index, flags)
#define devm_gpiod_get_index_optional(varargs...) \
__devm_gpiod_get_index_optional(varargs, GPIOD_ASIS)
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
int gpiod_export(struct gpio_desc *desc, bool direction_may_change);

View File

@@ -6,6 +6,7 @@
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/lockdep.h>
#include <linux/pinctrl/pinctrl.h>
struct device;
@@ -64,6 +65,17 @@ struct seq_file;
* registers.
* @irq_not_threaded: flag must be set if @can_sleep is set but the
* IRQs don't need to be threaded
* @irqchip: GPIO IRQ chip impl, provided by GPIO driver
* @irqdomain: Interrupt translation domain; responsible for mapping
* between GPIO hwirq number and linux irq number
* @irq_base: first linux IRQ number assigned to GPIO IRQ chip (deprecated)
* @irq_handler: the irq handler to use (often a predefined irq core function)
* for GPIO IRQs, provided by GPIO driver
* @irq_default_type: default IRQ triggering type applied during GPIO driver
* initialization, provided by GPIO driver
* @irq_parent: GPIO IRQ chip parent/bank linux irq number,
* provided by GPIO driver
* @lock_key: per GPIO IRQ chip lockdep class
*
* A gpio_chip can help platforms abstract various sources of GPIOs so
* they can all be accessed through a common programming interface.
@@ -126,6 +138,7 @@ struct gpio_chip {
irq_flow_handler_t irq_handler;
unsigned int irq_default_type;
int irq_parent;
struct lock_class_key *lock_key;
#endif
#if defined(CONFIG_OF_GPIO)
@@ -171,11 +184,25 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
int parent_irq,
irq_flow_handler_t parent_handler);
int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type);
int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type,
struct lock_class_key *lock_key);
#ifdef CONFIG_LOCKDEP
#define gpiochip_irqchip_add(...) \
( \
({ \
static struct lock_class_key _key; \
_gpiochip_irqchip_add(__VA_ARGS__, &_key); \
}) \
)
#else
#define gpiochip_irqchip_add(...) \
_gpiochip_irqchip_add(__VA_ARGS__, NULL)
#endif
#endif /* CONFIG_GPIOLIB_IRQCHIP */
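Note: call sites are unchanged; under CONFIG_LOCKDEP the wrapper macro mints one static lock_class_key per call site. A typical invocation (handler and trigger type are common choices, not mandated):

	ret = gpiochip_irqchip_add(&mychip->gc, &mychip->irq_chip, 0,
				   handle_level_irq, IRQ_TYPE_NONE);
	if (ret)
		return ret;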

View File

@@ -57,5 +57,6 @@ struct gpiod_lookup_table {
}
void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
#endif /* __LINUX_GPIO_MACHINE_H */

View File

@@ -33,6 +33,8 @@ extern int move_huge_pmd(struct vm_area_struct *vma,
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot,
int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
unsigned long pfn, bool write);
enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_FLAG,
@@ -122,7 +124,7 @@ extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice);
extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
long adjust_next);
@@ -138,15 +140,6 @@ static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
else
return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
long adjust_next)
{
if (!vma->anon_vma || vma->vm_ops)
return;
__vma_adjust_trans_huge(vma, start, end, adjust_next);
}
static inline int hpage_nr_pages(struct page *page)
{
if (unlikely(PageTransHuge(page)))
@@ -164,6 +157,13 @@ static inline bool is_huge_zero_page(struct page *page)
return ACCESS_ONCE(huge_zero_page) == page;
}
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
return is_huge_zero_page(pmd_page(pmd));
}
struct page *get_huge_zero_page(void);
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })

View File

@@ -35,6 +35,9 @@ struct resv_map {
struct kref refs;
spinlock_t lock;
struct list_head regions;
long adds_in_progress;
struct list_head region_cache;
long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
@@ -80,11 +83,18 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
struct vm_area_struct *vma,
struct address_space *mapping,
pgoff_t idx, unsigned long address);
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
@@ -320,9 +330,13 @@ struct huge_bootmem_page {
#endif
};
struct page *alloc_huge_page(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
pgoff_t idx);
/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);
@@ -471,6 +485,7 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL

View File

@@ -977,6 +977,11 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
resource_size_t min, resource_size_t max,
resource_size_t size, resource_size_t align,
bool fb_overlap_ok);
/**
* VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device
*
@@ -1233,8 +1238,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
extern struct resource hyperv_mmio;
/*
* Negotiated version with the Host.
*/

View File

@@ -121,6 +121,9 @@ extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
u8 command, u8 length,
const u8 *values);
extern s32
i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
u8 command, u8 length, u8 *values);
#endif /* I2C */
/**
@@ -550,11 +553,12 @@ void i2c_lock_adapter(struct i2c_adapter *);
void i2c_unlock_adapter(struct i2c_adapter *);
/*flags for the client struct: */
#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
/* Must equal I2C_M_TEN below */
#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */
#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
/* Must match I2C_M_STOP|IGNORE_NAK */
/* i2c adapter classes (bitmask) */
@@ -638,6 +642,8 @@ extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
/* must call put_device() when done with returned i2c_adapter device */
extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
/* must call i2c_put_adapter() when done with returned i2c_adapter device */
struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node);
#else
static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
@@ -649,6 +655,11 @@ static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node
{
return NULL;
}
static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
{
return NULL;
}
#endif /* CONFIG_OF */
#endif /* _LINUX_I2C_H */

View File

@@ -2074,8 +2074,8 @@ enum ieee80211_tdls_actioncode {
#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6)
#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7)
#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(5)
#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6)
#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7)
/* TDLS specific payload type in the LLC/SNAP header */
#define WLAN_TDLS_SNAP_RFTYPE 0x2

View File

@@ -37,6 +37,7 @@ static inline struct igmpv3_query *
return (struct igmpv3_query *)skb_transport_header(skb);
}
extern int sysctl_igmp_llm_reports;
extern int sysctl_igmp_max_memberships;
extern int sysctl_igmp_max_msf;
extern int sysctl_igmp_qrv;

View File

@@ -166,6 +166,7 @@ struct st_sensor_transfer_function {
/**
* struct st_sensor_settings - ST specific sensor settings
* @wai: Contents of WhoAmI register.
* @wai_addr: The address of WhoAmI register.
* @sensors_supported: List of supported sensors by struct itself.
* @ch: IIO channels for the sensor.
* @odr: Output data rate register and ODR list available.
@@ -179,6 +180,7 @@ struct st_sensor_transfer_function {
*/
struct st_sensor_settings {
u8 wai;
u8 wai_addr;
char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME];
struct iio_chan_spec *ch;
int num_ch;

View File

@@ -100,7 +100,7 @@ void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff);
/**
* iio_channel_cb_get_channels() - get access to the underlying channels.
* @cb_buff: The callback buffer from whom we want the channel
* @cb_buffer: The callback buffer from whom we want the channel
* information.
*
* This function allows one to obtain information about the channels.

View File

@@ -644,6 +644,15 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
*/
#define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)
/**
* IIO_RAD_TO_DEGREE() - Convert rad to degree
* @rad: A value in rad
*
* Returns the given value converted from rad to degree
*/
#define IIO_RAD_TO_DEGREE(rad) \
(((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL)
/**
* IIO_G_TO_M_S_2() - Convert g to meter / second**2
* @g: A value in g
@@ -652,4 +661,12 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
*/
#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)
/**
* IIO_M_S_2_TO_G() - Convert meter / second**2 to g
* @ms2: A value in meter / second**2
*
* Returns the given value converted from meter / second**2 to g
*/
#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)
#endif /* _INDUSTRIAL_IO_H_ */
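Note: a quick arithmetic check of the new macros, in the same fixed-point scale as the existing IIO_DEGREE_TO_RAD: IIO_RAD_TO_DEGREE(314159) evaluates to 18000000 (pi rad -> 180 degrees) and IIO_M_S_2_TO_G(980665) to 100000 (9.80665 m/s^2 -> 1 g), so both round-trip the forward conversions exactly at these points.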

View File

@@ -18,7 +18,8 @@ struct iio_chan_spec;
* struct iio_dev_attr - iio specific device attribute
* @dev_attr: underlying device attribute
* @address: associated register address
* @l: list head for maintaining list of dynamically created attrs.
* @l: list head for maintaining list of dynamically created attrs
* @c: specification for the underlying channel
*/
struct iio_dev_attr {
struct device_attribute dev_attr;

View File

@@ -18,6 +18,9 @@ struct iio_subirq {
bool enabled;
};
struct iio_dev;
struct iio_trigger;
/**
* struct iio_trigger_ops - operations structure for an iio_trigger.
* @owner: used to monitor usage count of the trigger.

View File

@@ -7,8 +7,8 @@ struct iio_dev;
struct iio_buffer_setup_ops;
int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
irqreturn_t (*pollfunc_bh)(int irq, void *p),
irqreturn_t (*pollfunc_th)(int irq, void *p),
irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
const struct iio_buffer_setup_ops *setup_ops);
void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev);

View File

@@ -25,6 +25,13 @@
extern struct files_struct init_files;
extern struct fs_struct init_fs;
#ifdef CONFIG_CGROUPS
#define INIT_GROUP_RWSEM(sig) \
.group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
#else
#define INIT_GROUP_RWSEM(sig)
#endif
#ifdef CONFIG_CPUSETS
#define INIT_CPUSET_SEQ(tsk) \
.mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
@@ -32,6 +39,14 @@ extern struct fs_struct init_fs;
#define INIT_CPUSET_SEQ(tsk)
#endif
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define INIT_PREV_CPUTIME(x) .prev_cputime = { \
.lock = __RAW_SPIN_LOCK_UNLOCKED(x.prev_cputime.lock), \
},
#else
#define INIT_PREV_CPUTIME(x)
#endif
#define INIT_SIGNALS(sig) { \
.nr_threads = 1, \
.thread_head = LIST_HEAD_INIT(init_task.thread_node), \
@@ -46,8 +61,10 @@ extern struct fs_struct init_fs;
.cputime_atomic = INIT_CPUTIME_ATOMIC, \
.running = 0, \
}, \
INIT_PREV_CPUTIME(sig) \
.cred_guard_mutex = \
__MUTEX_INITIALIZER(sig.cred_guard_mutex), \
INIT_GROUP_RWSEM(sig) \
}
extern struct nsproxy init_nsproxy;
@@ -246,6 +263,7 @@ extern struct task_group root_task_group;
INIT_TASK_RCU_TASKS(tsk) \
INIT_CPUSET_SEQ(tsk) \
INIT_RT_MUTEXES(tsk) \
INIT_PREV_CPUTIME(tsk) \
INIT_VTIME(tsk) \
INIT_NUMA_BALANCING(tsk) \
INIT_KASAN(tsk) \

View File

@@ -344,7 +344,7 @@ struct intel_iommu {
#ifdef CONFIG_INTEL_IOMMU
unsigned long *domain_ids; /* bitmap of domains */
struct dmar_domain **domains; /* ptr to domains */
struct dmar_domain ***domains; /* ptr to domains */
spinlock_t lock; /* protect context, domain ids */
struct root_entry *root_entry; /* virtual address */

View File

@@ -21,7 +21,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <asm/io.h>
#include <linux/io.h>
#include <asm/page.h>
/*

View File

@@ -20,10 +20,13 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <asm/io.h>
#include <asm/page.h>
struct device;
struct resource;
__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
@@ -80,6 +83,27 @@ int check_signature(const volatile void __iomem *io_addr,
const unsigned char *signature, int length);
void devm_ioremap_release(struct device *dev, void *res);
void *devm_memremap(struct device *dev, resource_size_t offset,
size_t size, unsigned long flags);
void devm_memunmap(struct device *dev, void *addr);
void *__devm_memremap_pages(struct device *dev, struct resource *res);
#ifdef CONFIG_ZONE_DEVICE
void *devm_memremap_pages(struct device *dev, struct resource *res);
#else
static inline void *devm_memremap_pages(struct device *dev, struct resource *res)
{
/*
* Fail attempts to call devm_memremap_pages() without
* ZONE_DEVICE support enabled; this requires callers to fall
* back to plain devm_memremap() based on config
*/
WARN_ON_ONCE(1);
return ERR_PTR(-ENXIO);
}
#endif
/*
* Some systems do not have legacy ISA devices.
* /dev/port is not a valid interface on these systems.
@@ -121,4 +145,13 @@ static inline int arch_phys_wc_index(int handle)
#endif
#endif
enum {
/* See memremap() kernel-doc for usage description... */
MEMREMAP_WB = 1 << 0,
MEMREMAP_WT = 1 << 1,
};
void *memremap(resource_size_t offset, size_t size, unsigned long flags);
void memunmap(void *addr);
#endif /* _LINUX_IO_H */
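Note: a usage sketch of the new mapping interface (the resource is a placeholder):

	void *virt;

	virt = memremap(res->start, resource_size(res), MEMREMAP_WB);
	if (!virt)
		return -ENOMEM;
	/* ... access as ordinary cacheable memory ... */
	memunmap(virt);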

View File

@@ -115,6 +115,11 @@ struct ipmi_smi_handlers {
implement it. */
void (*set_need_watch)(void *send_info, bool enable);
/*
* Called when flushing all pending messages.
*/
void (*flush_messages)(void *send_info);
/* Called when the interface should go into "run to
completion" mode. If this call sets the value to true, the
interface should make sure that all messages are flushed
@@ -207,7 +212,7 @@ static inline int ipmi_demangle_device_id(const unsigned char *data,
upper layer until the start_processing() function in the handlers
is called, and the lower layer must get the interface from that
call. */
int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
void *send_info,
struct ipmi_device_id *device_id,
struct device *dev,

View File

@@ -29,7 +29,9 @@ struct ipv6_devconf {
__s32 max_desync_factor;
__s32 max_addresses;
__s32 accept_ra_defrtr;
__s32 accept_ra_min_hop_limit;
__s32 accept_ra_pinfo;
__s32 ignore_routes_with_linkdown;
#ifdef CONFIG_IPV6_ROUTER_PREF
__s32 accept_ra_rtr_pref;
__s32 rtr_probe_interval;
@@ -57,6 +59,7 @@ struct ipv6_devconf {
bool initialized;
struct in6_addr secret;
} stable_secret;
__s32 use_oif_addrs_only;
void *sysctl;
};
@@ -94,7 +97,6 @@ static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
struct inet6_skb_parm {
int iif;
__be16 ra;
__u16 hop;
__u16 dst0;
__u16 srcrt;
__u16 dst1;
@@ -111,6 +113,7 @@ struct inet6_skb_parm {
#define IP6SKB_REROUTED 4
#define IP6SKB_ROUTERALERT 8
#define IP6SKB_FRAGMENTED 16
#define IP6SKB_HOPBYHOP 32
};
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))

View File

@@ -110,8 +110,8 @@ enum {
/*
* Return value for chip->irq_set_affinity()
*
* IRQ_SET_MASK_OK - OK, core updates irq_data.affinity
* IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity
* IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity
* IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity
* IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to
* support stacked irqchips, which indicates skipping
* all descendent irqchips.
@@ -129,9 +129,19 @@ struct irq_domain;
* struct irq_common_data - per irq data shared by all irqchips
* @state_use_accessors: status information for irq chip functions.
* Use accessor functions to deal with it
* @node: node index useful for balancing
* @handler_data: per-IRQ data for the irq_chip methods
* @affinity: IRQ affinity on SMP
* @msi_desc: MSI descriptor
*/
struct irq_common_data {
unsigned int state_use_accessors;
#ifdef CONFIG_NUMA
unsigned int node;
#endif
void *handler_data;
struct msi_desc *msi_desc;
cpumask_var_t affinity;
};
/**
@@ -139,38 +149,26 @@ struct irq_common_data {
* @mask: precomputed bitmask for accessing the chip registers
* @irq: interrupt number
* @hwirq: hardware interrupt number, local to the interrupt domain
* @node: node index useful for balancing
* @common: point to data shared by all irqchips
* @chip: low level interrupt hardware access
* @domain: Interrupt translation domain; responsible for mapping
* between hwirq number and linux irq number.
* @parent_data: pointer to parent struct irq_data to support hierarchy
* irq_domain
* @handler_data: per-IRQ data for the irq_chip methods
* @chip_data: platform-specific per-chip private data for the chip
* methods, to allow shared chip implementations
* @msi_desc: MSI descriptor
* @affinity: IRQ affinity on SMP
*
* The fields here need to overlay the ones in irq_desc until we
* cleaned up the direct references and switched everything over to
* irq_data.
*/
struct irq_data {
u32 mask;
unsigned int irq;
unsigned long hwirq;
unsigned int node;
struct irq_common_data *common;
struct irq_chip *chip;
struct irq_domain *domain;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
struct irq_data *parent_data;
#endif
void *handler_data;
void *chip_data;
struct msi_desc *msi_desc;
cpumask_var_t affinity;
};
/*
@@ -190,6 +188,7 @@ struct irq_data {
* IRQD_IRQ_MASKED - Masked state of the interrupt
* IRQD_IRQ_INPROGRESS - In progress state of the interrupt
* IRQD_WAKEUP_ARMED - Wakeup mode armed
* IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
@@ -204,6 +203,7 @@ enum {
IRQD_IRQ_MASKED = (1 << 17),
IRQD_IRQ_INPROGRESS = (1 << 18),
IRQD_WAKEUP_ARMED = (1 << 19),
IRQD_FORWARDED_TO_VCPU = (1 << 20),
};
#define __irqd_to_state(d) ((d)->common->state_use_accessors)
@@ -282,6 +282,20 @@ static inline bool irqd_is_wakeup_armed(struct irq_data *d)
return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
}
static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU;
}
static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d)
{
__irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU;
}
static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
{
__irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
}
/*
* Functions for chained handlers which can be enabled/disabled by the
@@ -324,8 +338,10 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
* @irq_cpu_online: configure an interrupt source for a secondary CPU
* @irq_cpu_offline: un-configure an interrupt source for a secondary CPU
* @irq_suspend: function called from core code on suspend once per chip
* @irq_resume: function called from core code on resume once per chip
* @irq_suspend: function called from core code on suspend once per
* chip, when one or more interrupts are installed
* @irq_resume: function called from core code on resume once per chip,
* when one or more interrupts are installed
* @irq_pm_shutdown: function called from core code on shutdown once per chip
* @irq_calc_mask: Optional function to set irq_data.mask for special cases
* @irq_print_chip: optional to print special chip info in show_interrupts
@@ -459,14 +475,14 @@ static inline int irq_set_parent(int irq, int parent_irq)
* Built-in IRQ handlers for various IRQ types,
* callable via desc->handle_irq()
*/
extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_level_irq(struct irq_desc *desc);
extern void handle_fasteoi_irq(struct irq_desc *desc);
extern void handle_edge_irq(struct irq_desc *desc);
extern void handle_edge_eoi_irq(struct irq_desc *desc);
extern void handle_simple_irq(struct irq_desc *desc);
extern void handle_percpu_irq(struct irq_desc *desc);
extern void handle_percpu_devid_irq(struct irq_desc *desc);
extern void handle_bad_irq(struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
@@ -484,11 +500,11 @@ extern int irq_chip_set_affinity_parent(struct irq_data *data,
extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
void *vcpu_info);
extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
#endif
/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
irqreturn_t action_ret);
extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
/* Enable/disable irq debugging output: */
@@ -625,23 +641,23 @@ static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
static inline void *irq_get_handler_data(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
return d ? d->handler_data : NULL;
return d ? d->common->handler_data : NULL;
}
static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
{
return d->handler_data;
return d->common->handler_data;
}
static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
return d ? d->msi_desc : NULL;
return d ? d->common->msi_desc : NULL;
}
static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
{
return d->msi_desc;
return d->common->msi_desc;
}
static inline u32 irq_get_trigger_type(unsigned int irq)
@@ -650,21 +666,30 @@ static inline u32 irq_get_trigger_type(unsigned int irq)
return d ? irqd_get_trigger_type(d) : 0;
}
static inline int irq_common_data_get_node(struct irq_common_data *d)
{
#ifdef CONFIG_NUMA
return d->node;
#else
return 0;
#endif
}
static inline int irq_data_get_node(struct irq_data *d)
{
return d->node;
return irq_common_data_get_node(d->common);
}
static inline struct cpumask *irq_get_affinity_mask(int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
return d ? d->affinity : NULL;
return d ? d->common->affinity : NULL;
}
static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
{
return d->affinity;
return d->common->affinity;
}
unsigned int arch_dynirq_lower_bound(unsigned int from);
@@ -761,6 +786,12 @@ struct irq_chip_type {
* @reg_base: Register base address (virtual)
* @reg_readl: Alternate I/O accessor (defaults to readl if NULL)
* @reg_writel: Alternate I/O accessor (defaults to writel if NULL)
* @suspend: Function called from core code on suspend once per
* chip; can be useful instead of irq_chip::suspend to
* handle chip details even when no interrupts are in use
* @resume: Function called from core code on resume once per chip;
* can be useful instead of irq_chip::resume to handle
* chip details even when no interrupts are in use
* @irq_base: Interrupt base nr for this chip
* @irq_cnt: Number of interrupts handled by this chip
* @mask_cache: Cached mask register shared between all chip types
@@ -787,6 +818,8 @@ struct irq_chip_generic {
void __iomem *reg_base;
u32 (*reg_readl)(void __iomem *addr);
void (*reg_writel)(u32 val, void __iomem *addr);
void (*suspend)(struct irq_chip_generic *gc);
void (*resume)(struct irq_chip_generic *gc);
unsigned int irq_base;
unsigned int irq_cnt;
u32 mask_cache;

View File

@@ -104,6 +104,8 @@
#define GICR_SYNCR 0x00C0
#define GICR_MOVLPIR 0x0100
#define GICR_MOVALLR 0x0110
#define GICR_ISACTIVER GICD_ISACTIVER
#define GICR_ICACTIVER GICD_ICACTIVER
#define GICR_IDREGS GICD_IDREGS
#define GICR_PIDR2 GICD_PIDR2
@@ -268,9 +270,12 @@
#define ICH_LR_EOI (1UL << 41)
#define ICH_LR_GROUP (1UL << 60)
#define ICH_LR_HW (1UL << 61)
#define ICH_LR_STATE (3UL << 62)
#define ICH_LR_PENDING_BIT (1UL << 62)
#define ICH_LR_ACTIVE_BIT (1UL << 63)
#define ICH_LR_PHYS_ID_SHIFT 32
#define ICH_LR_PHYS_ID_MASK (0x3ffUL << ICH_LR_PHYS_ID_SHIFT)
#define ICH_MISR_EOI (1 << 0)
#define ICH_MISR_U (1 << 1)
@@ -288,6 +293,7 @@
#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)
#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
#define ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
@@ -360,6 +366,7 @@
#ifndef __ASSEMBLY__
#include <linux/stringify.h>
#include <asm/msi.h>
/*
* We need a value to serve as a irq-type for LPIs. Choose one that will
@@ -384,6 +391,12 @@ static inline void gic_write_eoir(u64 irq)
isb();
}
static inline void gic_write_dir(u64 irq)
{
asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" (irq));
isb();
}
struct irq_domain;
int its_cpu_init(void);
int its_init(struct device_node *node, struct rdists *rdists,

View File

@@ -20,9 +20,13 @@
#define GIC_CPU_ALIAS_BINPOINT 0x1c
#define GIC_CPU_ACTIVEPRIO 0xd0
#define GIC_CPU_IDENT 0xfc
#define GIC_CPU_DEACTIVATE 0x1000
#define GICC_ENABLE 0x1
#define GICC_INT_PRI_THRESHOLD 0xf0
#define GIC_CPU_CTRL_EOImodeNS (1 << 9)
#define GICC_IAR_INT_ID_MASK 0x3ff
#define GICC_INT_SPURIOUS 1023
#define GICC_DIS_BYPASS_MASK 0x1e0
@@ -71,11 +75,12 @@
#define GICH_LR_VIRTUALID (0x3ff << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
#define GICH_LR_PHYSID_CPUID (7 << GICH_LR_PHYSID_CPUID_SHIFT)
#define GICH_LR_PHYSID_CPUID (0x3ff << GICH_LR_PHYSID_CPUID_SHIFT)
#define GICH_LR_STATE (3 << 28)
#define GICH_LR_PENDING_BIT (1 << 28)
#define GICH_LR_ACTIVE_BIT (1 << 29)
#define GICH_LR_EOI (1 << 19)
#define GICH_LR_HW (1 << 31)
#define GICH_VMCR_CTRL_SHIFT 0
#define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT)
@@ -95,11 +100,10 @@
struct device_node;
void gic_set_irqchip_flags(unsigned long flags);
void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
u32 offset, struct device_node *);
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
void gic_cpu_if_down(void);
int gic_cpu_if_down(unsigned int gic_nr);
static inline void gic_init(unsigned int nr, int start,
void __iomem *dist, void __iomem *cpu)

View File

@@ -41,12 +41,20 @@
/* Shared Global Counter */
#define GIC_SH_COUNTER_31_00_OFS 0x0010
/* 64-bit counter register for CM3 */
#define GIC_SH_COUNTER_OFS GIC_SH_COUNTER_31_00_OFS
#define GIC_SH_COUNTER_63_32_OFS 0x0014
#define GIC_SH_REVISIONID_OFS 0x0020
/* Convert an interrupt number to a byte offset/bit for multi-word registers */
#define GIC_INTR_OFS(intr) (((intr) / 32) * 4)
#define GIC_INTR_BIT(intr) ((intr) % 32)
#define GIC_INTR_OFS(intr) ({ \
unsigned bits = mips_cm_is64 ? 64 : 32; \
unsigned reg_idx = (intr) / bits; \
unsigned reg_width = bits / 8; \
\
reg_idx * reg_width; \
})
#define GIC_INTR_BIT(intr) ((intr) % (mips_cm_is64 ? 64 : 32))
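
To make the width handling concrete: interrupt 40 lands at byte offset (40/32)*4 = 4, bit 40%32 = 8 with 32-bit registers, but at offset (40/64)*8 = 0, bit 40 with the 64-bit CM3 registers. A host-side sanity check of the same arithmetic, with mips_cm_is64 replaced by a plain flag since this runs outside the kernel:

#include <assert.h>

static unsigned gic_intr_ofs(unsigned intr, int is64)
{
	unsigned bits = is64 ? 64 : 32;		/* register width in bits */
	return (intr / bits) * (bits / 8);	/* word index * word size */
}

int main(void)
{
	assert(gic_intr_ofs(40, 0) == 4 && 40 % 32 == 8);	/* 32-bit case */
	assert(gic_intr_ofs(40, 1) == 0 && 40 % 64 == 40);	/* CM3 64-bit case */
	return 0;
}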
/* Polarity : Reset Value is always 0 */
#define GIC_SH_SET_POLARITY_OFS 0x0100
@@ -98,6 +106,8 @@
#define GIC_VPE_WD_COUNT0_OFS 0x0094
#define GIC_VPE_WD_INITIAL0_OFS 0x0098
#define GIC_VPE_COMPARE_LO_OFS 0x00a0
/* 64-bit Compare register on CM3 */
#define GIC_VPE_COMPARE_OFS GIC_VPE_COMPARE_LO_OFS
#define GIC_VPE_COMPARE_HI_OFS 0x00a4
#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100

View File

@@ -98,11 +98,7 @@ extern struct irq_desc irq_desc[NR_IRQS];
static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
{
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
return irq_to_desc(data->irq);
#else
return container_of(data, struct irq_desc, irq_data);
#endif
return container_of(data->common, struct irq_desc, irq_common_data);
}
static inline unsigned int irq_desc_get_irq(struct irq_desc *desc)
@@ -127,23 +123,21 @@ static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
{
return desc->irq_data.handler_data;
return desc->irq_common_data.handler_data;
}
static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
{
return desc->irq_data.msi_desc;
return desc->irq_common_data.msi_desc;
}
/*
* Architectures call this to let the generic IRQ layer
* handle an interrupt. If the descriptor is attached to an
* irqchip-style controller then we call the ->handle_irq() handler,
* and it calls __do_IRQ() if it's attached to an irqtype-style controller.
* handle an interrupt.
*/
static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
static inline void generic_handle_irq_desc(struct irq_desc *desc)
{
desc->handle_irq(irq, desc);
desc->handle_irq(desc);
}
int generic_handle_irq(unsigned int irq);
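
Under the new one-argument prototype a handler recovers everything from the descriptor. A sketch of a chained demux handler in that style; the status-register read is a placeholder and the hwirq-to-Linux-irq mapping is elided for brevity:

static void demo_demux_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending = 0;	/* read the parent's status register here */
	int hwirq;

	chained_irq_enter(chip, desc);
	for_each_set_bit(hwirq, &pending, 32)
		generic_handle_irq(hwirq);	/* assumes hwirq == Linux irq for brevity */
	chained_irq_exit(chip, desc);
}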
@@ -166,33 +160,14 @@ static inline int handle_domain_irq(struct irq_domain *domain,
#endif
/* Test to see if a driver has successfully requested an irq */
static inline int irq_has_action(unsigned int irq)
static inline int irq_desc_has_action(struct irq_desc *desc)
{
struct irq_desc *desc = irq_to_desc(irq);
return desc->action != NULL;
}
/* caller has locked the irq_desc and both params are valid */
static inline void __irq_set_handler_locked(unsigned int irq,
irq_flow_handler_t handler)
static inline int irq_has_action(unsigned int irq)
{
struct irq_desc *desc;
desc = irq_to_desc(irq);
desc->handle_irq = handler;
}
/* caller has locked the irq_desc and both params are valid */
static inline void
__irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip,
irq_flow_handler_t handler, const char *name)
{
struct irq_desc *desc;
desc = irq_to_desc(irq);
irq_desc_get_irq_data(desc)->chip = chip;
desc->handle_irq = handler;
desc->name = name;
return irq_desc_has_action(irq_to_desc(irq));
}
/**

View File

@@ -45,6 +45,20 @@ struct irq_data;
/* Number of irqs reserved for a legacy isa controller */
#define NUM_ISA_INTERRUPTS 16
/*
* Should several domains have the same device node, but serve
* different purposes (for example one domain is for PCI/MSI, and the
* other for wired IRQs), they can be distinguished using a
* bus-specific token. Most domains are expected to only carry
* DOMAIN_BUS_ANY.
*/
enum irq_domain_bus_token {
DOMAIN_BUS_ANY = 0,
DOMAIN_BUS_PCI_MSI,
DOMAIN_BUS_PLATFORM_MSI,
DOMAIN_BUS_NEXUS,
};
/**
* struct irq_domain_ops - Methods for irq_domain objects
* @match: Match an interrupt controller device node to a host, returns
@@ -61,7 +75,8 @@ struct irq_data;
* to setup the irq_desc when returning from map().
*/
struct irq_domain_ops {
int (*match)(struct irq_domain *d, struct device_node *node);
int (*match)(struct irq_domain *d, struct device_node *node,
enum irq_domain_bus_token bus_token);
int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
void (*unmap)(struct irq_domain *d, unsigned int virq);
int (*xlate)(struct irq_domain *d, struct device_node *node,
@@ -116,6 +131,7 @@ struct irq_domain {
/* Optional data */
struct device_node *of_node;
enum irq_domain_bus_token bus_token;
struct irq_domain_chip_generic *gc;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
struct irq_domain *parent;
@@ -161,9 +177,15 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
irq_hw_number_t first_hwirq,
const struct irq_domain_ops *ops,
void *host_data);
extern struct irq_domain *irq_find_host(struct device_node *node);
extern struct irq_domain *irq_find_matching_host(struct device_node *node,
enum irq_domain_bus_token bus_token);
extern void irq_set_default_host(struct irq_domain *host);
static inline struct irq_domain *irq_find_host(struct device_node *node)
{
return irq_find_matching_host(node, DOMAIN_BUS_ANY);
}
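
For example, a controller node that hosts both wired interrupts and PCI/MSI can now be disambiguated by token. A sketch, where np is an assumed struct device_node * and the fallback policy is this example's choice, not the core's:

static struct irq_domain *demo_find_msi_domain(struct device_node *np)
{
	struct irq_domain *d = irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);

	/* fall back to the token-less lookup if no MSI domain is registered */
	return d ? d : irq_find_host(np);
}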
/**
* irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
* @of_node: pointer to interrupt controller's device tree node.

View File

@@ -8,7 +8,7 @@
struct irq_desc;
struct irq_data;
typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc);
typedef void (*irq_flow_handler_t)(struct irq_desc *desc);
typedef void (*irq_preflow_handler_t)(struct irq_data *data);
#endif

File diff suppressed because it is too large.

View File

@@ -29,6 +29,7 @@
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/bit_spinlock.h>
#include <crypto/hash.h>
#endif
@@ -336,7 +337,45 @@ BUFFER_FNS(Freed, freed)
BUFFER_FNS(Shadow, shadow)
BUFFER_FNS(Verified, verified)
#include <linux/jbd_common.h>
static inline struct buffer_head *jh2bh(struct journal_head *jh)
{
return jh->b_bh;
}
static inline struct journal_head *bh2jh(struct buffer_head *bh)
{
return bh->b_private;
}
static inline void jbd_lock_bh_state(struct buffer_head *bh)
{
bit_spin_lock(BH_State, &bh->b_state);
}
static inline int jbd_trylock_bh_state(struct buffer_head *bh)
{
return bit_spin_trylock(BH_State, &bh->b_state);
}
static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
{
return bit_spin_is_locked(BH_State, &bh->b_state);
}
static inline void jbd_unlock_bh_state(struct buffer_head *bh)
{
bit_spin_unlock(BH_State, &bh->b_state);
}
static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
{
bit_spin_lock(BH_JournalHead, &bh->b_state);
}
static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
{
bit_spin_unlock(BH_JournalHead, &bh->b_state);
}
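
These helpers bracket journal_head state access with the BH_State bit spinlock. A minimal usage sketch built only from the functions above:

static int demo_buffer_is_journaled(struct buffer_head *bh)
{
	int ret;

	jbd_lock_bh_state(bh);		/* bit_spin_lock(BH_State, &bh->b_state) */
	ret = bh2jh(bh) != NULL;	/* b_private carries the journal_head */
	jbd_unlock_bh_state(bh);
	return ret;
}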
#define J_ASSERT(assert) BUG_ON(!(assert))
@@ -1042,8 +1081,9 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
extern void jbd2_journal_commit_transaction(journal_t *);
/* Checkpoint list management */
void __jbd2_journal_clean_checkpoint_list(journal_t *journal);
void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
int __jbd2_journal_remove_checkpoint(struct journal_head *);
void jbd2_journal_destroy_checkpoint(journal_t *journal);
void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);

View File

@@ -1,46 +0,0 @@
#ifndef _LINUX_JBD_STATE_H
#define _LINUX_JBD_STATE_H
#include <linux/bit_spinlock.h>
static inline struct buffer_head *jh2bh(struct journal_head *jh)
{
return jh->b_bh;
}
static inline struct journal_head *bh2jh(struct buffer_head *bh)
{
return bh->b_private;
}
static inline void jbd_lock_bh_state(struct buffer_head *bh)
{
bit_spin_lock(BH_State, &bh->b_state);
}
static inline int jbd_trylock_bh_state(struct buffer_head *bh)
{
return bit_spin_trylock(BH_State, &bh->b_state);
}
static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
{
return bit_spin_is_locked(BH_State, &bh->b_state);
}
static inline void jbd_unlock_bh_state(struct buffer_head *bh)
{
bit_spin_unlock(BH_State, &bh->b_state);
}
static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
{
bit_spin_lock(BH_JournalHead, &bh->b_state);
}
static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
{
bit_spin_unlock(BH_JournalHead, &bh->b_state);
}
#endif

View File

@@ -351,7 +351,7 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m)
* directly here and from __msecs_to_jiffies() in the case where
* constant folding is not possible.
*/
static inline unsigned long msecs_to_jiffies(const unsigned int m)
static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
{
if (__builtin_constant_p(m)) {
if ((int)m < 0)
@@ -363,18 +363,11 @@ static inline unsigned long msecs_to_jiffies(const unsigned int m)
}
extern unsigned long __usecs_to_jiffies(const unsigned int u);
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
#if !(USEC_PER_SEC % HZ)
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
{
return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
}
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
{
return u * (HZ / USEC_PER_SEC);
}
#else
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
{
@@ -405,7 +398,7 @@ static inline unsigned long _usecs_to_jiffies(const unsigned int u)
* directly here and from __usecs_to_jiffies() in the case where
* constant folding is not possible.
*/
static inline unsigned long usecs_to_jiffies(const unsigned int u)
static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
{
if (__builtin_constant_p(u)) {
if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
@@ -416,9 +409,25 @@ static inline unsigned long usecs_to_jiffies(const unsigned int u)
}
}
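
A short illustration of the constant-folding split, assuming HZ == 1000 purely for the numbers; constant arguments fold via the __builtin_constant_p() branch while runtime values take the out-of-line helpers:

static unsigned long demo_timeouts(unsigned int runtime_us)
{
	unsigned long a = msecs_to_jiffies(20);		/* constant: folds to 20 */
	unsigned long b = usecs_to_jiffies(1500);	/* constant: folds to 2 (rounds up) */

	return a + b + usecs_to_jiffies(runtime_us);	/* runtime: __usecs_to_jiffies() */
}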
extern unsigned long timespec_to_jiffies(const struct timespec *value);
extern void jiffies_to_timespec(const unsigned long jiffies,
struct timespec *value);
extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
extern void jiffies_to_timespec64(const unsigned long jiffies,
struct timespec64 *value);
static inline unsigned long timespec_to_jiffies(const struct timespec *value)
{
struct timespec64 ts = timespec_to_timespec64(*value);
return timespec64_to_jiffies(&ts);
}
static inline void jiffies_to_timespec(const unsigned long jiffies,
struct timespec *value)
{
struct timespec64 ts;
jiffies_to_timespec64(jiffies, &ts);
*value = timespec64_to_timespec(ts);
}
extern unsigned long timeval_to_jiffies(const struct timeval *value);
extern void jiffies_to_timeval(const unsigned long jiffies,
struct timeval *value);

View File

@@ -7,17 +7,50 @@
* Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
* Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
*
* DEPRECATED API:
*
* The use of 'struct static_key' directly is now DEPRECATED. In addition,
* static_key_{true,false}() is also DEPRECATED, i.e. DO NOT use the following:
*
* struct static_key false = STATIC_KEY_INIT_FALSE;
* struct static_key true = STATIC_KEY_INIT_TRUE;
* static_key_true()
* static_key_false()
*
* The updated API replacements are:
*
* DEFINE_STATIC_KEY_TRUE(key);
* DEFINE_STATIC_KEY_FALSE(key);
* static_branch_likely()
* static_branch_unlikely()
*
* Jump labels provide an interface to generate dynamic branches using
* self-modifying code. Assuming toolchain and architecture support, the result
* of a "if (static_key_false(&key))" statement is an unconditional branch (which
* defaults to false - and the true block is placed out of line).
* self-modifying code. Assuming toolchain and architecture support, if we
* define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
* an "if (static_branch_unlikely(&key))" statement is an unconditional branch
* (which defaults to false - and the true block is placed out of line).
* Similarly, we can define an initially true key via
* "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
* "if (static_branch_unlikely(&key))", in which case we will generate an
* unconditional branch to the out-of-line true branch. Keys that are
* initially true or false can be used in both static_branch_unlikely()
* and static_branch_likely() statements.
*
* However at runtime we can change the branch target using
* static_key_slow_{inc,dec}(). These function as a 'reference' count on the key
* object, and for as long as there are references all branches referring to
* that particular key will point to the (out of line) true block.
* At runtime we can change the branch target by setting the key
* to true via a call to static_branch_enable(), or false using
* static_branch_disable(). If the direction of the branch is switched by
* these calls then we run-time modify the branch target via a
* no-op -> jump or jump -> no-op conversion. For example, for an
* initially false key that is used in an "if (static_branch_unlikely(&key))"
* statement, setting the key to true requires us to patch in a jump
* to the out-of-line of true branch.
*
* Since this relies on modifying code, the static_key_slow_{inc,dec}() functions
* In addition to static_branch_{enable,disable}, we can also reference count
* the key or branch direction via static_branch_{inc,dec}. Thus,
* static_branch_inc() can be thought of as a 'make more true' and
* static_branch_dec() as a 'make more false'.
*
* Since this relies on modifying code, the branch modifying functions
* must be considered absolute slow paths (machine wide synchronization etc.).
* OTOH, since the affected branches are unconditional, their runtime overhead
* will be absolutely minimal, esp. in the default (off) case where the total
@@ -29,20 +62,10 @@
* cause significant performance degradation. Struct static_key_deferred and
* static_key_slow_dec_deferred() provide for this.
*
* Lacking toolchain and or architecture support, jump labels fall back to a simple
* conditional branch.
* Lacking toolchain and/or architecture support, static keys fall back to a
* simple conditional branch.
*
* struct static_key my_key = STATIC_KEY_INIT_TRUE;
*
* if (static_key_true(&my_key)) {
* }
*
* will result in the true case being in-line and starts the key with a single
* reference. Mixing static_key_true() and static_key_false() on the same key is not
* allowed.
*
* Not initializing the key (static data is initialized to 0s anyway) is the
* same as using STATIC_KEY_INIT_FALSE.
* Additional babbling in: Documentation/static-keys.txt
*/
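
Putting the updated API from that comment together in one hedged sketch; all "demo" names are invented:

DEFINE_STATIC_KEY_FALSE(demo_tracing_key);

void demo_hot_path(void)
{
	if (static_branch_unlikely(&demo_tracing_key))
		pr_info("tracing enabled\n");	/* true block lands out of line */
}

void demo_set_tracing(bool on)
{
	if (on)
		static_branch_enable(&demo_tracing_key);	/* NOP -> JMP */
	else
		static_branch_disable(&demo_tracing_key);	/* JMP -> NOP */
}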
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
@@ -86,8 +109,8 @@ struct static_key {
#ifndef __ASSEMBLY__
enum jump_label_type {
JUMP_LABEL_DISABLE = 0,
JUMP_LABEL_ENABLE,
JUMP_LABEL_NOP = 0,
JUMP_LABEL_JMP,
};
struct module;
@@ -101,33 +124,18 @@ static inline int static_key_count(struct static_key *key)
#ifdef HAVE_JUMP_LABEL
#define JUMP_LABEL_TYPE_FALSE_BRANCH 0UL
#define JUMP_LABEL_TYPE_TRUE_BRANCH 1UL
#define JUMP_LABEL_TYPE_MASK 1UL
static
inline struct jump_entry *jump_label_get_entries(struct static_key *key)
{
return (struct jump_entry *)((unsigned long)key->entries
& ~JUMP_LABEL_TYPE_MASK);
}
static inline bool jump_label_get_branch_default(struct static_key *key)
{
if (((unsigned long)key->entries & JUMP_LABEL_TYPE_MASK) ==
JUMP_LABEL_TYPE_TRUE_BRANCH)
return true;
return false;
}
#define JUMP_TYPE_FALSE 0UL
#define JUMP_TYPE_TRUE 1UL
#define JUMP_TYPE_MASK 1UL
static __always_inline bool static_key_false(struct static_key *key)
{
return arch_static_branch(key);
return arch_static_branch(key, false);
}
static __always_inline bool static_key_true(struct static_key *key)
{
return !static_key_false(key);
return !arch_static_branch(key, true);
}
extern struct jump_entry __start___jump_table[];
@@ -145,12 +153,12 @@ extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);
#define STATIC_KEY_INIT_TRUE ((struct static_key) \
#define STATIC_KEY_INIT_TRUE \
{ .enabled = ATOMIC_INIT(1), \
.entries = (void *)JUMP_LABEL_TYPE_TRUE_BRANCH })
#define STATIC_KEY_INIT_FALSE ((struct static_key) \
.entries = (void *)JUMP_TYPE_TRUE }
#define STATIC_KEY_INIT_FALSE \
{ .enabled = ATOMIC_INIT(0), \
.entries = (void *)JUMP_LABEL_TYPE_FALSE_BRANCH })
.entries = (void *)JUMP_TYPE_FALSE }
#else /* !HAVE_JUMP_LABEL */
@@ -198,10 +206,8 @@ static inline int jump_label_apply_nops(struct module *mod)
return 0;
}
#define STATIC_KEY_INIT_TRUE ((struct static_key) \
{ .enabled = ATOMIC_INIT(1) })
#define STATIC_KEY_INIT_FALSE ((struct static_key) \
{ .enabled = ATOMIC_INIT(0) })
#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
#endif /* HAVE_JUMP_LABEL */
@@ -213,6 +219,157 @@ static inline bool static_key_enabled(struct static_key *key)
return static_key_count(key) > 0;
}
static inline void static_key_enable(struct static_key *key)
{
int count = static_key_count(key);
WARN_ON_ONCE(count < 0 || count > 1);
if (!count)
static_key_slow_inc(key);
}
static inline void static_key_disable(struct static_key *key)
{
int count = static_key_count(key);
WARN_ON_ONCE(count < 0 || count > 1);
if (count)
static_key_slow_dec(key);
}
/* -------------------------------------------------------------------------- */
/*
* Two type wrappers around static_key, such that we can use compile time
* type differentiation to emit the right code.
*
* All the below code is macros in order to play type games.
*/
struct static_key_true {
struct static_key key;
};
struct static_key_false {
struct static_key key;
};
#define STATIC_KEY_TRUE_INIT (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE, }
#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }
#define DEFINE_STATIC_KEY_TRUE(name) \
struct static_key_true name = STATIC_KEY_TRUE_INIT
#define DEFINE_STATIC_KEY_FALSE(name) \
struct static_key_false name = STATIC_KEY_FALSE_INIT
#ifdef HAVE_JUMP_LABEL
/*
* Combine the right initial value (type) with the right branch order
* to generate the desired result.
*
*
* type\branch| likely (1) | unlikely (0)
* -----------+-----------------------+------------------
* | |
* true (1) | ... | ...
* | NOP | JMP L
* | <br-stmts> | 1: ...
* | L: ... |
* | |
* | | L: <br-stmts>
* | | jmp 1b
* | |
* -----------+-----------------------+------------------
* | |
* false (0) | ... | ...
* | JMP L | NOP
* | <br-stmts> | 1: ...
* | L: ... |
* | |
* | | L: <br-stmts>
* | | jmp 1b
* | |
* -----------+-----------------------+------------------
*
* The initial value is encoded in the LSB of static_key::entries,
* type: 0 = false, 1 = true.
*
* The branch type is encoded in the LSB of jump_entry::key,
* branch: 0 = unlikely, 1 = likely.
*
* This gives the following logic table:
*
* enabled type branch instruction
* -----------------------------+-----------
* 0 0 0 | NOP
* 0 0 1 | JMP
* 0 1 0 | NOP
* 0 1 1 | JMP
*
* 1 0 0 | JMP
* 1 0 1 | NOP
* 1 1 0 | JMP
* 1 1 1 | NOP
*
* Which gives the following functions:
*
* dynamic: instruction = enabled ^ branch
* static: instruction = type ^ branch
*
* See jump_label_type() / jump_label_init_type().
*/
extern bool ____wrong_branch_error(void);
#define static_branch_likely(x) \
({ \
bool branch; \
if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \
branch = !arch_static_branch(&(x)->key, true); \
else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
branch = !arch_static_branch_jump(&(x)->key, true); \
else \
branch = ____wrong_branch_error(); \
branch; \
})
#define static_branch_unlikely(x) \
({ \
bool branch; \
if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \
branch = arch_static_branch_jump(&(x)->key, false); \
else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
branch = arch_static_branch(&(x)->key, false); \
else \
branch = ____wrong_branch_error(); \
branch; \
})
#else /* !HAVE_JUMP_LABEL */
#define static_branch_likely(x) likely(static_key_enabled(&(x)->key))
#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key))
#endif /* HAVE_JUMP_LABEL */
/*
* Advanced usage; refcount, branch is enabled when: count != 0
*/
#define static_branch_inc(x) static_key_slow_inc(&(x)->key)
#define static_branch_dec(x) static_key_slow_dec(&(x)->key)
/*
* Normal usage; boolean enable/disable.
*/
#define static_branch_enable(x) static_key_enable(&(x)->key)
#define static_branch_disable(x) static_key_disable(&(x)->key)
#endif /* _LINUX_JUMP_LABEL_H */
#endif /* __ASSEMBLY__ */
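
And the refcounted form, where the branch stays true as long as any user holds a reference; a sketch with invented names:

DEFINE_STATIC_KEY_FALSE(demo_users_key);

void demo_user_get(void) { static_branch_inc(&demo_users_key); }	/* 'more true' */
void demo_user_put(void) { static_branch_dec(&demo_users_key); }	/* 'more false' */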

View File

@@ -10,11 +10,19 @@ struct vm_struct;
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
#include <asm/kasan.h>
#include <asm/pgtable.h>
#include <linux/sched.h>
extern unsigned char kasan_zero_page[PAGE_SIZE];
extern pte_t kasan_zero_pte[PTRS_PER_PTE];
extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
extern pud_t kasan_zero_pud[PTRS_PER_PUD];
void kasan_populate_zero_shadow(const void *shadow_start,
const void *shadow_end);
static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
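
The hunk is cut off by the diff view; for orientation, the translation it computes is shadow = (addr >> 3) + KASAN_SHADOW_OFFSET, i.e. one shadow byte per eight bytes of memory. A host-side sketch of just that arithmetic:

/* One shadow byte covers 1 << KASAN_SHADOW_SCALE_SHIFT (= 8) bytes. */
static inline unsigned long demo_mem_to_shadow(unsigned long addr,
					       unsigned long shadow_offset)
{
	return (addr >> 3) + shadow_offset;
}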

View File

@@ -266,6 +266,7 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
}
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
size_t kernfs_path_len(struct kernfs_node *kn);
char * __must_check kernfs_path(struct kernfs_node *kn, char *buf,
size_t buflen);
void pr_cont_kernfs_name(struct kernfs_node *kn);
@@ -332,6 +333,9 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
{ return -ENOSYS; }
static inline size_t kernfs_path_len(struct kernfs_node *kn)
{ return 0; }
static inline char * __must_check kernfs_path(struct kernfs_node *kn, char *buf,
size_t buflen)
{ return NULL; }

View File

@@ -16,7 +16,7 @@
#include <uapi/linux/kexec.h>
#ifdef CONFIG_KEXEC
#ifdef CONFIG_KEXEC_CORE
#include <linux/list.h>
#include <linux/linkage.h>
#include <linux/compat.h>
@@ -318,12 +318,24 @@ int crash_shrink_memory(unsigned long new_size);
size_t crash_get_memory_size(void);
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
#else /* !CONFIG_KEXEC */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
unsigned long buf_len);
void * __weak arch_kexec_kernel_image_load(struct kimage *image);
int __weak arch_kimage_file_post_load_cleanup(struct kimage *image);
int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
unsigned long buf_len);
int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr,
Elf_Shdr *sechdrs, unsigned int relsec);
int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
unsigned int relsec);
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
static inline void crash_kexec(struct pt_regs *regs) { }
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
#endif /* CONFIG_KEXEC */
#define kexec_in_progress false
#endif /* CONFIG_KEXEC_CORE */
#endif /* !defined(__ASSEMBLY__) */

View File

@@ -63,6 +63,7 @@ extern void klist_iter_init(struct klist *k, struct klist_iter *i);
extern void klist_iter_init_node(struct klist *k, struct klist_iter *i,
struct klist_node *n);
extern void klist_iter_exit(struct klist_iter *i);
extern struct klist_node *klist_prev(struct klist_iter *i);
extern struct klist_node *klist_next(struct klist_iter *i);
#endif
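
klist_prev() mirrors klist_next(); a sketch of a backwards walk from a known node, using only the iterator API declared above:

static void demo_walk_back(struct klist *k, struct klist_node *start)
{
	struct klist_iter iter;
	struct klist_node *n;

	klist_iter_init_node(k, &iter, start);
	while ((n = klist_prev(&iter)) != NULL)
		;	/* visit n here */
	klist_iter_exit(&iter);
}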

View File

@@ -85,8 +85,6 @@ enum umh_disable_depth {
UMH_DISABLED,
};
extern void usermodehelper_init(void);
extern int __usermodehelper_disable(enum umh_disable_depth depth);
extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth);

View File

@@ -267,6 +267,8 @@ extern void show_registers(struct pt_regs *regs);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
extern bool within_kprobe_blacklist(unsigned long addr);
struct kprobe_insn_cache {
struct mutex mutex;
void *(*alloc)(void); /* allocate insn page */

Some files were not shown because too many files have changed in this diff.