Merge branch 'perf/core' into perf/uprobes

Merge in latest upstream (and the latest perf development tree),
to prepare for tooling changes, and also to pick up v3.4 MM
changes that the uprobes code needs to take care of.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Committed by Ingo Molnar, 2012-04-14 13:18:27 +02:00
10570 changed files with 506456 additions and 292912 deletions


@@ -3,6 +3,7 @@ header-y += can/
header-y += caif/
header-y += dvb/
header-y += hdlc/
header-y += hsi/
header-y += isdn/
header-y += mmc/
header-y += nfsd/
@@ -120,7 +121,6 @@ header-y += errno.h
header-y += errqueue.h
header-y += ethtool.h
header-y += eventpoll.h
-header-y += ext2_fs.h
header-y += fadvise.h
header-y += falloc.h
header-y += fanotify.h
@@ -238,6 +238,7 @@ header-y += magic.h
header-y += major.h
header-y += map_to_7segment.h
header-y += matroxfb.h
header-y += mdio.h
header-y += media.h
header-y += mempolicy.h
header-y += meye.h
@@ -304,6 +305,7 @@ header-y += poll.h
header-y += posix_types.h
header-y += ppdev.h
header-y += ppp-comp.h
header-y += ppp-ioctl.h
header-y += ppp_defs.h
header-y += pps.h
header-y += prctl.h


@@ -151,6 +151,7 @@ extern int ec_write(u8 addr, u8 val);
extern int ec_transaction(u8 command,
const u8 *wdata, unsigned wdata_len,
u8 *rdata, unsigned rdata_len);
extern acpi_handle ec_get_handle(void);
#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
@@ -371,4 +372,14 @@ static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
#endif /* !CONFIG_ACPI */
#ifdef CONFIG_ACPI
void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
u32 pm1a_ctrl, u32 pm1b_ctrl));
acpi_status acpi_os_prepare_sleep(u8 sleep_state,
u32 pm1a_control, u32 pm1b_control);
#else
#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0)
#endif
#endif /*_LINUX_ACPI_H*/


@@ -5,8 +5,6 @@
#ifndef __ALTUART_H
#define __ALTUART_H
-#include <linux/init.h>
struct altera_uart_platform_uart {
unsigned long mapbase; /* Physical address base */
unsigned int irq; /* Interrupt vector */
@@ -14,6 +12,4 @@ struct altera_uart_platform_uart {
unsigned int bus_shift; /* Bus shift (address stride) */
};
int __init early_altera_uart_setup(struct altera_uart_platform_uart *platp);
#endif /* __ALTUART_H */


@@ -60,6 +60,9 @@ extern struct bus_type amba_bustype;
int amba_driver_register(struct amba_driver *);
void amba_driver_unregister(struct amba_driver *);
struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t);
void amba_device_put(struct amba_device *);
int amba_device_add(struct amba_device *, struct resource *);
int amba_device_register(struct amba_device *, struct resource *);
void amba_device_unregister(struct amba_device *);
struct amba_device *amba_find_device(const char *, struct device *, unsigned int, unsigned int);
@@ -89,4 +92,46 @@ void amba_release_regions(struct amba_device *);
#define amba_manf(d) AMBA_MANF_BITS((d)->periphid)
#define amba_part(d) AMBA_PART_BITS((d)->periphid)
#define __AMBA_DEV(busid, data, mask) \
{ \
.coherent_dma_mask = mask, \
.init_name = busid, \
.platform_data = data, \
}
/*
* APB devices do not themselves have the ability to address memory,
* so DMA masks should be zero (much like USB peripheral devices.)
* The DMA controller DMA masks should be used instead (much like
* USB host controllers in conventional PCs.)
*/
#define AMBA_APB_DEVICE(name, busid, id, base, irqs, data) \
struct amba_device name##_device = { \
.dev = __AMBA_DEV(busid, data, 0), \
.res = DEFINE_RES_MEM(base, SZ_4K), \
.irq = irqs, \
.periphid = id, \
}
/*
* AHB devices are DMA capable, so set their DMA masks
*/
#define AMBA_AHB_DEVICE(name, busid, id, base, irqs, data) \
struct amba_device name##_device = { \
.dev = __AMBA_DEV(busid, data, ~0ULL), \
.res = DEFINE_RES_MEM(base, SZ_4K), \
.dma_mask = ~0ULL, \
.irq = irqs, \
.periphid = id, \
}
/*
* module_amba_driver() - Helper macro for drivers that don't do anything
* special in module init/exit. This eliminates a lot of boilerplate. Each
* module may only use this macro once, and calling it replaces module_init()
* and module_exit()
*/
#define module_amba_driver(__amba_drv) \
module_driver(__amba_drv, amba_driver_register, amba_driver_unregister)
#endif
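The device macros and module_amba_driver() added above exist to cut boilerplate in board files and bus drivers. A hedged sketch of their intended use (the device name, periphid, base address, IRQ and the foo_* driver are invented for illustration, not taken from this diff):

/* Board file: one line instead of an open-coded struct amba_device. */
static AMBA_AHB_DEVICE(foo_uart, "dev:uart0", 0x00041011, 0x10009000,
		       { 36 }, NULL);

/* Driver side: module_amba_driver() replaces module_init()/module_exit(). */
static int foo_probe(struct amba_device *adev, const struct amba_id *id)
{
	dev_info(&adev->dev, "bound\n");
	return 0;
}

static int foo_remove(struct amba_device *adev)
{
	return 0;
}

static struct amba_id foo_ids[] = {
	{ .id = 0x00041011, .mask = 0x000fffff },
	{ 0, 0 },
};

static struct amba_driver foo_driver = {
	.drv		= { .name = "foo" },
	.id_table	= foo_ids,
	.probe		= foo_probe,
	.remove		= foo_remove,
};
module_amba_driver(foo_driver);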


@@ -6,6 +6,19 @@
#include <linux/mmc/host.h>
/*
* These defines are placed here because they need to be accessed from machine
* configuration files. The ST Micro version does not have ROD and
* reuses the voltage registers for direction settings.
*/
#define MCI_ST_DATA2DIREN (1 << 2)
#define MCI_ST_CMDDIREN (1 << 3)
#define MCI_ST_DATA0DIREN (1 << 4)
#define MCI_ST_DATA31DIREN (1 << 5)
#define MCI_ST_FBCLKEN (1 << 7)
#define MCI_ST_DATA74DIREN (1 << 8)
/* Just some dummy forwarding */
struct dma_chan;
@@ -18,7 +31,8 @@ struct dma_chan;
* @ocr_mask: available voltages on the 4 pins from the block, this
* is ignored if a regulator is used, see the MMC_VDD_* masks in
* mmc/host.h
- * @vdd_handler: a callback function to translate a MMC_VDD_*
+ * @ios_handler: a callback function to act on specific ios changes,
+ * used for example to control a levelshifter
- * mask into a value to be binary (or set some other custom bits
- * in MMCIPWR) or:ed and written into the MMCIPWR register of the
- * block. May also control external power based on the power_mode.
@@ -31,6 +45,8 @@ struct dma_chan;
* @capabilities: the capabilities of the block as implemented in
* this platform, signify anything MMC_CAP_* from mmc/host.h
* @capabilities2: more capabilities, MMC_CAP2_* from mmc/host.h
* @sigdir: a bit field indicating for what bits in the MMC bus the host
* should enable signal direction indication.
* @dma_filter: function used to select an appropriate RX and TX
* DMA channel to be used for DMA, if and only if you're deploying the
* generic DMA engine
@@ -46,14 +62,14 @@ struct dma_chan;
struct mmci_platform_data {
unsigned int f_max;
unsigned int ocr_mask;
-u32 (*vdd_handler)(struct device *, unsigned int vdd,
-unsigned char power_mode);
+int (*ios_handler)(struct device *, struct mmc_ios *);
unsigned int (*status)(struct device *);
int gpio_wp;
int gpio_cd;
bool cd_invert;
unsigned long capabilities;
unsigned long capabilities2;
u32 sigdir;
bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
void *dma_rx_param;
void *dma_tx_param;


@@ -25,8 +25,6 @@
#ifndef _SSP_PL022_H
#define _SSP_PL022_H
#include <linux/device.h>
/**
* whether SSP is in loopback mode or not
*/
@@ -241,6 +239,8 @@ struct dma_chan;
* @autosuspend_delay: delay in ms following transfer completion before the
* runtime power management system suspends the device. A setting of 0
* indicates no delay and the device will be suspended immediately.
* @rt: indicates the controller should run the message pump with realtime
* priority to minimise the transfer latency on the bus.
*/
struct pl022_ssp_controller {
u16 bus_id;
@@ -250,6 +250,7 @@ struct pl022_ssp_controller {
void *dma_rx_param;
void *dma_tx_param;
int autosuspend_delay;
bool rt;
};
/**


@@ -47,9 +47,6 @@ enum {
* @muxval: a number usually used to poke into some mux register to
* mux in the signal to this channel
* @cctl_opt: default options for the channel control register
- * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
- * channels. Fill with 'true' if peripheral should be flow controller. Direction
- * will be selected at Runtime.
* @addr: source/target address in physical memory for this DMA channel,
* can be the address of a FIFO register for burst requests for example.
* This can be left undefined if the PrimeCell API is used for configuring
@@ -68,7 +65,6 @@ struct pl08x_channel_data {
int max_signal;
u32 muxval;
u32 cctl;
-bool device_fc;
dma_addr_t addr;
bool circular_buffer;
bool single;
@@ -176,13 +172,15 @@ enum pl08x_dma_chan_state {
* @runtime_addr: address for RX/TX according to the runtime config
* @runtime_direction: current direction of this channel according to
* runtime config
* @lc: last completed transaction on this channel
* @pend_list: queued transactions pending on this channel
* @at: active transaction on this channel
* @lock: a lock for this channel data
* @host: a pointer to the host (internal use)
* @state: whether the channel is idle, paused, running etc
* @slave: whether this channel is a device (slave) or for memcpy
+ * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
+ * channels. Fill with 'true' if peripheral should be flow controller. Direction
+ * will be selected at Runtime.
* @waiting: a TX descriptor on this channel which is waiting for a physical
* channel to become available
*/
@@ -198,13 +196,13 @@ struct pl08x_dma_chan {
u32 src_cctl;
u32 dst_cctl;
enum dma_transfer_direction runtime_direction;
dma_cookie_t lc;
struct list_head pend_list;
struct pl08x_txd *at;
spinlock_t lock;
struct pl08x_driver_data *host;
enum pl08x_dma_chan_state state;
bool slave;
+bool device_fc;
struct pl08x_txd *waiting;
};


@@ -13,7 +13,6 @@
#define __AMBA_PL330_H_
#include <linux/dmaengine.h>
-#include <asm/hardware/pl330.h>
struct dma_pl330_platdata {
/*


@@ -23,6 +23,8 @@
#ifndef ASM_ARM_HARDWARE_SERIAL_AMBA_H
#define ASM_ARM_HARDWARE_SERIAL_AMBA_H
#include <linux/types.h>
/* -------------------------------------------------------------------------------
* From AMBA UART (PL010) Block Specification
* -------------------------------------------------------------------------------


@@ -28,7 +28,7 @@ struct task_struct;
struct pci_dev;
extern int amd_iommu_detect(void);
extern int amd_iommu_init_hardware(void);
/**
* amd_iommu_enable_device_erratum() - Enable erratum workaround for device

include/linux/apple_bl.h (new file, 26 lines)

@@ -0,0 +1,26 @@
/*
* apple_bl exported symbols
*/
#ifndef _LINUX_APPLE_BL_H
#define _LINUX_APPLE_BL_H
#ifdef CONFIG_BACKLIGHT_APPLE
extern int apple_bl_register(void);
extern void apple_bl_unregister(void);
#else /* !CONFIG_BACKLIGHT_APPLE */
static inline int apple_bl_register(void)
{
return 0;
}
static inline void apple_bl_unregister(void)
{
}
#endif /* !CONFIG_BACKLIGHT_APPLE */
#endif /* _LINUX_APPLE_BL_H */


@@ -213,10 +213,10 @@ struct atm_cirange {
#ifdef __KERNEL__
-#include <linux/device.h>
#include <linux/wait.h> /* wait_queue_head_t */
#include <linux/time.h> /* struct timeval */
#include <linux/net.h>
#include <linux/bug.h>
#include <linux/skbuff.h> /* struct sk_buff */
#include <linux/uio.h>
#include <net/sock.h>
@@ -249,6 +249,7 @@ struct k_atm_dev_stats {
struct k_atm_aal_stats aal5;
};
struct device;
enum {
ATM_VF_ADDR, /* Address is in use. Set by anybody, cleared


@@ -33,11 +33,20 @@
struct clk;
/**
* struct atmel_tcb_config - SoC data for a Timer/Counter Block
* @counter_width: size in bits of a timer counter register
*/
struct atmel_tcb_config {
size_t counter_width;
};
/**
* struct atmel_tc - information about a Timer/Counter Block
* @pdev: physical device
* @iomem: resource associated with the I/O register
* @regs: mapping through which the I/O registers can be accessed
* @tcb_config: configuration data from SoC
* @irq: irq for each of the three channels
* @clk: internal clock source for each of the three channels
* @node: list node, for tclib internal use
@@ -54,6 +63,7 @@ struct atmel_tc {
struct platform_device *pdev;
struct resource *iomem;
void __iomem *regs;
struct atmel_tcb_config *tcb_config;
int irq[3];
struct clk *clk[3];
struct list_head node;


@@ -24,7 +24,9 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
* Atomically increments @v by 1, so long as @v is non-zero.
* Returns non-zero if @v was non-zero, and zero otherwise.
*/
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#endif
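/*
 * Illustrative use (not part of this diff): the lockless-lookup pattern
 * this primitive serves. "struct foo", its refcnt field and foo_get()
 * are hypothetical names.
 */
struct foo {
	atomic_t refcnt;
	/* ... payload ... */
};

static struct foo *foo_get(struct foo *f)
{
	if (f && !atomic_inc_not_zero(&f->refcnt))
		f = NULL;	/* refcount already hit zero; object is dying */
	return f;
}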
/**
* atomic_inc_not_zero_hint - increment if not null


@@ -9,10 +9,11 @@
#ifndef _ATTRIBUTE_CONTAINER_H_
#define _ATTRIBUTE_CONTAINER_H_
-#include <linux/device.h>
#include <linux/list.h>
#include <linux/klist.h>
struct device;
struct attribute_container {
struct list_head node;
struct klist containers;


@@ -684,7 +684,7 @@ extern void audit_log_untrustedstring(struct audit_buffer *ab,
const char *string);
extern void audit_log_d_path(struct audit_buffer *ab,
const char *prefix,
-struct path *path);
+const struct path *path);
extern void audit_log_key(struct audit_buffer *ab,
char *key);
extern void audit_log_lost(const char *message);


@@ -136,6 +136,7 @@ struct bcma_device {
bool dev_registered;
u8 core_index;
u8 core_unit;
u32 addr;
u32 wrap;
@@ -175,6 +176,12 @@ int __bcma_driver_register(struct bcma_driver *drv, struct module *owner);
extern void bcma_driver_unregister(struct bcma_driver *drv);
/* Set a fallback SPROM.
* See kdoc at the function definition for complete documentation. */
extern int bcma_arch_register_fallback_sprom(
int (*sprom_callback)(struct bcma_bus *bus,
struct ssb_sprom *out));
struct bcma_bus {
/* The MMIO area. */
void __iomem *mmio;
@@ -195,6 +202,7 @@ struct bcma_bus {
struct list_head cores;
u8 nr_cores;
u8 init_done:1;
u8 num;
struct bcma_drv_cc drv_cc;
struct bcma_drv_pci drv_pci;
@@ -282,6 +290,7 @@ static inline void bcma_maskset16(struct bcma_device *cc,
bcma_write16(cc, offset, (bcma_read16(cc, offset) & mask) | set);
}
extern struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid);
extern bool bcma_core_is_enabled(struct bcma_device *core);
extern void bcma_core_disable(struct bcma_device *core, u32 flags);
extern int bcma_core_enable(struct bcma_device *core, u32 flags);


@@ -56,6 +56,9 @@
#define BCMA_CC_OTPS_HW_PROTECT 0x00000001
#define BCMA_CC_OTPS_SW_PROTECT 0x00000002
#define BCMA_CC_OTPS_CID_PROTECT 0x00000004
#define BCMA_CC_OTPS_GU_PROG_IND 0x00000F00 /* General Use programmed indication */
#define BCMA_CC_OTPS_GU_PROG_IND_SHIFT 8
#define BCMA_CC_OTPS_GU_PROG_HW 0x00000100 /* HW region programmed */
#define BCMA_CC_OTPC 0x0014 /* OTP control */
#define BCMA_CC_OTPC_RECWAIT 0xFF000000
#define BCMA_CC_OTPC_PROGWAIT 0x00FFFF00
@@ -72,6 +75,8 @@
#define BCMA_CC_OTPP_READ 0x40000000
#define BCMA_CC_OTPP_START 0x80000000
#define BCMA_CC_OTPP_BUSY 0x80000000
#define BCMA_CC_OTPL 0x001C /* OTP layout */
#define BCMA_CC_OTPL_GURGN_OFFSET 0x00000FFF /* offset of general use region */
#define BCMA_CC_IRQSTAT 0x0020
#define BCMA_CC_IRQMASK 0x0024
#define BCMA_CC_IRQ_GPIO 0x00000001 /* gpio intr */
@@ -79,6 +84,10 @@
#define BCMA_CC_IRQ_WDRESET 0x80000000 /* watchdog reset occurred */
#define BCMA_CC_CHIPCTL 0x0028 /* Rev >= 11 only */
#define BCMA_CC_CHIPSTAT 0x002C /* Rev >= 11 only */
#define BCMA_CC_CHIPST_4313_SPROM_PRESENT 1
#define BCMA_CC_CHIPST_4313_OTP_PRESENT 2
#define BCMA_CC_CHIPST_4331_SPROM_PRESENT 2
#define BCMA_CC_CHIPST_4331_OTP_PRESENT 4
#define BCMA_CC_JCMD 0x0030 /* Rev >= 10 only */
#define BCMA_CC_JCMD_START 0x80000000
#define BCMA_CC_JCMD_BUSY 0x80000000
@@ -181,6 +190,22 @@
#define BCMA_CC_FLASH_CFG 0x0128
#define BCMA_CC_FLASH_CFG_DS 0x0010 /* Data size, 0=8bit, 1=16bit */
#define BCMA_CC_FLASH_WAITCNT 0x012C
#define BCMA_CC_SROM_CONTROL 0x0190
#define BCMA_CC_SROM_CONTROL_START 0x80000000
#define BCMA_CC_SROM_CONTROL_BUSY 0x80000000
#define BCMA_CC_SROM_CONTROL_OPCODE 0x60000000
#define BCMA_CC_SROM_CONTROL_OP_READ 0x00000000
#define BCMA_CC_SROM_CONTROL_OP_WRITE 0x20000000
#define BCMA_CC_SROM_CONTROL_OP_WRDIS 0x40000000
#define BCMA_CC_SROM_CONTROL_OP_WREN 0x60000000
#define BCMA_CC_SROM_CONTROL_OTPSEL 0x00000010
#define BCMA_CC_SROM_CONTROL_LOCK 0x00000008
#define BCMA_CC_SROM_CONTROL_SIZE_MASK 0x00000006
#define BCMA_CC_SROM_CONTROL_SIZE_1K 0x00000000
#define BCMA_CC_SROM_CONTROL_SIZE_4K 0x00000002
#define BCMA_CC_SROM_CONTROL_SIZE_16K 0x00000004
#define BCMA_CC_SROM_CONTROL_SIZE_SHIFT 1
#define BCMA_CC_SROM_CONTROL_PRESENT 0x00000001
/* 0x1E0 is defined as shared BCMA_CLKCTLST */
#define BCMA_CC_HW_WORKAROUND 0x01E4 /* Hardware workaround (rev >= 20) */
#define BCMA_CC_UART0_DATA 0x0300
@@ -240,7 +265,6 @@
#define BCMA_CC_PLLCTL_ADDR 0x0660
#define BCMA_CC_PLLCTL_DATA 0x0664
#define BCMA_CC_SPROM 0x0800 /* SPROM beginning */
#define BCMA_CC_SPROM_PCIE6 0x0830 /* SPROM beginning on PCIe rev >= 6 */
/* Divider allocation in 4716/47162/5356 */
#define BCMA_CC_PMU5_MAINPLL_CPU 1


@@ -53,6 +53,35 @@ struct pci_dev;
#define BCMA_CORE_PCI_SBTOPCI1_MASK 0xFC000000
#define BCMA_CORE_PCI_SBTOPCI2 0x0108 /* Backplane to PCI translation 2 (sbtopci2) */
#define BCMA_CORE_PCI_SBTOPCI2_MASK 0xC0000000
#define BCMA_CORE_PCI_CONFIG_ADDR 0x0120 /* pcie config space access */
#define BCMA_CORE_PCI_CONFIG_DATA 0x0124 /* pcie config space access */
#define BCMA_CORE_PCI_MDIO_CONTROL 0x0128 /* controls the mdio access */
#define BCMA_CORE_PCI_MDIOCTL_DIVISOR_MASK 0x7f /* clock to be used on MDIO */
#define BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL 0x2
#define BCMA_CORE_PCI_MDIOCTL_PREAM_EN 0x80 /* Enable preamble sequence */
#define BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE 0x100 /* Transaction complete */
#define BCMA_CORE_PCI_MDIO_DATA 0x012c /* Data to the mdio access */
#define BCMA_CORE_PCI_MDIODATA_MASK 0x0000ffff /* data 2 bytes */
#define BCMA_CORE_PCI_MDIODATA_TA 0x00020000 /* Turnaround */
#define BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD 18 /* Regaddr shift (rev < 10) */
#define BCMA_CORE_PCI_MDIODATA_REGADDR_MASK_OLD 0x003c0000 /* Regaddr Mask (rev < 10) */
#define BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD 22 /* Physmedia devaddr shift (rev < 10) */
#define BCMA_CORE_PCI_MDIODATA_DEVADDR_MASK_OLD 0x0fc00000 /* Physmedia devaddr Mask (rev < 10) */
#define BCMA_CORE_PCI_MDIODATA_REGADDR_SHF 18 /* Regaddr shift */
#define BCMA_CORE_PCI_MDIODATA_REGADDR_MASK 0x007c0000 /* Regaddr Mask */
#define BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF 23 /* Physmedia devaddr shift */
#define BCMA_CORE_PCI_MDIODATA_DEVADDR_MASK 0x0f800000 /* Physmedia devaddr Mask */
#define BCMA_CORE_PCI_MDIODATA_WRITE 0x10000000 /* write Transaction */
#define BCMA_CORE_PCI_MDIODATA_READ 0x20000000 /* Read Transaction */
#define BCMA_CORE_PCI_MDIODATA_START 0x40000000 /* start of Transaction */
#define BCMA_CORE_PCI_MDIODATA_DEV_ADDR 0x0 /* dev address for serdes */
#define BCMA_CORE_PCI_MDIODATA_BLK_ADDR 0x1F /* blk address for serdes */
#define BCMA_CORE_PCI_MDIODATA_DEV_PLL 0x1d /* SERDES PLL Dev */
#define BCMA_CORE_PCI_MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */
#define BCMA_CORE_PCI_MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */
#define BCMA_CORE_PCI_PCIEIND_ADDR 0x0130 /* indirect access to the internal register */
#define BCMA_CORE_PCI_PCIEIND_DATA 0x0134 /* Data to/from the internal register */
#define BCMA_CORE_PCI_CLKREQENCTRL 0x0138 /* >= rev 6, Clkreq rdma control */
#define BCMA_CORE_PCI_PCICFG0 0x0400 /* PCI config space 0 (rev >= 8) */
#define BCMA_CORE_PCI_PCICFG1 0x0500 /* PCI config space 1 (rev >= 8) */
#define BCMA_CORE_PCI_PCICFG2 0x0600 /* PCI config space 2 (rev >= 8) */
@@ -72,20 +101,114 @@ struct pci_dev;
#define BCMA_CORE_PCI_SBTOPCI_RC_READL 0x00000010 /* Memory read line */
#define BCMA_CORE_PCI_SBTOPCI_RC_READM 0x00000020 /* Memory read multiple */
/* PCIE protocol PHY diagnostic registers */
#define BCMA_CORE_PCI_PLP_MODEREG 0x200 /* Mode */
#define BCMA_CORE_PCI_PLP_STATUSREG 0x204 /* Status */
#define BCMA_CORE_PCI_PLP_POLARITYINV_STAT 0x10 /* Status reg PCIE_PLP_STATUSREG */
#define BCMA_CORE_PCI_PLP_LTSSMCTRLREG 0x208 /* LTSSM control */
#define BCMA_CORE_PCI_PLP_LTLINKNUMREG 0x20c /* Link Training Link number */
#define BCMA_CORE_PCI_PLP_LTLANENUMREG 0x210 /* Link Training Lane number */
#define BCMA_CORE_PCI_PLP_LTNFTSREG 0x214 /* Link Training N_FTS */
#define BCMA_CORE_PCI_PLP_ATTNREG 0x218 /* Attention */
#define BCMA_CORE_PCI_PLP_ATTNMASKREG 0x21C /* Attention Mask */
#define BCMA_CORE_PCI_PLP_RXERRCTR 0x220 /* Rx Error */
#define BCMA_CORE_PCI_PLP_RXFRMERRCTR 0x224 /* Rx Framing Error */
#define BCMA_CORE_PCI_PLP_RXERRTHRESHREG 0x228 /* Rx Error threshold */
#define BCMA_CORE_PCI_PLP_TESTCTRLREG 0x22C /* Test Control reg */
#define BCMA_CORE_PCI_PLP_SERDESCTRLOVRDREG 0x230 /* SERDES Control Override */
#define BCMA_CORE_PCI_PLP_TIMINGOVRDREG 0x234 /* Timing param override */
#define BCMA_CORE_PCI_PLP_RXTXSMDIAGREG 0x238 /* RXTX State Machine Diag */
#define BCMA_CORE_PCI_PLP_LTSSMDIAGREG 0x23C /* LTSSM State Machine Diag */
/* PCIE protocol DLLP diagnostic registers */
#define BCMA_CORE_PCI_DLLP_LCREG 0x100 /* Link Control */
#define BCMA_CORE_PCI_DLLP_LSREG 0x104 /* Link Status */
#define BCMA_CORE_PCI_DLLP_LAREG 0x108 /* Link Attention */
#define BCMA_CORE_PCI_DLLP_LSREG_LINKUP (1 << 16)
#define BCMA_CORE_PCI_DLLP_LAMASKREG 0x10C /* Link Attention Mask */
#define BCMA_CORE_PCI_DLLP_NEXTTXSEQNUMREG 0x110 /* Next Tx Seq Num */
#define BCMA_CORE_PCI_DLLP_ACKEDTXSEQNUMREG 0x114 /* Acked Tx Seq Num */
#define BCMA_CORE_PCI_DLLP_PURGEDTXSEQNUMREG 0x118 /* Purged Tx Seq Num */
#define BCMA_CORE_PCI_DLLP_RXSEQNUMREG 0x11C /* Rx Sequence Number */
#define BCMA_CORE_PCI_DLLP_LRREG 0x120 /* Link Replay */
#define BCMA_CORE_PCI_DLLP_LACKTOREG 0x124 /* Link Ack Timeout */
#define BCMA_CORE_PCI_DLLP_PMTHRESHREG 0x128 /* Power Management Threshold */
#define BCMA_CORE_PCI_DLLP_RTRYWPREG 0x12C /* Retry buffer write ptr */
#define BCMA_CORE_PCI_DLLP_RTRYRPREG 0x130 /* Retry buffer Read ptr */
#define BCMA_CORE_PCI_DLLP_RTRYPPREG 0x134 /* Retry buffer Purged ptr */
#define BCMA_CORE_PCI_DLLP_RTRRWREG 0x138 /* Retry buffer Read/Write */
#define BCMA_CORE_PCI_DLLP_ECTHRESHREG 0x13C /* Error Count Threshold */
#define BCMA_CORE_PCI_DLLP_TLPERRCTRREG 0x140 /* TLP Error Counter */
#define BCMA_CORE_PCI_DLLP_ERRCTRREG 0x144 /* Error Counter */
#define BCMA_CORE_PCI_DLLP_NAKRXCTRREG 0x148 /* NAK Received Counter */
#define BCMA_CORE_PCI_DLLP_TESTREG 0x14C /* Test */
#define BCMA_CORE_PCI_DLLP_PKTBIST 0x150 /* Packet BIST */
#define BCMA_CORE_PCI_DLLP_PCIE11 0x154 /* DLLP PCIE 1.1 reg */
/* SERDES RX registers */
#define BCMA_CORE_PCI_SERDES_RX_CTRL 1 /* Rx cntrl */
#define BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE 0x80 /* rxpolarity_force */
#define BCMA_CORE_PCI_SERDES_RX_CTRL_POLARITY 0x40 /* rxpolarity_value */
#define BCMA_CORE_PCI_SERDES_RX_TIMER1 2 /* Rx Timer1 */
#define BCMA_CORE_PCI_SERDES_RX_CDR 6 /* CDR */
#define BCMA_CORE_PCI_SERDES_RX_CDRBW 7 /* CDR BW */
/* SERDES PLL registers */
#define BCMA_CORE_PCI_SERDES_PLL_CTRL 1 /* PLL control reg */
#define BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN 0x4000 /* bit 14 is FREQDET on */
/* PCIcore specific boardflags */
#define BCMA_CORE_PCI_BFL_NOPCI 0x00000400 /* Board leaves PCI floating */
/* PCIE Config space accessing MACROS */
#define BCMA_CORE_PCI_CFG_BUS_SHIFT 24 /* Bus shift */
#define BCMA_CORE_PCI_CFG_SLOT_SHIFT 19 /* Slot/Device shift */
#define BCMA_CORE_PCI_CFG_FUN_SHIFT 16 /* Function shift */
#define BCMA_CORE_PCI_CFG_OFF_SHIFT 0 /* Register shift */
#define BCMA_CORE_PCI_CFG_BUS_MASK 0xff /* Bus mask */
#define BCMA_CORE_PCI_CFG_SLOT_MASK 0x1f /* Slot/Device mask */
#define BCMA_CORE_PCI_CFG_FUN_MASK 7 /* Function mask */
#define BCMA_CORE_PCI_CFG_OFF_MASK 0xfff /* Register mask */
/* PCIE Root Capability Register bits (Host mode only) */
#define BCMA_CORE_PCI_RC_CRS_VISIBILITY 0x0001
struct bcma_drv_pci;
#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
struct bcma_drv_pci_host {
struct bcma_drv_pci *pdev;
u32 host_cfg_addr;
spinlock_t cfgspace_lock;
struct pci_controller pci_controller;
struct pci_ops pci_ops;
struct resource mem_resource;
struct resource io_resource;
};
#endif
struct bcma_drv_pci {
struct bcma_device *core;
u8 setup_done:1;
u8 hostmode:1;
#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
struct bcma_drv_pci_host *host_controller;
#endif
};
/* Register access */
#define pcicore_read32(pc, offset) bcma_read32((pc)->core, offset)
#define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val)
-extern void bcma_core_pci_init(struct bcma_drv_pci *pc);
+extern void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc);
extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
struct bcma_device *core, bool enable);
extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
#endif /* LINUX_BCMA_DRIVER_PCI_H_ */


@@ -56,4 +56,31 @@
#define BCMA_PCI_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal powerup */
#define BCMA_PCI_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL powerdown */
/* SiliconBackplane Address Map.
* All regions may not exist on all chips.
*/
#define BCMA_SOC_SDRAM_BASE 0x00000000U /* Physical SDRAM */
#define BCMA_SOC_PCI_MEM 0x08000000U /* Host Mode sb2pcitranslation0 (64 MB) */
#define BCMA_SOC_PCI_MEM_SZ (64 * 1024 * 1024)
#define BCMA_SOC_PCI_CFG 0x0c000000U /* Host Mode sb2pcitranslation1 (64 MB) */
#define BCMA_SOC_SDRAM_SWAPPED 0x10000000U /* Byteswapped Physical SDRAM */
#define BCMA_SOC_SDRAM_R2 0x80000000U /* Region 2 for sdram (512 MB) */
#define BCMA_SOC_PCI_DMA 0x40000000U /* Client Mode sb2pcitranslation2 (1 GB) */
#define BCMA_SOC_PCI_DMA2 0x80000000U /* Client Mode sb2pcitranslation2 (1 GB) */
#define BCMA_SOC_PCI_DMA_SZ 0x40000000U /* Client Mode sb2pcitranslation2 size in bytes */
#define BCMA_SOC_PCIE_DMA_L32 0x00000000U /* PCIE Client Mode sb2pcitranslation2
* (2 ZettaBytes), low 32 bits
*/
#define BCMA_SOC_PCIE_DMA_H32 0x80000000U /* PCIE Client Mode sb2pcitranslation2
* (2 ZettaBytes), high 32 bits
*/
#define BCMA_SOC_PCI1_MEM 0x40000000U /* Host Mode sb2pcitranslation0 (64 MB) */
#define BCMA_SOC_PCI1_CFG 0x44000000U /* Host Mode sb2pcitranslation1 (64 MB) */
#define BCMA_SOC_PCIE1_DMA_H32 0xc0000000U /* PCIE Client Mode sb2pcitranslation2
* (2 ZettaBytes), high 32 bits
*/
#endif /* LINUX_BCMA_REGS_H_ */


@@ -92,17 +92,17 @@ struct linux_binfmt {
unsigned long min_coredump; /* minimal dump size */
};
-extern int __register_binfmt(struct linux_binfmt *fmt, int insert);
+extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
/* Registration of default binfmt handlers */
-static inline int register_binfmt(struct linux_binfmt *fmt)
+static inline void register_binfmt(struct linux_binfmt *fmt)
{
-return __register_binfmt(fmt, 0);
+__register_binfmt(fmt, 0);
}
/* Same as above, but adds a new binfmt at the top of the list */
-static inline int insert_binfmt(struct linux_binfmt *fmt)
+static inline void insert_binfmt(struct linux_binfmt *fmt)
{
-return __register_binfmt(fmt, 1);
+__register_binfmt(fmt, 1);
}
extern void unregister_binfmt(struct linux_binfmt *);


@@ -23,6 +23,7 @@
#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>
#ifdef CONFIG_BLOCK
@@ -101,10 +102,10 @@ static inline int bio_has_allocated_vec(struct bio *bio)
* I/O completely on that queue (see ide-dma for example)
*/
#define __bio_kmap_atomic(bio, idx, kmtype) \
-(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page, kmtype) + \
+(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) + \
bio_iovec_idx((bio), (idx))->bv_offset)
-#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr, kmtype)
+#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr)
/*
* merge helpers etc
@@ -317,7 +318,7 @@ static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
* balancing is a lot nicer this way
*/
local_irq_save(*flags);
-addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ);
+addr = (unsigned long) kmap_atomic(bvec->bv_page);
BUG_ON(addr & ~PAGE_MASK);
@@ -328,7 +329,7 @@ static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
-kunmap_atomic((void *) ptr, KM_BIO_SRC_IRQ);
+kunmap_atomic((void *) ptr);
local_irq_restore(*flags);
}


@@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/atomic.h>
#include <linux/bug.h>
/*
* bit-based spin_lock()


@@ -27,11 +27,22 @@ extern unsigned long __sw_hweight64(__u64 w);
(bit) = find_next_bit((addr), (size), (bit) + 1))
/* same as for_each_set_bit() but use bit as value to start with */
-#define for_each_set_bit_cont(bit, addr, size) \
+#define for_each_set_bit_from(bit, addr, size) \
for ((bit) = find_next_bit((addr), (size), (bit)); \
(bit) < (size); \
(bit) = find_next_bit((addr), (size), (bit) + 1))
#define for_each_clear_bit(bit, addr, size) \
for ((bit) = find_first_zero_bit((addr), (size)); \
(bit) < (size); \
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
/* same as for_each_clear_bit() but use bit as value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
(bit) < (size); \
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
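/*
 * Illustrative sketch of the new iterators (dump_free_slots() and its
 * arguments are made-up names, not part of this diff).
 */
static void dump_free_slots(const unsigned long *map, unsigned long nbits)
{
	unsigned long bit;

	for_each_clear_bit(bit, map, nbits)
		pr_info("slot %lu is free\n", bit);

	/* _from variant: resume scanning at the current value of 'bit' */
	bit = nbits / 2;
	for_each_clear_bit_from(bit, map, nbits)
		pr_info("slot %lu is free in the upper half\n", bit);
}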
static __inline__ int get_bitmask_order(unsigned int count)
{
int order;


@@ -11,6 +11,67 @@ enum bug_trap_type {
struct pt_regs;
#ifdef __CHECKER__
#define BUILD_BUG_ON_NOT_POWER_OF_2(n)
#define BUILD_BUG_ON_ZERO(e) (0)
#define BUILD_BUG_ON_NULL(e) ((void*)0)
#define BUILD_BUG_ON(condition)
#define BUILD_BUG() (0)
#else /* __CHECKER__ */
/* Force a compilation error if a constant expression is not a power of 2 */
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
/* Force a compilation error if condition is true, but also produce a
result (of value 0 and type size_t), so the expression can be used
e.g. in a structure initializer (or where-ever else comma expressions
aren't permitted). */
#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
/**
* BUILD_BUG_ON - break compile if a condition is true.
* @condition: the condition which the compiler should know is false.
*
* If you have some code which relies on certain constants being equal, or
* other compile-time-evaluated condition, you should use BUILD_BUG_ON to
* detect if someone changes it.
*
* The implementation uses gcc's reluctance to create a negative array, but
* gcc (as of 4.4) only emits that error for obvious cases (eg. not arguments
* to inline functions). So as a fallback we use the optimizer; if it can't
* prove the condition is false, it will cause a link error on the undefined
* "__build_bug_on_failed". This error message can be harder to track down
* though, hence the two different methods.
*/
#ifndef __OPTIMIZE__
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#else
extern int __build_bug_on_failed;
#define BUILD_BUG_ON(condition) \
do { \
((void)sizeof(char[1 - 2*!!(condition)])); \
if (condition) __build_bug_on_failed = 1; \
} while(0)
#endif
/**
* BUILD_BUG - break compile if used.
*
* If you have some code that you expect the compiler to eliminate at
* build time, you should use BUILD_BUG to detect if it is
* unexpectedly used.
*/
#define BUILD_BUG() \
do { \
extern void __build_bug_failed(void) \
__linktime_error("BUILD_BUG failed"); \
__build_bug_failed(); \
} while (0)
#endif /* __CHECKER__ */
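/*
 * Illustrative usage (not from this diff): the canonical compile-time
 * checks these macros enable. struct wire_header is hypothetical.
 */
struct wire_header {
	__le32 magic;
	__le32 len;
};

static inline void wire_header_sanity(void)
{
	/* breaks the build if the on-wire layout ever changes */
	BUILD_BUG_ON(sizeof(struct wire_header) != 8);
	BUILD_BUG_ON_NOT_POWER_OF_2(sizeof(struct wire_header));
}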
#ifdef CONFIG_GENERIC_BUG
#include <asm-generic/bug.h>


@@ -9,11 +9,12 @@
* the Free Software Foundation
*/
-#include <linux/device.h>
#include <linux/kmemcheck.h>
#define C2PORT_NAME_LEN 32
struct device;
/*
* C2 port basic structs
*/


@@ -92,7 +92,7 @@ void can_bus_off(struct net_device *dev);
void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
unsigned int idx);
-void can_get_echo_skb(struct net_device *dev, unsigned int idx);
+unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
void can_free_echo_skb(struct net_device *dev, unsigned int idx);
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);


@@ -910,7 +910,6 @@ struct mode_page_header {
#ifdef __KERNEL__
#include <linux/fs.h> /* not really needed, later.. */
#include <linux/device.h>
#include <linux/list.h>
struct packet_command


@@ -1,8 +1,9 @@
#ifndef __CEPH_DECODE_H
#define __CEPH_DECODE_H
-#include <asm/unaligned.h>
+#include <linux/bug.h>
#include <linux/time.h>
+#include <asm/unaligned.h>
#include "types.h"


@@ -7,6 +7,7 @@
#include <linux/backing-dev.h>
#include <linux/completion.h>
#include <linux/exportfs.h>
#include <linux/bug.h>
#include <linux/fs.h>
#include <linux/mempool.h>
#include <linux/pagemap.h>
@@ -207,7 +208,7 @@ extern struct kmem_cache *ceph_cap_cachep;
extern struct kmem_cache *ceph_dentry_cachep;
extern struct kmem_cache *ceph_file_cachep;
-extern int ceph_parse_options(struct ceph_options **popt, char *options,
+extern struct ceph_options *ceph_parse_options(char *options,
const char *dev_name, const char *dev_name_end,
int (*parse_extra_token)(char *c, void *private),
void *private);


@@ -1,6 +1,7 @@
#ifndef _FS_CEPH_MDSMAP_H
#define _FS_CEPH_MDSMAP_H
#include <linux/bug.h>
#include "types.h"
/*


@@ -14,8 +14,6 @@
struct ceph_msg;
struct ceph_connection;
-extern struct workqueue_struct *ceph_msgr_wq; /* receive work queue */
/*
* Ceph defines these callbacks for handling connection events.
*/
@@ -54,7 +52,6 @@ struct ceph_connection_operations {
struct ceph_messenger {
struct ceph_entity_inst inst; /* my name+address */
struct ceph_entity_addr my_enc_addr;
-struct page *zero_page; /* used in certain error cases */
bool nocrc;
@@ -101,7 +98,7 @@ struct ceph_msg {
struct ceph_msg_pos {
int page, page_pos; /* which page; offset in page */
int data_pos; /* offset in data payload */
-int did_page_crc; /* true if we've calculated crc for current page */
+bool did_page_crc; /* true if we've calculated crc for current page */
};
/* ceph connection fault delay defaults, for exponential backoff */


@@ -160,38 +160,6 @@ enum {
CGRP_CLONE_CHILDREN,
};
-/* which pidlist file are we talking about? */
-enum cgroup_filetype {
-CGROUP_FILE_PROCS,
-CGROUP_FILE_TASKS,
-};
-/*
- * A pidlist is a list of pids that virtually represents the contents of one
- * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
- * a pair (one each for procs, tasks) for each pid namespace that's relevant
- * to the cgroup.
- */
-struct cgroup_pidlist {
-/*
- * used to find which pidlist is wanted. doesn't change as long as
- * this particular list stays in the list.
- */
-struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
-/* array of xids */
-pid_t *list;
-/* how many elements the above list has */
-int length;
-/* how many files are using the current array */
-int use_count;
-/* each of these stored in a list by its cgroup */
-struct list_head links;
-/* pointer to the cgroup we belong to, for list removal purposes */
-struct cgroup *owner;
-/* protects the other fields */
-struct rw_semaphore mutex;
-};
struct cgroup {
unsigned long flags; /* "unsigned long" so bitops work */
@@ -484,23 +452,18 @@ int cgroup_taskset_size(struct cgroup_taskset *tset);
*/
struct cgroup_subsys {
-struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss,
-struct cgroup *cgrp);
-int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
-void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
-int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
-struct cgroup_taskset *tset);
-void (*cancel_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
-struct cgroup_taskset *tset);
-void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
-struct cgroup_taskset *tset);
-void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
-void (*exit)(struct cgroup_subsys *ss, struct cgroup *cgrp,
-struct cgroup *old_cgrp, struct task_struct *task);
-int (*populate)(struct cgroup_subsys *ss,
-struct cgroup *cgrp);
-void (*post_clone)(struct cgroup_subsys *ss, struct cgroup *cgrp);
-void (*bind)(struct cgroup_subsys *ss, struct cgroup *root);
+struct cgroup_subsys_state *(*create)(struct cgroup *cgrp);
+int (*pre_destroy)(struct cgroup *cgrp);
+void (*destroy)(struct cgroup *cgrp);
+int (*can_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
+void (*cancel_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
+void (*attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
+void (*fork)(struct task_struct *task);
+void (*exit)(struct cgroup *cgrp, struct cgroup *old_cgrp,
+struct task_struct *task);
+int (*populate)(struct cgroup_subsys *ss, struct cgroup *cgrp);
+void (*post_clone)(struct cgroup *cgrp);
+void (*bind)(struct cgroup *root);
int subsys_id;
int active;
@@ -535,7 +498,7 @@ struct cgroup_subsys {
struct list_head sibling;
/* used when use_id == true */
struct idr idr;
-rwlock_t id_lock;
+spinlock_t id_lock;
/* should be defined only by modular subsystems */
struct module *module;
@@ -602,11 +565,6 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan);
int cgroup_attach_task(struct cgroup *, struct task_struct *);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
-static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
-{
-return cgroup_attach_task_all(current, tsk);
-}
/*
* CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
* if cgroup_subsys.use_id == true. It can be used for looking up and scanning.
@@ -669,10 +627,6 @@ static inline int cgroup_attach_task_all(struct task_struct *from,
{
return 0;
}
-static inline int cgroup_attach_task_current_cg(struct task_struct *t)
-{
-return 0;
-}
#endif /* !CONFIG_CGROUPS */


@@ -28,9 +28,9 @@ struct cleancache_ops {
pgoff_t, struct page *);
void (*put_page)(int, struct cleancache_filekey,
pgoff_t, struct page *);
-void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
-void (*flush_inode)(int, struct cleancache_filekey);
-void (*flush_fs)(int);
+void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
+void (*invalidate_inode)(int, struct cleancache_filekey);
+void (*invalidate_fs)(int);
};
extern struct cleancache_ops
@@ -39,9 +39,9 @@ extern void __cleancache_init_fs(struct super_block *);
extern void __cleancache_init_shared_fs(char *, struct super_block *);
extern int __cleancache_get_page(struct page *);
extern void __cleancache_put_page(struct page *);
-extern void __cleancache_flush_page(struct address_space *, struct page *);
-extern void __cleancache_flush_inode(struct address_space *);
-extern void __cleancache_flush_fs(struct super_block *);
+extern void __cleancache_invalidate_page(struct address_space *, struct page *);
+extern void __cleancache_invalidate_inode(struct address_space *);
+extern void __cleancache_invalidate_fs(struct super_block *);
extern int cleancache_enabled;
#ifdef CONFIG_CLEANCACHE
@@ -99,24 +99,24 @@ static inline void cleancache_put_page(struct page *page)
__cleancache_put_page(page);
}
-static inline void cleancache_flush_page(struct address_space *mapping,
+static inline void cleancache_invalidate_page(struct address_space *mapping,
struct page *page)
{
/* careful... page->mapping is NULL sometimes when this is called */
if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
-__cleancache_flush_page(mapping, page);
+__cleancache_invalidate_page(mapping, page);
}
-static inline void cleancache_flush_inode(struct address_space *mapping)
+static inline void cleancache_invalidate_inode(struct address_space *mapping)
{
if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
-__cleancache_flush_inode(mapping);
+__cleancache_invalidate_inode(mapping);
}
-static inline void cleancache_flush_fs(struct super_block *sb)
+static inline void cleancache_invalidate_fs(struct super_block *sb)
{
if (cleancache_enabled)
-__cleancache_flush_fs(sb);
+__cleancache_invalidate_fs(sb);
}
#endif /* _LINUX_CLEANCACHE_H */
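For backend implementers the flush_* to invalidate_* rename is mechanical. A hedged sketch of an ops table after this change (the my_* callbacks are hypothetical; fields other than those shown in this hunk are elided):

static struct cleancache_ops my_cleancache_ops = {
	/* .init_fs and friends elided */
	.get_page	  = my_get_page,
	.put_page	  = my_put_page,
	.invalidate_page  = my_invalidate_page,
	.invalidate_inode = my_invalidate_inode,
	.invalidate_fs	  = my_invalidate_fs,
};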

include/linux/clk-private.h (new file, 196 lines)

@@ -0,0 +1,196 @@
/*
* linux/include/linux/clk-private.h
*
* Copyright (c) 2010-2011 Jeremy Kerr <jeremy.kerr@canonical.com>
* Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __LINUX_CLK_PRIVATE_H
#define __LINUX_CLK_PRIVATE_H
#include <linux/clk-provider.h>
#include <linux/list.h>
/*
* WARNING: Do not include clk-private.h from any file that implements struct
* clk_ops. Doing so is a layering violation!
*
* This header exists only to allow for statically initialized clock data. Any
* static clock data must be defined in a separate file from the logic that
* implements the clock operations for that same data.
*/
#ifdef CONFIG_COMMON_CLK
struct clk {
const char *name;
const struct clk_ops *ops;
struct clk_hw *hw;
struct clk *parent;
char **parent_names;
struct clk **parents;
u8 num_parents;
unsigned long rate;
unsigned long new_rate;
unsigned long flags;
unsigned int enable_count;
unsigned int prepare_count;
struct hlist_head children;
struct hlist_node child_node;
unsigned int notifier_count;
#ifdef CONFIG_COMMON_CLK_DEBUG
struct dentry *dentry;
#endif
};
/*
* DOC: Basic clock implementations common to many platforms
*
* Each basic clock hardware type is comprised of a structure describing the
* clock hardware, implementations of the relevant callbacks in struct clk_ops,
* unique flags for that hardware type, a registration function and an
* alternative macro for static initialization
*/
extern struct clk_ops clk_fixed_rate_ops;
#define DEFINE_CLK_FIXED_RATE(_name, _flags, _rate, \
_fixed_rate_flags) \
static struct clk _name; \
static char *_name##_parent_names[] = {}; \
static struct clk_fixed_rate _name##_hw = { \
.hw = { \
.clk = &_name, \
}, \
.fixed_rate = _rate, \
.flags = _fixed_rate_flags, \
}; \
static struct clk _name = { \
.name = #_name, \
.ops = &clk_fixed_rate_ops, \
.hw = &_name##_hw.hw, \
.parent_names = _name##_parent_names, \
.num_parents = \
ARRAY_SIZE(_name##_parent_names), \
.flags = _flags, \
};
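/*
 * Illustrative use of the macro above (clock name and rate invented):
 * a platform can define a 24 MHz root oscillator statically and hand it
 * to __clk_init() during early boot instead of calling clk_register().
 */
DEFINE_CLK_FIXED_RATE(osc24m, CLK_IS_ROOT, 24000000, 0);

static void __init board_clocks_init(void)
{
	__clk_init(NULL, &osc24m);
}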
extern struct clk_ops clk_gate_ops;
#define DEFINE_CLK_GATE(_name, _parent_name, _parent_ptr, \
_flags, _reg, _bit_idx, \
_gate_flags, _lock) \
static struct clk _name; \
static char *_name##_parent_names[] = { \
_parent_name, \
}; \
static struct clk *_name##_parents[] = { \
_parent_ptr, \
}; \
static struct clk_gate _name##_hw = { \
.hw = { \
.clk = &_name, \
}, \
.reg = _reg, \
.bit_idx = _bit_idx, \
.flags = _gate_flags, \
.lock = _lock, \
}; \
static struct clk _name = { \
.name = #_name, \
.ops = &clk_gate_ops, \
.hw = &_name##_hw.hw, \
.parent_names = _name##_parent_names, \
.num_parents = \
ARRAY_SIZE(_name##_parent_names), \
.parents = _name##_parents, \
.flags = _flags, \
};
extern struct clk_ops clk_divider_ops;
#define DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \
_flags, _reg, _shift, _width, \
_divider_flags, _lock) \
static struct clk _name; \
static char *_name##_parent_names[] = { \
_parent_name, \
}; \
static struct clk *_name##_parents[] = { \
_parent_ptr, \
}; \
static struct clk_divider _name##_hw = { \
.hw = { \
.clk = &_name, \
}, \
.reg = _reg, \
.shift = _shift, \
.width = _width, \
.flags = _divider_flags, \
.lock = _lock, \
}; \
static struct clk _name = { \
.name = #_name, \
.ops = &clk_divider_ops, \
.hw = &_name##_hw.hw, \
.parent_names = _name##_parent_names, \
.num_parents = \
ARRAY_SIZE(_name##_parent_names), \
.parents = _name##_parents, \
.flags = _flags, \
};
extern struct clk_ops clk_mux_ops;
#define DEFINE_CLK_MUX(_name, _parent_names, _parents, _flags, \
_reg, _shift, _width, \
_mux_flags, _lock) \
static struct clk _name; \
static struct clk_mux _name##_hw = { \
.hw = { \
.clk = &_name, \
}, \
.reg = _reg, \
.shift = _shift, \
.width = _width, \
.flags = _mux_flags, \
.lock = _lock, \
}; \
static struct clk _name = { \
.name = #_name, \
.ops = &clk_mux_ops, \
.hw = &_name##_hw.hw, \
.parent_names = _parent_names, \
.num_parents = \
ARRAY_SIZE(_parent_names), \
.parents = _parents, \
.flags = _flags, \
};
/**
* __clk_init - initialize the data structures in a struct clk
* @dev: device initializing this clk, placeholder for now
* @clk: clk being initialized
*
* Initializes the lists in struct clk, queries the hardware for the
* parent and rate and sets them both.
*
* Any struct clk passed into __clk_init must have the following members
* populated:
* .name
* .ops
* .hw
* .parent_names
* .num_parents
* .flags
*
* It is not necessary to call clk_register if __clk_init is used directly with
* statically initialized clock data.
*/
void __clk_init(struct device *dev, struct clk *clk);
#endif /* CONFIG_COMMON_CLK */
#endif /* CLK_PRIVATE_H */

include/linux/clk-provider.h (new file)

@@ -0,0 +1,300 @@
/*
* linux/include/linux/clk-provider.h
*
* Copyright (c) 2010-2011 Jeremy Kerr <jeremy.kerr@canonical.com>
* Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __LINUX_CLK_PROVIDER_H
#define __LINUX_CLK_PROVIDER_H
#include <linux/clk.h>
#ifdef CONFIG_COMMON_CLK
/**
* struct clk_hw - handle for traversing from a struct clk to its corresponding
* hardware-specific structure. struct clk_hw should be declared within struct
* clk_foo and then referenced by the struct clk instance that uses struct
* clk_foo's clk_ops
*
* clk: pointer to the struct clk instance that points back to this struct
* clk_hw instance
*/
struct clk_hw {
struct clk *clk;
};
/*
* flags used across common struct clk. these flags should only affect the
* top-level framework. custom flags for dealing with hardware specifics
* belong in struct clk_foo
*/
#define CLK_SET_RATE_GATE BIT(0) /* must be gated across rate change */
#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */
#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */
#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */
#define CLK_IS_ROOT BIT(4) /* root clk, has no parent */
/**
* struct clk_ops - Callback operations for hardware clocks; these are to
* be provided by the clock implementation, and will be called by drivers
* through the clk_* api.
*
* @prepare: Prepare the clock for enabling. This must not return until
* the clock is fully prepared, and it's safe to call clk_enable.
* This callback is intended to allow clock implementations to
* do any initialisation that may sleep. Called with
* prepare_lock held.
*
* @unprepare: Release the clock from its prepared state. This will typically
* undo any work done in the @prepare callback. Called with
* prepare_lock held.
*
* @enable: Enable the clock atomically. This must not return until the
* clock is generating a valid clock signal, usable by consumer
* devices. Called with enable_lock held. This function must not
* sleep.
*
* @disable: Disable the clock atomically. Called with enable_lock held.
* This function must not sleep.
*
* @recalc_rate: Recalculate the rate of this clock, by querying hardware. The
* parent rate is an input parameter. It is up to the caller to
* ensure that the prepare_mutex is held across this call.
* Returns the calculated rate. Optional, but recommended - if
* this op is not set then clock rate will be initialized to 0.
*
* @round_rate: Given a target rate as input, returns the closest rate actually
* supported by the clock.
*
* @get_parent: Queries the hardware to determine the parent of a clock. The
* return value is a u8 which specifies the index corresponding to
* the parent clock. This index can be applied to either the
* .parent_names or .parents arrays. In short, this function
* translates the parent value read from hardware into an array
* index. Currently only called when the clock is initialized by
* __clk_init. This callback is mandatory for clocks with
* multiple parents. It is optional (and unnecessary) for clocks
* with 0 or 1 parents.
*
* @set_parent: Change the input source of this clock; for clocks with multiple
* possible parents specify a new parent by passing in the index
* as a u8 corresponding to the parent in either the .parent_names
* or .parents arrays. This function in effect translates an
* array index into the value programmed into the hardware.
* Returns 0 on success, -EERROR otherwise.
*
* @set_rate: Change the rate of this clock. If this callback returns
* CLK_SET_RATE_PARENT, the rate change will be propagated to the
* parent clock (which may propagate again if the parent clock
* also sets this flag). The requested rate of the parent is
* passed back from the callback in the second 'unsigned long *'
* argument. Note that it is up to the hardware clock's set_rate
* implementation to ensure that clocks do not run out of spec
* when propagating the call to set_rate up to the parent. One way
* to do this is to gate the clock (via clk_disable and/or
* clk_unprepare) before calling clk_set_rate, then ungating it
* afterward. If your clock also has the CLK_SET_RATE_GATE flag
* set then this will ensure safety. Returns 0 on success,
* -EERROR otherwise.
*
* The clk_enable/clk_disable and clk_prepare/clk_unprepare pairs allow
* implementations to split any work between atomic (enable) and sleepable
* (prepare) contexts. If enabling a clock requires code that might sleep,
* this must be done in clk_prepare. Clock enable code that will never be
* called in a sleepable context may be implemented in clk_enable.
*
* Typically, drivers will call clk_prepare when a clock may be needed later
* (eg. when a device is opened), and clk_enable when the clock is actually
* required (eg. from an interrupt). Note that clk_prepare MUST have been
* called before clk_enable.
*/
struct clk_ops {
int (*prepare)(struct clk_hw *hw);
void (*unprepare)(struct clk_hw *hw);
int (*enable)(struct clk_hw *hw);
void (*disable)(struct clk_hw *hw);
int (*is_enabled)(struct clk_hw *hw);
unsigned long (*recalc_rate)(struct clk_hw *hw,
unsigned long parent_rate);
long (*round_rate)(struct clk_hw *hw, unsigned long,
unsigned long *);
int (*set_parent)(struct clk_hw *hw, u8 index);
u8 (*get_parent)(struct clk_hw *hw);
int (*set_rate)(struct clk_hw *hw, unsigned long);
void (*init)(struct clk_hw *hw);
};
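/*
 * Illustrative consumer-side sketch of the prepare/enable split described
 * above (the foo_* names are hypothetical): blocking setup where sleeping
 * is allowed, atomic gating where it is not.
 */
struct foo_dev {
	struct clk *clk;
};

static int foo_start(struct foo_dev *foo)
{
	int ret;

	ret = clk_prepare(foo->clk);	/* may sleep */
	if (ret)
		return ret;
	return clk_enable(foo->clk);	/* atomic, must not sleep */
}

static void foo_stop(struct foo_dev *foo)
{
	clk_disable(foo->clk);
	clk_unprepare(foo->clk);
}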
/*
* DOC: Basic clock implementations common to many platforms
*
* Each basic clock hardware type is comprised of a structure describing the
* clock hardware, implementations of the relevant callbacks in struct clk_ops,
* unique flags for that hardware type, a registration function and an
* alternative macro for static initialization
*/
/**
* struct clk_fixed_rate - fixed-rate clock
* @hw: handle between common and hardware-specific interfaces
* @fixed_rate: constant frequency of clock
*/
struct clk_fixed_rate {
struct clk_hw hw;
unsigned long fixed_rate;
u8 flags;
};
struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned long fixed_rate);
/**
* struct clk_gate - gating clock
*
* @hw: handle between common and hardware-specific interfaces
* @reg: register controlling gate
* @bit_idx: single bit controlling gate
* @flags: hardware-specific flags
* @lock: register lock
*
* Clock which can gate its output. Implements .enable & .disable
*
* Flags:
* CLK_GATE_SET_TO_DISABLE - by default this clock sets the bit at bit_idx to
* enable the clock. Setting this flag does the opposite: setting the bit
* disables the clock and clearing it enables the clock
*/
struct clk_gate {
struct clk_hw hw;
void __iomem *reg;
u8 bit_idx;
u8 flags;
spinlock_t *lock;
char *parent[1];
};
#define CLK_GATE_SET_TO_DISABLE BIT(0)
struct clk *clk_register_gate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 bit_idx,
u8 clk_gate_flags, spinlock_t *lock);
/**
* struct clk_divider - adjustable divider clock
*
* @hw: handle between common and hardware-specific interfaces
* @reg: register containing the divider
* @shift: shift to the divider bit field
* @width: width of the divider bit field
* @lock: register lock
*
* Clock with an adjustable divider affecting its output frequency. Implements
* .recalc_rate, .set_rate and .round_rate
*
* Flags:
* CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the
* register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is
* the raw value read from the register, with the value of zero considered
* invalid
* CLK_DIVIDER_POWER_OF_TWO - clock divisor is 2 raised to the value read from
* the hardware register
*/
struct clk_divider {
struct clk_hw hw;
void __iomem *reg;
u8 shift;
u8 width;
u8 flags;
spinlock_t *lock;
char *parent[1];
};
#define CLK_DIVIDER_ONE_BASED BIT(0)
#define CLK_DIVIDER_POWER_OF_TWO BIT(1)
struct clk *clk_register_divider(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_divider_flags, spinlock_t *lock);
/**
* struct clk_mux - multiplexer clock
*
* @hw: handle between common and hardware-specific interfaces
* @reg: register controlling multiplexer
* @shift: shift to multiplexer bit field
* @width: width of multiplexer bit field
* @num_clks: number of parent clocks
* @lock: register lock
*
* Clock with multiple selectable parents. Implements .get_parent, .set_parent
* and .recalc_rate
*
* Flags:
* CLK_MUX_INDEX_ONE - register index starts at 1, not 0
* CLK_MUX_INDEX_BIT - register index is a single bit (power of two)
*/
struct clk_mux {
struct clk_hw hw;
void __iomem *reg;
u8 shift;
u8 width;
u8 flags;
spinlock_t *lock;
};
#define CLK_MUX_INDEX_ONE BIT(0)
#define CLK_MUX_INDEX_BIT BIT(1)
struct clk *clk_register_mux(struct device *dev, const char *name,
char **parent_names, u8 num_parents, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_mux_flags, spinlock_t *lock);
/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
* @name: clock name
* @ops: operations this clock supports
* @hw: link to hardware-specific clock data
* @parent_names: array of string names for all possible parents
* @num_parents: number of possible parents
* @flags: framework-level hints and quirks
*
* clk_register is the primary interface for populating the clock tree with new
* clock nodes. It returns a pointer to the newly allocated struct clk which
* cannot be dereferenced by driver code but may be used in conjunction with the
* rest of the clock API.
*/
struct clk *clk_register(struct device *dev, const char *name,
const struct clk_ops *ops, struct clk_hw *hw,
char **parent_names, u8 num_parents, unsigned long flags);
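/*
 * Illustrative registration sketch (struct clk_foo and the clk_foo_* ops
 * are placeholders, not part of this diff): embed clk_hw in the
 * hardware-specific struct, then publish it with clk_register().
 */
struct clk_foo {
	struct clk_hw hw;
	void __iomem *reg;
};

static const struct clk_ops clk_foo_ops = {
	.enable		= clk_foo_enable,
	.disable	= clk_foo_disable,
	.recalc_rate	= clk_foo_recalc_rate,
};

static char *clk_foo_parents[] = { "osc24m" };

struct clk *clk_foo_register(struct device *dev, void __iomem *reg)
{
	struct clk_foo *foo;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return NULL;
	foo->reg = reg;
	return clk_register(dev, "foo", &clk_foo_ops, &foo->hw,
			    clk_foo_parents, ARRAY_SIZE(clk_foo_parents), 0);
}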
/* helper functions */
const char *__clk_get_name(struct clk *clk);
struct clk_hw *__clk_get_hw(struct clk *clk);
u8 __clk_get_num_parents(struct clk *clk);
struct clk *__clk_get_parent(struct clk *clk);
inline int __clk_get_enable_count(struct clk *clk);
inline int __clk_get_prepare_count(struct clk *clk);
unsigned long __clk_get_rate(struct clk *clk);
unsigned long __clk_get_flags(struct clk *clk);
int __clk_is_enabled(struct clk *clk);
struct clk *__clk_lookup(const char *name);
/*
* FIXME clock api without lock protection
*/
int __clk_prepare(struct clk *clk);
void __clk_unprepare(struct clk *clk);
void __clk_reparent(struct clk *clk, struct clk *new_parent);
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate);
#endif /* CONFIG_COMMON_CLK */
#endif /* CLK_PROVIDER_H */


@@ -3,6 +3,7 @@
*
* Copyright (C) 2004 ARM Limited.
* Written by Deep Blue Solutions Limited.
* Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -12,19 +13,76 @@
#define __LINUX_CLK_H
#include <linux/kernel.h>
#include <linux/notifier.h>
struct device;
/*
* The base API.
*/
/*
* struct clk - a machine class defined object / cookie.
*/
struct clk;
#ifdef CONFIG_COMMON_CLK
/**
* DOC: clk notifier callback types
*
* PRE_RATE_CHANGE - called immediately before the clk rate is changed,
* to indicate that the rate change will proceed. Drivers must
* immediately terminate any operations that will be affected by the
* rate change. Callbacks may either return NOTIFY_DONE or
* NOTIFY_STOP.
*
* ABORT_RATE_CHANGE - called if the rate change failed for some reason
* after PRE_RATE_CHANGE. In this case, all registered notifiers on
* the clk will be called with ABORT_RATE_CHANGE. Callbacks must
* always return NOTIFY_DONE.
*
* POST_RATE_CHANGE - called after the clk rate change has successfully
* completed. Callbacks must always return NOTIFY_DONE.
*
*/
#define PRE_RATE_CHANGE BIT(0)
#define POST_RATE_CHANGE BIT(1)
#define ABORT_RATE_CHANGE BIT(2)
/**
* struct clk_notifier - associate a clk with a notifier
* @clk: struct clk * to associate the notifier with
* @notifier_head: a blocking_notifier_head for this clk
* @node: linked list pointers
*
* A list of struct clk_notifier is maintained by the notifier code.
* An entry is created whenever code registers the first notifier on a
* particular @clk. Future notifiers on that @clk are added to the
* @notifier_head.
*/
struct clk_notifier {
struct clk *clk;
struct srcu_notifier_head notifier_head;
struct list_head node;
};
/**
* struct clk_notifier_data - rate data to pass to the notifier callback
* @clk: struct clk * being changed
* @old_rate: previous rate of this clk
* @new_rate: new rate of this clk
*
* For a pre-notifier, old_rate is the clk's rate before this rate
* change, and new_rate is what the rate will be in the future. For a
* post-notifier, old_rate and new_rate are both set to the clk's
* current rate (this was done to optimize the implementation).
*/
struct clk_notifier_data {
struct clk *clk;
unsigned long old_rate;
unsigned long new_rate;
};
int clk_notifier_register(struct clk *clk, struct notifier_block *nb);
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
#endif /* CONFIG_COMMON_CLK */
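/*
 * A hypothetical consumer-side sketch of the notifier interface above;
 * the names are made up, and the event handling follows the DOC block
 * in this header.
 */
static int my_clk_notifier_cb(struct notifier_block *nb,
			      unsigned long event, void *data)
{
	struct clk_notifier_data *cnd = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		/* quiesce anything that depends on cnd->old_rate */
		return NOTIFY_DONE;	/* or NOTIFY_STOP to veto */
	case ABORT_RATE_CHANGE:
		/* undo the PRE_RATE_CHANGE work */
		return NOTIFY_DONE;
	case POST_RATE_CHANGE:
		/* reprogram for cnd->new_rate */
		return NOTIFY_DONE;
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_clk_nb = {
	.notifier_call = my_clk_notifier_cb,
};
/* ... later: clk_notifier_register(clk, &my_clk_nb); */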
/**
* clk_get - lookup and obtain a reference to a clock producer.
* @dev: device for clock "consumer"

View File

@@ -319,13 +319,6 @@ static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz)
__clocksource_updatefreq_scale(cs, 1000, khz);
}
static inline void
clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec)
{
return clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
NSEC_PER_SEC, minsec);
}
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void
update_vsyscall(struct timespec *ts, struct timespec *wtm,

View File

@@ -16,7 +16,6 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/system.h>
/* this is used only to give gcc a clue about good code generation */
union cnt32_to_63 {

View File

@@ -23,6 +23,7 @@ extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *mask,
bool sync);
extern int compact_pgdat(pg_data_t *pgdat, int order);
extern unsigned long compaction_suitable(struct zone *zone, int order);
/* Do not skip compaction more than 64 times */
@@ -33,20 +34,26 @@ extern unsigned long compaction_suitable(struct zone *zone, int order);
* allocation success. 1 << compact_defer_limit compactions are skipped up
* to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
*/
static inline void defer_compaction(struct zone *zone)
static inline void defer_compaction(struct zone *zone, int order)
{
zone->compact_considered = 0;
zone->compact_defer_shift++;
if (order < zone->compact_order_failed)
zone->compact_order_failed = order;
if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}
/* Returns true if compaction should be skipped this time */
static inline bool compaction_deferred(struct zone *zone)
static inline bool compaction_deferred(struct zone *zone, int order)
{
unsigned long defer_limit = 1UL << zone->compact_defer_shift;
if (order < zone->compact_order_failed)
return false;
/* Avoid possible overflow */
if (++zone->compact_considered > defer_limit)
zone->compact_considered = defer_limit;
@@ -62,16 +69,21 @@ static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
return COMPACT_CONTINUE;
}
static inline int compact_pgdat(pg_data_t *pgdat, int order)
{
return COMPACT_CONTINUE;
}
static inline unsigned long compaction_suitable(struct zone *zone, int order)
{
return COMPACT_SKIPPED;
}
static inline void defer_compaction(struct zone *zone)
static inline void defer_compaction(struct zone *zone, int order)
{
}
static inline bool compaction_deferred(struct zone *zone)
static inline bool compaction_deferred(struct zone *zone, int order)
{
return 1;
}
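/*
 * A sketch of the intended caller pattern for the deferral API (the real
 * users live in mm/; the function name here is illustrative): each failed
 * compaction doubles the number of skipped attempts, capped at
 * 1 << COMPACT_MAX_DEFER_SHIFT.
 */
static bool try_compaction_once(struct zone *zone, struct zonelist *zl,
				int order, gfp_t gfp_mask, nodemask_t *nodes)
{
	if (compaction_deferred(zone, order))
		return false;			/* still backing off */

	if (try_to_compact_pages(zl, order, gfp_mask, nodes, false)
						== COMPACT_PARTIAL)
		return true;			/* made progress */

	defer_compaction(zone, order);		/* widen the backoff window */
	return false;
}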

View File

@@ -244,6 +244,7 @@ struct compat_sysinfo;
struct compat_sysctl_args;
struct compat_kexec_segment;
struct compat_mq_attr;
struct compat_msgbuf;
extern void compat_exit_robust_list(struct task_struct *curr);
@@ -254,13 +255,22 @@ asmlinkage long
compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
compat_size_t __user *len_ptr);
#ifdef CONFIG_ARCH_WANT_OLD_COMPAT_IPC
long compat_sys_semctl(int first, int second, int third, void __user *uptr);
long compat_sys_msgsnd(int first, int second, int third, void __user *uptr);
long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
int version, void __user *uptr);
long compat_sys_msgctl(int first, int second, void __user *uptr);
long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
void __user *uptr);
#else
long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
long compat_sys_msgsnd(int msqid, struct compat_msgbuf __user *msgp,
size_t msgsz, int msgflg);
long compat_sys_msgrcv(int msqid, struct compat_msgbuf __user *msgp,
size_t msgsz, long msgtyp, int msgflg);
long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
#endif
long compat_sys_msgctl(int first, int second, void __user *uptr);
long compat_sys_shmctl(int first, int second, void __user *uptr);
long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
unsigned nsems, const struct compat_timespec __user *timeout);

View File

@@ -87,7 +87,8 @@
*/
#define __pure __attribute__((pure))
#define __aligned(x) __attribute__((aligned(x)))
#define __printf(a,b) __attribute__((format(printf,a,b)))
#define __printf(a, b) __attribute__((format(printf, a, b)))
#define __scanf(a, b) __attribute__((format(scanf, a, b)))
#define noinline __attribute__((noinline))
#define __attribute_const__ __attribute__((__const__))
#define __maybe_unused __attribute__((unused))

View File

@@ -236,7 +236,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
/*
* Rather than using noinline to prevent stack consumption, use
* noinline_for_stack instead. For documentaiton reasons.
* noinline_for_stack instead. For documentation reasons.
*/
#define noinline_for_stack noinline

View File

@@ -43,6 +43,7 @@
#define CN_IDX_DRBD 0x8
#define CN_VAL_DRBD 0x1
#define CN_KVP_IDX 0x9 /* HyperV KVP */
#define CN_KVP_VAL 0x1 /* queries from the kernel */
#define CN_NETLINK_USERS 10 /* Highest index + 1 */

View File

@@ -14,11 +14,12 @@
#ifndef _LINUX_CPU_H_
#define _LINUX_CPU_H_
#include <linux/device.h>
#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
struct device;
struct cpu {
int node_id; /* The node which contains the CPU */
int hotpluggable; /* creates sysfs control file if hotpluggable */
@@ -44,6 +45,13 @@ extern ssize_t arch_cpu_release(const char *, size_t);
#endif
struct notifier_block;
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
extern int arch_cpu_uevent(struct device *dev, struct kobj_uevent_env *env);
extern ssize_t arch_print_cpu_modalias(struct device *dev,
struct device_attribute *attr,
char *bufptr);
#endif
/*
* CPU notifier priorities.
*/

View File

@@ -14,7 +14,6 @@
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/threads.h>
#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/completion.h>
@@ -35,6 +34,7 @@
#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
extern void disable_cpufreq(void);
#else /* CONFIG_CPU_FREQ */
static inline int cpufreq_register_notifier(struct notifier_block *nb,
unsigned int list)
@@ -46,6 +46,7 @@ static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
{
return 0;
}
static inline void disable_cpufreq(void) { }
#endif /* CONFIG_CPU_FREQ */
/* if (cpufreq_driver->target) exists, the ->governor decides what frequency

View File

@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/completion.h>
#include <linux/hrtimer.h>
#define CPUIDLE_STATE_MAX 8
#define CPUIDLE_NAME_LEN 16
@@ -43,12 +44,15 @@ struct cpuidle_state {
unsigned int flags;
unsigned int exit_latency; /* in US */
unsigned int power_usage; /* in mW */
int power_usage; /* in mW */
unsigned int target_residency; /* in US */
unsigned int disable;
int (*enter) (struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index);
int (*enter_dead) (struct cpuidle_device *dev, int index);
};
/* Idle State Flags */
@@ -96,7 +100,6 @@ struct cpuidle_device {
struct list_head device_list;
struct kobject kobj;
struct completion kobj_unregister;
void *governor_data;
};
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
@@ -118,10 +121,12 @@ static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
****************************/
struct cpuidle_driver {
char name[CPUIDLE_NAME_LEN];
const char *name;
struct module *owner;
unsigned int power_specified:1;
/* set to 1 to use the core cpuidle time keeping (for all states). */
unsigned int en_core_tk_irqen:1;
struct cpuidle_state states[CPUIDLE_STATE_MAX];
int state_count;
int safe_state_index;
@@ -140,6 +145,11 @@ extern void cpuidle_pause_and_lock(void);
extern void cpuidle_resume_and_unlock(void);
extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev);
extern int cpuidle_wrap_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index,
int (*enter)(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index));
extern int cpuidle_play_dead(void);
#else
static inline void disable_cpuidle(void) { }
@@ -157,6 +167,12 @@ static inline void cpuidle_resume_and_unlock(void) { }
static inline int cpuidle_enable_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
static inline int cpuidle_wrap_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index,
int (*enter)(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index))
{ return -ENODEV; }
static inline int cpuidle_play_dead(void) {return -ENODEV; }
#endif

View File

@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
@@ -763,12 +764,6 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
*
*/
#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
/* These strip const, as traditionally they weren't const. */
#define cpu_possible_map (*(cpumask_t *)cpu_possible_mask)
#define cpu_online_map (*(cpumask_t *)cpu_online_mask)
#define cpu_present_map (*(cpumask_t *)cpu_present_mask)
#define cpu_active_map (*(cpumask_t *)cpu_active_mask)
#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
@@ -809,11 +804,10 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
#else /* NR_CPUS > 1 */
int __first_cpu(const cpumask_t *srcp);
int __next_cpu(int n, const cpumask_t *srcp);
int __any_online_cpu(const cpumask_t *mask);
#define first_cpu(src) __first_cpu(&(src))
#define next_cpu(n, src) __next_cpu((n), &(src))
#define any_online_cpu(mask) __any_online_cpu(&(mask))
#define any_online_cpu(mask) cpumask_any_and(&mask, cpu_online_mask)
#define for_each_cpu_mask(cpu, mask) \
for ((cpu) = -1; \
(cpu) = next_cpu((cpu), (mask)), \

View File

@@ -22,7 +22,7 @@ extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
@@ -89,42 +89,33 @@ extern void rebuild_sched_domains(void);
extern void cpuset_print_task_mems_allowed(struct task_struct *p);
/*
* reading current mems_allowed and mempolicy in the fastpath must be protected
* by get_mems_allowed()
* get_mems_allowed is required when making decisions involving mems_allowed
* such as during page allocation. mems_allowed can be updated in parallel
* and depending on the new value an operation can fail potentially causing
* process failure. A retry loop with get_mems_allowed and put_mems_allowed
* prevents these artificial failures.
*/
static inline void get_mems_allowed(void)
static inline unsigned int get_mems_allowed(void)
{
current->mems_allowed_change_disable++;
/*
* ensure that reading mems_allowed and mempolicy happens after the
* update of ->mems_allowed_change_disable.
*
* the write-side task finds ->mems_allowed_change_disable is not 0,
* and knows the read-side task is reading mems_allowed or mempolicy,
* so it will clear old bits lazily.
*/
smp_mb();
return read_seqcount_begin(&current->mems_allowed_seq);
}
static inline void put_mems_allowed(void)
/*
* If this returns false, the operation that took place after get_mems_allowed
* may have failed. It is up to the caller to retry the operation if
* appropriate.
*/
static inline bool put_mems_allowed(unsigned int seq)
{
/*
* ensure that reading mems_allowed and mempolicy before reducing
* mems_allowed_change_disable.
*
* the write-side task will know that the read-side task is still
* reading mems_allowed or mempolicy, don't clears old bits in the
* nodemask.
*/
smp_mb();
--ACCESS_ONCE(current->mems_allowed_change_disable);
return !read_seqcount_retry(&current->mems_allowed_seq, seq);
}
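/*
 * The retry loop described above, sketched (hypothetical caller; the page
 * allocator and mempolicy code are the real users). get_mems_allowed()
 * returns a seqcount cookie; put_mems_allowed() reports whether
 * mems_allowed changed while the allocation was in flight.
 */
static struct page *alloc_pages_mems_retry(gfp_t gfp_mask, unsigned int order)
{
	unsigned int cookie;
	struct page *page;

	do {
		cookie = get_mems_allowed();
		page = alloc_pages(gfp_mask, order);	/* reads mems_allowed */
	} while (!put_mems_allowed(cookie) && !page);	/* retry on a raced update */

	return page;
}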
static inline void set_mems_allowed(nodemask_t nodemask)
{
task_lock(current);
write_seqcount_begin(&current->mems_allowed_seq);
current->mems_allowed = nodemask;
write_seqcount_end(&current->mems_allowed_seq);
task_unlock(current);
}
@@ -144,10 +135,8 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
cpumask_copy(mask, cpu_possible_mask);
}
static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
do_set_cpus_allowed(p, cpu_possible_mask);
return cpumask_any(cpu_active_mask);
}
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -234,12 +223,14 @@ static inline void set_mems_allowed(nodemask_t nodemask)
{
}
static inline void get_mems_allowed(void)
static inline unsigned int get_mems_allowed(void)
{
return 0;
}
static inline void put_mems_allowed(void)
static inline bool put_mems_allowed(unsigned int seq)
{
return true;
}
#endif /* !CONFIG_CPUSETS */

View File

@@ -3,7 +3,6 @@
#ifdef CONFIG_CRASH_DUMP
#include <linux/kexec.h>
#include <linux/device.h>
#include <linux/proc_fs.h>
#include <linux/elf.h>

View File

@@ -11,6 +11,8 @@
extern u32 crc32_le(u32 crc, unsigned char const *p, size_t len);
extern u32 crc32_be(u32 crc, unsigned char const *p, size_t len);
extern u32 __crc32c_le(u32 crc, unsigned char const *p, size_t len);
#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length)
/*

View File

@@ -20,6 +20,7 @@
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
@@ -75,6 +76,11 @@
*/
#define CRYPTO_ALG_INSTANCE 0x00000800
/* Set this bit if the algorithm provided is hardware accelerated but
* not available to userspace via instruction set or so.
*/
#define CRYPTO_ALG_KERN_DRIVER_ONLY 0x00001000
/*
* Transform masks and values (for crt_flags).
*/
@@ -309,6 +315,8 @@ struct crypto_alg {
*/
int crypto_register_alg(struct crypto_alg *alg);
int crypto_unregister_alg(struct crypto_alg *alg);
int crypto_register_algs(struct crypto_alg *algs, int count);
int crypto_unregister_algs(struct crypto_alg *algs, int count);
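/*
 * Batch registration sketch (hypothetical driver): register a whole array
 * of algorithms in one call; on failure the already-registered entries are
 * meant to be rolled back before the error is returned.
 */
static struct crypto_alg my_algs[2];	/* filled in by the driver */

static int __init my_crypto_init(void)
{
	return crypto_register_algs(my_algs, ARRAY_SIZE(my_algs));
}

static void __exit my_crypto_exit(void)
{
	crypto_unregister_algs(my_algs, ARRAY_SIZE(my_algs));
}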
/*
* Algorithm query interface.

View File

@@ -100,3 +100,6 @@ struct crypto_report_rng {
char type[CRYPTO_MAX_NAME];
unsigned int seedsize;
};
#define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
sizeof(struct crypto_report_blkcipher))

View File

@@ -47,26 +47,6 @@ struct dentry_stat_t {
};
extern struct dentry_stat_t dentry_stat;
/*
* Compare 2 name strings, return 0 if they match, otherwise non-zero.
* The strings are both count bytes long, and count is non-zero.
*/
static inline int dentry_cmp(const unsigned char *cs, size_t scount,
const unsigned char *ct, size_t tcount)
{
if (scount != tcount)
return 1;
do {
if (*cs != *ct)
return 1;
cs++;
ct++;
tcount--;
} while (tcount);
return 0;
}
/* Name hashing routines. Initial hash value */
/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
#define init_name_hash() 0
@@ -242,7 +222,6 @@ extern void shrink_dcache_for_umount(struct super_block *);
extern int d_invalidate(struct dentry *);
/* only used at mount-time */
extern struct dentry * d_alloc_root(struct inode *);
extern struct dentry * d_make_root(struct inode *);
/* <clickety>-<click> the ramfs-type tree */

View File

@@ -376,8 +376,10 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
/**
* struct dccp_request_sock - represent DCCP-specific connection request
* @dreq_inet_rsk: structure inherited from
* @dreq_iss: initial sequence number sent on the Response (RFC 4340, 7.1)
* @dreq_isr: initial sequence number received on the Request
* @dreq_iss: initial sequence number, sent on the first Response (RFC 4340, 7.1)
* @dreq_gss: greatest sequence number sent (for retransmitted Responses)
* @dreq_isr: initial sequence number received in the first Request
* @dreq_gsr: greatest sequence number received (for retransmitted Request(s))
* @dreq_service: service code present on the Request (there is just one)
* @dreq_featneg: feature negotiation options for this connection
* The following two fields are analogous to the ones in dccp_sock:
@@ -387,7 +389,9 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
struct dccp_request_sock {
struct inet_request_sock dreq_inet_rsk;
__u64 dreq_iss;
__u64 dreq_gss;
__u64 dreq_isr;
__u64 dreq_gsr;
__be32 dreq_service;
struct list_head dreq_featneg;
__u32 dreq_timestamp_echo;

View File

@@ -3,7 +3,7 @@
#include <linux/kernel.h>
#include <linux/atomic.h>
#include <asm/system.h>
#include <linux/bug.h>
struct task_struct;

View File

@@ -86,7 +86,7 @@ struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob);
struct dentry *debugfs_create_regset32(const char *name, mode_t mode,
struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
struct dentry *parent,
struct debugfs_regset32 *regset);
@@ -208,7 +208,7 @@ static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode,
}
static inline struct dentry *debugfs_create_regset32(const char *name,
mode_t mode, struct dentry *parent,
umode_t mode, struct dentry *parent,
struct debugfs_regset32 *regset)
{
return ERR_PTR(-ENODEV);

View File

@@ -44,6 +44,14 @@ struct devfreq_dev_status {
void *private_data;
};
/*
* If the flag is set, the resulting frequency should be at most the
* given value: the value is a least upper bound, so the highest
* supported frequency that is lower than or equal to it is chosen.
* If the flag is not set, the resulting frequency should be at least
* the value: a greatest lower bound, so the lowest supported frequency
* that is greater than or equal to it is chosen. E.g. with OPPs of
* 200/400/800 MHz and a 500 MHz request, the flag yields 400 MHz;
* without it, 800 MHz.
*/
#define DEVFREQ_FLAG_LEAST_UPPER_BOUND 0x1
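/*
 * A sketch of a target() callback honouring the flag. my_set_rate() is
 * hypothetical; opp_get_freq() is assumed from <linux/opp.h>.
 */
static int my_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct opp *opp = devfreq_recommended_opp(dev, freq, flags);

	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* devfreq_recommended_opp() rounded *freq to the chosen OPP */
	return my_set_rate(dev, opp_get_freq(opp));
}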
/**
* struct devfreq_dev_profile - Devfreq's user device profile
* @initial_freq The operating frequency when devfreq_add_device() is
@@ -54,6 +62,8 @@ struct devfreq_dev_status {
* higher than any operable frequency, set maximum.
* Before returning, target function should set
* freq at the current frequency.
* The "flags" parameter's possible values are
* explained above with "DEVFREQ_FLAG_*" macros.
* @get_dev_status The device should provide the current performance
* status to devfreq, which is used by governors.
* @exit An optional callback that is called when devfreq
@@ -66,7 +76,7 @@ struct devfreq_dev_profile {
unsigned long initial_freq;
unsigned int polling_ms;
int (*target)(struct device *dev, unsigned long *freq);
int (*target)(struct device *dev, unsigned long *freq, u32 flags);
int (*get_dev_status)(struct device *dev,
struct devfreq_dev_status *stat);
void (*exit)(struct device *dev);
@@ -124,6 +134,8 @@ struct devfreq_governor {
* touch this.
* @being_removed a flag to mark that this object is being removed in
* order to prevent trying to remove the object multiple times.
* @min_freq Limit minimum frequency requested by user (0: none)
* @max_freq Limit maximum frequency requested by user (0: none)
*
* This structure stores the devfreq information for a give device.
*
@@ -149,6 +161,9 @@ struct devfreq {
void *data; /* private data for governors */
bool being_removed;
unsigned long min_freq;
unsigned long max_freq;
};
#if defined(CONFIG_PM_DEVFREQ)
@@ -160,7 +175,7 @@ extern int devfreq_remove_device(struct devfreq *devfreq);
/* Helper functions for devfreq user device driver with OPP. */
extern struct opp *devfreq_recommended_opp(struct device *dev,
unsigned long *freq);
unsigned long *freq, u32 flags);
extern int devfreq_register_opp_notifier(struct device *dev,
struct devfreq *devfreq);
extern int devfreq_unregister_opp_notifier(struct device *dev,
@@ -200,18 +215,18 @@ struct devfreq_simple_ondemand_data {
static struct devfreq *devfreq_add_device(struct device *dev,
struct devfreq_dev_profile *profile,
struct devfreq_governor *governor,
void *data);
void *data)
{
return NULL;
}
static int devfreq_remove_device(struct devfreq *devfreq);
static int devfreq_remove_device(struct devfreq *devfreq)
{
return 0;
}
static struct opp *devfreq_recommended_opp(struct device *dev,
unsigned long *freq)
unsigned long *freq, u32 flags)
{
return ERR_PTR(-EINVAL);
}

View File

@@ -238,8 +238,6 @@ struct device_driver {
extern int __must_check driver_register(struct device_driver *drv);
extern void driver_unregister(struct device_driver *drv);
extern struct device_driver *get_driver(struct device_driver *drv);
extern void put_driver(struct device_driver *drv);
extern struct device_driver *driver_find(const char *name,
struct bus_type *bus);
extern int driver_probe_done(void);
@@ -264,10 +262,6 @@ extern int __must_check driver_create_file(struct device_driver *driver,
extern void driver_remove_file(struct device_driver *driver,
const struct driver_attribute *attr);
extern int __must_check driver_add_kobj(struct device_driver *drv,
struct kobject *kobj,
const char *fmt, ...);
extern int __must_check driver_for_each_device(struct device_driver *drv,
struct device *start,
void *data,
@@ -946,14 +940,14 @@ int _dev_info(const struct device *dev, const char *fmt, ...)
#define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
#if defined(DEBUG)
#define dev_dbg(dev, format, arg...) \
dev_printk(KERN_DEBUG, dev, format, ##arg)
#elif defined(CONFIG_DYNAMIC_DEBUG)
#if defined(CONFIG_DYNAMIC_DEBUG)
#define dev_dbg(dev, format, ...) \
do { \
dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
} while (0)
#elif defined(DEBUG)
#define dev_dbg(dev, format, arg...) \
dev_printk(KERN_DEBUG, dev, format, ##arg)
#else
#define dev_dbg(dev, format, arg...) \
({ \
@@ -1007,19 +1001,20 @@ extern long sysfs_deprecated;
* @__driver: driver name
* @__register: register function for this driver type
* @__unregister: unregister function for this driver type
* @...: Additional arguments to be passed to __register and __unregister.
*
* Use this macro to construct bus specific macros for registering
* drivers, and do not use it on its own.
*/
#define module_driver(__driver, __register, __unregister) \
#define module_driver(__driver, __register, __unregister, ...) \
static int __init __driver##_init(void) \
{ \
return __register(&(__driver)); \
return __register(&(__driver) , ##__VA_ARGS__); \
} \
module_init(__driver##_init); \
static void __exit __driver##_exit(void) \
{ \
__unregister(&(__driver)); \
__unregister(&(__driver) , ##__VA_ARGS__); \
} \
module_exit(__driver##_exit);
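/*
 * For example, the platform bus wraps it like this (see
 * <linux/platform_device.h>); a driver then needs only a single line,
 * module_platform_driver(my_driver);
 */
#define module_platform_driver(__platform_driver) \
	module_driver(__platform_driver, platform_driver_register, \
			platform_driver_unregister)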

View File

@@ -13,6 +13,8 @@
enum dma_attr {
DMA_ATTR_WRITE_BARRIER,
DMA_ATTR_WEAK_ORDERING,
DMA_ATTR_WRITE_COMBINE,
DMA_ATTR_NON_CONSISTENT,
DMA_ATTR_MAX,
};

View File

@@ -26,11 +26,12 @@
#include <linux/file.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
struct device;
struct dma_buf;
struct dma_buf_attachment;
@@ -49,6 +50,17 @@ struct dma_buf_attachment;
* @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
* pages.
* @release: release this buffer; to be called after the last dma_buf_put.
* @begin_cpu_access: [optional] called before cpu access to invalidate cpu
* caches and allocate backing storage (if not yet done),
* or pin the object into memory, respectively.
* @end_cpu_access: [optional] called after cpu access to flush caches.
* @kmap_atomic: maps a page from the buffer into kernel address
* space, users may not block until the subsequent unmap call.
* This callback must not sleep.
* @kunmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
* This callback must not sleep.
* @kmap: maps a page from the buffer into kernel address space.
* @kunmap: [optional] unmaps a page from the buffer.
*/
struct dma_buf_ops {
int (*attach)(struct dma_buf *, struct device *,
@@ -63,7 +75,8 @@ struct dma_buf_ops {
struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
enum dma_data_direction);
void (*unmap_dma_buf)(struct dma_buf_attachment *,
struct sg_table *);
struct sg_table *,
enum dma_data_direction);
/* TODO: Add try_map_dma_buf version, to return immed with -EBUSY
* if the call would block.
*/
@@ -71,6 +84,14 @@ struct dma_buf_ops {
/* after final dma_buf_put() */
void (*release)(struct dma_buf *);
int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
enum dma_data_direction);
void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
enum dma_data_direction);
void *(*kmap_atomic)(struct dma_buf *, unsigned long);
void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
void *(*kmap)(struct dma_buf *, unsigned long);
void (*kunmap)(struct dma_buf *, unsigned long, void *);
};
/**
@@ -86,7 +107,7 @@ struct dma_buf {
struct file *file;
struct list_head attachments;
const struct dma_buf_ops *ops;
/* mutex to serialize list manipulation and other ops */
/* mutex to serialize list manipulation and attach/detach */
struct mutex lock;
void *priv;
};
@@ -109,20 +130,43 @@ struct dma_buf_attachment {
void *priv;
};
/**
* get_dma_buf - convenience wrapper for get_file.
* @dmabuf: [in] pointer to dma_buf
*
* Increments the reference count on the dma-buf, needed by drivers
* that create additional references to the dmabuf on the
* kernel side. For example, an exporter that needs to keep a dmabuf ptr
* so that subsequent exports don't create a new dmabuf.
*/
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
get_file(dmabuf->file);
}
#ifdef CONFIG_DMA_SHARED_BUFFER
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev);
void dma_buf_detach(struct dma_buf *dmabuf,
struct dma_buf_attachment *dmabuf_attach);
struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
size_t size, int flags);
int dma_buf_fd(struct dma_buf *dmabuf);
struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
size_t size, int flags);
int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
enum dma_data_direction);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
enum dma_data_direction dir);
void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
enum dma_data_direction dir);
void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
void *dma_buf_kmap(struct dma_buf *, unsigned long);
void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
#else
static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
@@ -138,13 +182,13 @@ static inline void dma_buf_detach(struct dma_buf *dmabuf,
}
static inline struct dma_buf *dma_buf_export(void *priv,
struct dma_buf_ops *ops,
size_t size, int flags)
const struct dma_buf_ops *ops,
size_t size, int flags)
{
return ERR_PTR(-ENODEV);
}
static inline int dma_buf_fd(struct dma_buf *dmabuf)
static inline int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
return -ENODEV;
}
@@ -166,11 +210,44 @@ static inline struct sg_table *dma_buf_map_attachment(
}
static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
struct sg_table *sg)
struct sg_table *sg, enum dma_data_direction dir)
{
return;
}
static inline int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
size_t start, size_t len,
enum dma_data_direction dir)
{
return -ENODEV;
}
static inline void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
size_t start, size_t len,
enum dma_data_direction dir)
{
}
static inline void *dma_buf_kmap_atomic(struct dma_buf *dmabuf,
unsigned long pnum)
{
return NULL;
}
static inline void dma_buf_kunmap_atomic(struct dma_buf *dmabuf,
unsigned long pnum, void *vaddr)
{
}
static inline void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long pnum)
{
return NULL;
}
static inline void dma_buf_kunmap(struct dma_buf *dmabuf,
unsigned long pnum, void *vaddr)
{
}
#endif /* CONFIG_DMA_SHARED_BUFFER */
#endif /* __DMA_BUF_H__ */
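/*
 * A hedged importer-side sketch of the attach/map flow declared above
 * (hypothetical function; error handling trimmed to the essentials):
 */
static struct sg_table *my_import(struct device *dev, int fd,
				  struct dma_buf_attachment **att)
{
	struct dma_buf *buf = dma_buf_get(fd);	/* takes a file reference */
	struct sg_table *sgt;

	if (IS_ERR(buf))
		return ERR_CAST(buf);

	*att = dma_buf_attach(buf, dev);
	if (IS_ERR(*att)) {
		dma_buf_put(buf);
		return ERR_CAST(*att);
	}

	sgt = dma_buf_map_attachment(*att, DMA_BIDIRECTIONAL);
	/*
	 * Teardown mirrors this: dma_buf_unmap_attachment(),
	 * dma_buf_detach(), dma_buf_put().
	 */
	return sgt;
}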

View File

@@ -9,10 +9,15 @@
#include <linux/scatterlist.h>
struct dma_map_ops {
void* (*alloc_coherent)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
void (*free_coherent)(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
void* (*alloc)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
struct dma_attrs *attrs);
void (*free)(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs);
int (*mmap)(struct device *, struct vm_area_struct *,
void *, dma_addr_t, size_t, struct dma_attrs *attrs);
dma_addr_t (*map_page)(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
@@ -77,7 +82,7 @@ static inline u64 dma_get_mask(struct device *dev)
return DMA_BIT_MASK(32);
}
#ifdef ARCH_HAS_DMA_SET_COHERENT_MASK
#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)

View File

@@ -18,13 +18,15 @@
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
#ifndef DMAENGINE_H
#define DMAENGINE_H
#ifndef LINUX_DMAENGINE_H
#define LINUX_DMAENGINE_H
#include <linux/device.h>
#include <linux/uio.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <asm/page.h>
/**
@@ -257,6 +259,7 @@ struct dma_chan_percpu {
* struct dma_chan - devices supply DMA channels, clients use them
* @device: ptr to the dma device who supplies this channel, always !%NULL
* @cookie: last cookie value returned to client
* @completed_cookie: last completed cookie for this channel
* @chan_id: channel ID for sysfs
* @dev: class device for sysfs
* @device_node: used to add this to the device chan list
@@ -268,6 +271,7 @@ struct dma_chan_percpu {
struct dma_chan {
struct dma_device *device;
dma_cookie_t cookie;
dma_cookie_t completed_cookie;
/* sysfs */
int chan_id;
@@ -331,6 +335,9 @@ enum dma_slave_buswidth {
* may or may not be applicable on memory sources.
* @dst_maxburst: same as src_maxburst but for destination target
* mutatis mutandis.
* @device_fc: Flow controller settings. Only valid for slave channels. Set
* to 'true' if the peripheral should be the flow controller. The direction
* will be selected at runtime.
*
* This struct is passed in as configuration data to a DMA engine
* in order to set up a certain channel for DMA transport at runtime.
@@ -357,6 +364,7 @@ struct dma_slave_config {
enum dma_slave_buswidth dst_addr_width;
u32 src_maxburst;
u32 dst_maxburst;
bool device_fc;
};
static inline const char *dma_chan_name(struct dma_chan *chan)
@@ -575,10 +583,11 @@ struct dma_device {
struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags);
unsigned long flags, void *context);
struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction);
size_t period_len, enum dma_transfer_direction direction,
void *context);
struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
struct dma_chan *chan, struct dma_interleaved_template *xt,
unsigned long flags);
@@ -612,7 +621,24 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
struct scatterlist sg;
sg_init_one(&sg, buf, len);
return chan->device->device_prep_slave_sg(chan, &sg, 1, dir, flags);
return chan->device->device_prep_slave_sg(chan, &sg, 1,
dir, flags, NULL);
}
static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
enum dma_transfer_direction dir, unsigned long flags)
{
return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
dir, flags, NULL);
}
static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction dir)
{
return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
period_len, dir, NULL);
}
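/*
 * A hypothetical consumer of the wrappers above; assumes the channel has
 * already been configured via a struct dma_slave_config. The my_* name
 * is illustrative.
 */
static dma_cookie_t my_start_tx(struct dma_chan *chan, void *buf, size_t len)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	/* a completion callback could be set on desc here */
	return dmaengine_submit(desc);	/* then dma_async_issue_pending(chan) */
}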
static inline int dmaengine_terminate_all(struct dma_chan *chan)

View File

@@ -31,18 +31,6 @@ struct dw_dma_platform_data {
unsigned char chan_priority;
};
/**
* enum dw_dma_slave_width - DMA slave register access width.
* @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
* @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
* @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
*/
enum dw_dma_slave_width {
DW_DMA_SLAVE_WIDTH_8BIT,
DW_DMA_SLAVE_WIDTH_16BIT,
DW_DMA_SLAVE_WIDTH_32BIT,
};
/* bursts size */
enum dw_dma_msize {
DW_DMA_MSIZE_1,
@@ -55,47 +43,21 @@ enum dw_dma_msize {
DW_DMA_MSIZE_256,
};
/* flow controller */
enum dw_dma_fc {
DW_DMA_FC_D_M2M,
DW_DMA_FC_D_M2P,
DW_DMA_FC_D_P2M,
DW_DMA_FC_D_P2P,
DW_DMA_FC_P_P2M,
DW_DMA_FC_SP_P2P,
DW_DMA_FC_P_M2P,
DW_DMA_FC_DP_P2P,
};
/**
* struct dw_dma_slave - Controller-specific information about a slave
*
* @dma_dev: required DMA master device
* @tx_reg: physical address of data register used for
* memory-to-peripheral transfers
* @rx_reg: physical address of data register used for
* peripheral-to-memory transfers
* @reg_width: peripheral register width
* @cfg_hi: Platform-specific initializer for the CFG_HI register
* @cfg_lo: Platform-specific initializer for the CFG_LO register
* @src_master: src master for transfers on allocated channel.
* @dst_master: dest master for transfers on allocated channel.
* @src_msize: src burst size.
* @dst_msize: dest burst size.
* @fc: flow controller for DMA transfer
*/
struct dw_dma_slave {
struct device *dma_dev;
dma_addr_t tx_reg;
dma_addr_t rx_reg;
enum dw_dma_slave_width reg_width;
u32 cfg_hi;
u32 cfg_lo;
u8 src_master;
u8 dst_master;
u8 src_msize;
u8 dst_msize;
u8 fc;
};
/* Platform-configurable bits in CFG_HI */

View File

@@ -15,20 +15,24 @@ struct _ddebug {
const char *function;
const char *filename;
const char *format;
unsigned int lineno:24;
unsigned int lineno:18;
/*
* The flags field controls the behaviour at the callsite.
* The bits here are changed dynamically when the user
* writes commands to <debugfs>/dynamic_debug/control
*/
#define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */
#define _DPRINTK_FLAGS_NONE 0
#define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */
#define _DPRINTK_FLAGS_INCL_MODNAME (1<<1)
#define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2)
#define _DPRINTK_FLAGS_INCL_LINENO (1<<3)
#define _DPRINTK_FLAGS_INCL_TID (1<<4)
#if defined DEBUG
#define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT
#else
#define _DPRINTK_FLAGS_DEFAULT 0
#endif
unsigned int flags:8;
char enabled;
} __attribute__((aligned(8)));
@@ -62,21 +66,20 @@ int __dynamic_netdev_dbg(struct _ddebug *descriptor,
.format = (fmt), \
.lineno = __LINE__, \
.flags = _DPRINTK_FLAGS_DEFAULT, \
.enabled = false, \
}
#define dynamic_pr_debug(fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
if (unlikely(descriptor.enabled)) \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
__dynamic_pr_debug(&descriptor, pr_fmt(fmt), \
##__VA_ARGS__); \
} while (0)
#define dynamic_dev_dbg(dev, fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
if (unlikely(descriptor.enabled)) \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
__dynamic_dev_dbg(&descriptor, dev, fmt, \
##__VA_ARGS__); \
} while (0)
@@ -84,7 +87,7 @@ do { \
#define dynamic_netdev_dbg(dev, fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
if (unlikely(descriptor.enabled)) \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
__dynamic_netdev_dbg(&descriptor, dev, fmt, \
##__VA_ARGS__); \
} while (0)
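/*
 * Usage sketch (hypothetical function): with CONFIG_DYNAMIC_DEBUG this
 * pr_debug() compiles to dynamic_pr_debug() above and stays silent until
 * its _DPRINTK_FLAGS_PRINT bit is set at run time, e.g. with
 *   echo 'func my_probe +p' > <debugfs>/dynamic_debug/control
 * (the m/f/l/t flags additionally prepend module, function, line and TID).
 */
static int my_probe(void)
{
	pr_debug("probed\n");
	return 0;
}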

View File

@@ -13,7 +13,11 @@
#define _LINUX_EDAC_H_
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
struct device;
#define EDAC_OPSTATE_INVAL -1
#define EDAC_OPSTATE_POLL 0
@@ -66,25 +70,64 @@ enum dev_type {
#define DEV_FLAG_X32 BIT(DEV_X32)
#define DEV_FLAG_X64 BIT(DEV_X64)
/* memory types */
/**
* enum mem_type - memory types. For a more detailed reference, please see
* http://en.wikipedia.org/wiki/DRAM
*
* @MEM_EMPTY: Empty csrow
* @MEM_RESERVED: Reserved csrow type
* @MEM_UNKNOWN: Unknown csrow type
* @MEM_FPM: FPM - Fast Page Mode, used on systems up to 1995.
* @MEM_EDO: EDO - Extended data out, used on systems up to 1998.
* @MEM_BEDO: BEDO - Burst Extended data out, an EDO variant.
* @MEM_SDR: SDR - Single data rate SDRAM
* http://en.wikipedia.org/wiki/Synchronous_dynamic_random-access_memory
* They use 3 pins for chip select: Pins 0 and 2 are
* for rank 0; pins 1 and 3 are for rank 1, if the memory
* is dual-rank.
* @MEM_RDR: Registered SDR SDRAM
* @MEM_DDR: Double data rate SDRAM
* http://en.wikipedia.org/wiki/DDR_SDRAM
* @MEM_RDDR: Registered Double data rate SDRAM
* This is a variant of the DDR memories.
* A registered memory has a buffer inside it, hiding
* part of the memory details from the memory controller.
* @MEM_RMBS: Rambus DRAM, used on a few Pentium III/IV controllers.
* @MEM_DDR2: DDR2 RAM, as described at JEDEC JESD79-2F.
* Those memories are labeled as "PC2-" instead of "PC" to
* differentiate them from DDR.
* @MEM_FB_DDR2: Fully-Buffered DDR2, as described at JEDEC Std No. 205
* and JESD206.
* Those memories are accessed per DIMM slot, and not by
* a chip select signal.
* @MEM_RDDR2: Registered DDR2 RAM
* This is a variant of the DDR2 memories.
* @MEM_XDR: Rambus XDR
* It is an evolution of the original RAMBUS memories,
* created to compete with DDR2. It wasn't used on any
* x86 arch, but the cell_edac PPC memory controller uses it.
* @MEM_DDR3: DDR3 RAM
* @MEM_RDDR3: Registered DDR3 RAM
* This is a variant of the DDR3 memories.
*/
enum mem_type {
MEM_EMPTY = 0, /* Empty csrow */
MEM_RESERVED, /* Reserved csrow type */
MEM_UNKNOWN, /* Unknown csrow type */
MEM_FPM, /* Fast page mode */
MEM_EDO, /* Extended data out */
MEM_BEDO, /* Burst Extended data out */
MEM_SDR, /* Single data rate SDRAM */
MEM_RDR, /* Registered single data rate SDRAM */
MEM_DDR, /* Double data rate SDRAM */
MEM_RDDR, /* Registered Double data rate SDRAM */
MEM_RMBS, /* Rambus DRAM */
MEM_DDR2, /* DDR2 RAM */
MEM_FB_DDR2, /* fully buffered DDR2 */
MEM_RDDR2, /* Registered DDR2 RAM */
MEM_XDR, /* Rambus XDR */
MEM_DDR3, /* DDR3 RAM */
MEM_RDDR3, /* Registered DDR3 RAM */
MEM_EMPTY = 0,
MEM_RESERVED,
MEM_UNKNOWN,
MEM_FPM,
MEM_EDO,
MEM_BEDO,
MEM_SDR,
MEM_RDR,
MEM_DDR,
MEM_RDDR,
MEM_RMBS,
MEM_DDR2,
MEM_FB_DDR2,
MEM_RDDR2,
MEM_XDR,
MEM_DDR3,
MEM_RDDR3,
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -162,8 +205,9 @@ enum scrub_type {
#define OP_OFFLINE 0x300
/*
* There are several things to be aware of that aren't at all obvious:
* Concepts used at the EDAC subsystem
*
* There are several things to be aware of that aren't at all obvious:
*
* SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc..
*
@@ -172,36 +216,61 @@ enum scrub_type {
* creating a common ground for discussion, terms and their definitions
* will be established.
*
* Memory devices: The individual chip on a memory stick. These devices
* commonly output 4 and 8 bits each. Grouping several
* of these in parallel provides 64 bits which is common
* for a memory stick.
* Memory devices: The individual DRAM chips on a memory stick. These
* devices commonly output 4 and 8 bits each (x4, x8).
* Grouping several of these in parallel provides the
* number of bits that the memory controller expects:
* typically 72 bits, in order to provide 64 bits +
* 8 bits of ECC data.
*
* Memory Stick: A printed circuit board that aggregates multiple
* memory devices in parallel. This is the atomic
* memory component that is purchaseable by Joe consumer
* and loaded into a memory socket.
* memory devices in parallel. In general, this is the
* Field Replaceable Unit (FRU) which gets replaced, in
* the case of excessive errors. Most often it is also
* called DIMM (Dual Inline Memory Module).
*
* Socket: A physical connector on the motherboard that accepts
* a single memory stick.
* Memory Socket: A physical connector on the motherboard that accepts
* a single memory stick. Also called a "slot" in several
* datasheets.
*
* Channel: Set of memory devices on a memory stick that must be
* grouped in parallel with one or more additional
* channels from other memory sticks. This parallel
* grouping of the output from multiple channels are
* necessary for the smallest granularity of memory access.
* Some memory controllers are capable of single channel -
* which means that memory sticks can be loaded
* individually. Other memory controllers are only
* capable of dual channel - which means that memory
* sticks must be loaded as pairs (see "socket set").
* Channel: A memory controller channel, responsible for communicating
* with a group of DIMMs. Each channel has its own
* independent control (command) and data bus, and can
* be used independently or grouped with other channels.
*
* Chip-select row: All of the memory devices that are selected together.
* for a single, minimum grain of memory access.
* This selects all of the parallel memory devices across
* all of the parallel channels. Common chip-select rows
* for single channel are 64 bits, for dual channel 128
* bits.
* Branch: It is typically the highest level of the hierarchy on a
* Fully-Buffered DIMM memory controller.
* Typically, it contains two channels.
* Two channels at the same branch can be used in single
* mode or in lockstep mode.
* When lockstep is enabled, the cacheline is doubled,
* but it generally brings some performance penalty.
* Also, it is generally not possible to point to just one
* memory stick when an error occurs, as the error
* correction code is calculated using two DIMMs instead
* of one. Due to that, it is capable of correcting more
* errors than on single mode.
*
* Single-channel: The data accessed by the memory controller is contained
* in one DIMM only. E.g. if the data is 64 bits wide,
* it flows to the CPU using one 64-bit parallel
* access.
* Typically used with SDR, DDR, DDR2 and DDR3 memories.
* FB-DIMM and RAMBUS use a different concept for channel,
* so this concept doesn't apply there.
*
* Double-channel: The data size accessed by the memory controller is
* interleaved across two DIMMs, accessed at the same time.
* E.g. if each DIMM is 64 bits wide (72 bits with ECC),
* the data flows to the CPU using a 128-bit parallel
* access.
*
* Chip-select row: This is the name of the DRAM signal used to select the
* DRAM ranks to be accessed. Common chip-select rows for
* single channel are 64 bits, for dual channel 128 bits.
* It may not be visible to the memory controller, as some
* DIMM types have a memory buffer that can hide direct
* access to it from the Memory Controller.
*
* Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memory.
* Motherboards commonly drive two chip-select pins to
@@ -214,8 +283,8 @@ enum scrub_type {
*
* Double-sided stick: DEPRECATED TERM, see Double-Ranked stick.
* A double-sided stick has two chip-select rows which
* access different sets of memory devices. The two
* rows cannot be accessed concurrently. "Double-sided"
* access different sets of memory devices. The two
* rows cannot be accessed concurrently. "Double-sided"
* is irrespective of the memory devices being mounted
* on both sides of the memory stick.
*
@@ -243,10 +312,22 @@ enum scrub_type {
* PS - I enjoyed writing all that about as much as you enjoyed reading it.
*/
struct channel_info {
int chan_idx; /* channel index */
u32 ce_count; /* Correctable Errors for this CHANNEL */
char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
/**
* struct rank_info - contains the information for one DIMM rank
*
* @chan_idx: channel number where the rank is (typically, 0 or 1)
* @ce_count: number of correctable errors for this rank
* @label: DIMM label. Different ranks for the same DIMM should be
* filled, in userspace, with the same label.
* FIXME: The core currently won't enforce it.
* @csrow: A pointer to the chip select row structure (the parent
* structure). The location of the rank is given by
* the (csrow->csrow_idx, chan_idx) vector.
*/
struct rank_info {
int chan_idx;
u32 ce_count;
char label[EDAC_MC_LABEL_LEN + 1];
struct csrow_info *csrow; /* the parent */
};
@@ -270,7 +351,7 @@ struct csrow_info {
/* channel information for this csrow */
u32 nr_channels;
struct channel_info *channels;
struct rank_info *channels;
};
struct mcidev_sysfs_group {

View File

@@ -22,7 +22,6 @@
#include <linux/pstore.h>
#include <asm/page.h>
#include <asm/system.h>
#define EFI_SUCCESS 0
#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1)))
@@ -313,6 +312,16 @@ typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules,
#define EFI_FILE_SYSTEM_GUID \
EFI_GUID( 0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
typedef struct {
efi_guid_t guid;
u64 table;
} efi_config_table_64_t;
typedef struct {
efi_guid_t guid;
u32 table;
} efi_config_table_32_t;
typedef struct {
efi_guid_t guid;
unsigned long table;
@@ -327,6 +336,40 @@ typedef struct {
#define EFI_1_10_SYSTEM_TABLE_REVISION ((1 << 16) | (10))
#define EFI_1_02_SYSTEM_TABLE_REVISION ((1 << 16) | (02))
typedef struct {
efi_table_hdr_t hdr;
u64 fw_vendor; /* physical addr of CHAR16 vendor string */
u32 fw_revision;
u32 __pad1;
u64 con_in_handle;
u64 con_in;
u64 con_out_handle;
u64 con_out;
u64 stderr_handle;
u64 stderr;
u64 runtime;
u64 boottime;
u32 nr_tables;
u32 __pad2;
u64 tables;
} efi_system_table_64_t;
typedef struct {
efi_table_hdr_t hdr;
u32 fw_vendor; /* physical addr of CHAR16 vendor string */
u32 fw_revision;
u32 con_in_handle;
u32 con_in;
u32 con_out_handle;
u32 con_out;
u32 stderr_handle;
u32 stderr;
u32 runtime;
u32 boottime;
u32 nr_tables;
u32 tables;
} efi_system_table_32_t;
typedef struct {
efi_table_hdr_t hdr;
unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */
@@ -497,6 +540,7 @@ extern int __init efi_setup_pcdp_console(char *);
#ifdef CONFIG_EFI
# ifdef CONFIG_X86
extern int efi_enabled;
extern bool efi_64bit;
# else
# define efi_enabled 1
# endif

View File

@@ -6,6 +6,7 @@
#include <linux/time.h>
#ifdef __KERNEL__
#include <linux/user.h>
#include <linux/bug.h>
#endif
#include <linux/ptrace.h>
#include <linux/elf.h>

View File

@@ -16,6 +16,7 @@
#define ERESTARTNOHAND 514 /* restart if no handler.. */
#define ENOIOCTLCMD 515 /* No ioctl command */
#define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */
#define EPROBE_DEFER 517 /* Driver requests probe retry */
/* Defined for the NFSv3 protocol */
#define EBADHANDLE 521 /* Illegal NFS file handle */

View File

@@ -140,17 +140,18 @@ static inline void random_ether_addr(u8 *addr)
}
/**
* dev_hw_addr_random - Create random MAC and set device flag
* eth_hw_addr_random - Generate a software-assigned random Ethernet
* address and set device flag
* @dev: pointer to net_device structure
* @hwaddr: Pointer to a six-byte array containing the Ethernet address
*
* Generate random MAC to be used by a device and set addr_assign_type
* so the state can be read by sysfs and be used by udev.
* Generate a random Ethernet address (MAC) to be used by a net device
* and set addr_assign_type so the state can be read by sysfs and be
* used by userspace.
*/
static inline void dev_hw_addr_random(struct net_device *dev, u8 *hwaddr)
static inline void eth_hw_addr_random(struct net_device *dev)
{
dev->addr_assign_type |= NET_ADDR_RANDOM;
random_ether_addr(hwaddr);
random_ether_addr(dev->dev_addr);
}
/**

View File

@@ -30,10 +30,15 @@ struct ethtool_cmd {
* access it */
__u8 duplex; /* Duplex, half or full */
__u8 port; /* Which connector port */
__u8 phy_address;
__u8 phy_address; /* MDIO PHY address (PRTAD for clause 45).
* May be read-only or read-write
* depending on the driver.
*/
__u8 transceiver; /* Which transceiver to use */
__u8 autoneg; /* Enable or disable autonegotiation */
__u8 mdio_support;
__u8 mdio_support; /* MDIO protocols supported. Read-only.
* Not set by all drivers.
*/
__u32 maxtxpkt; /* Tx pkts before generating tx int */
__u32 maxrxpkt; /* Rx pkts before generating rx int */
__u16 speed_hi; /* The forced speed (upper
@@ -59,6 +64,20 @@ static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
return (ep->speed_hi << 16) | ep->speed;
}
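/*
 * The 32-bit link speed is split across two 16-bit fields purely for
 * binary compatibility, so drivers should go through the accessors.
 * A hypothetical ->get_settings() fragment:
 */
static void my_fill_speed(struct ethtool_cmd *ep)
{
	ethtool_cmd_speed_set(ep, SPEED_1000);	/* fills speed and speed_hi */
}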
/* Device supports clause 22 register access to PHY or peripherals
* using the interface defined in <linux/mii.h>. This should not be
* set if there are known to be no such peripherals present or if
* the driver only emulates clause 22 registers for compatibility.
*/
#define ETH_MDIO_SUPPORTS_C22 1
/* Device supports clause 45 register access to PHY or peripherals
* using the interface defined in <linux/mii.h> and <linux/mdio.h>.
* This should not be set if there are known to be no such peripherals
* present.
*/
#define ETH_MDIO_SUPPORTS_C45 2
#define ETHTOOL_FWVERS_LEN 32
#define ETHTOOL_BUSINFO_LEN 32
/* these strings are set to whatever the driver author decides... */
@@ -877,8 +896,7 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
*
* All operations are optional (i.e. the function pointer may be set
* to %NULL) and callers must take this into account. Callers must
* hold the RTNL, except that for @get_drvinfo the caller may or may
* not hold the RTNL.
* hold the RTNL lock.
*
* See the structures used by these operations for further documentation.
*

View File

@@ -18,574 +18,25 @@
#include <linux/types.h>
#include <linux/magic.h>
#include <linux/fs.h>
/*
* The second extended filesystem constants/structures
*/
/*
* Define EXT2FS_DEBUG to produce debug messages
*/
#undef EXT2FS_DEBUG
/*
* Define EXT2_RESERVATION to reserve data blocks for expanding files
*/
#define EXT2_DEFAULT_RESERVE_BLOCKS 8
/*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
#define EXT2_MAX_RESERVE_BLOCKS 1027
#define EXT2_RESERVE_WINDOW_NOT_ALLOCATED 0
/*
* The second extended file system version
*/
#define EXT2FS_DATE "95/08/09"
#define EXT2FS_VERSION "0.5b"
/*
* Debug code
*/
#ifdef EXT2FS_DEBUG
# define ext2_debug(f, a...) { \
printk ("EXT2-fs DEBUG (%s, %d): %s:", \
__FILE__, __LINE__, __func__); \
printk (f, ## a); \
}
#else
# define ext2_debug(f, a...) /**/
#endif
/*
* Special inode numbers
*/
#define EXT2_BAD_INO 1 /* Bad blocks inode */
#define EXT2_ROOT_INO 2 /* Root inode */
#define EXT2_BOOT_LOADER_INO 5 /* Boot loader inode */
#define EXT2_UNDEL_DIR_INO 6 /* Undelete directory inode */
/* First non-reserved inode for old ext2 filesystems */
#define EXT2_GOOD_OLD_FIRST_INO 11
#ifdef __KERNEL__
#include <linux/ext2_fs_sb.h>
static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb)
{
return sb->s_fs_info;
}
#else
/* Assume that user mode programs are passing in an ext2fs superblock, not
* a kernel struct super_block. This will allow us to call the feature-test
* macros from user land. */
#define EXT2_SB(sb) (sb)
#endif
#define EXT2_NAME_LEN 255
/*
* Maximal count of links to a file
*/
#define EXT2_LINK_MAX 32000
/*
* Macro-instructions used to manage several block sizes
*/
#define EXT2_MIN_BLOCK_SIZE 1024
#define EXT2_MAX_BLOCK_SIZE 4096
#define EXT2_MIN_BLOCK_LOG_SIZE 10
#ifdef __KERNEL__
# define EXT2_BLOCK_SIZE(s) ((s)->s_blocksize)
#else
# define EXT2_BLOCK_SIZE(s) (EXT2_MIN_BLOCK_SIZE << (s)->s_log_block_size)
#endif
#define EXT2_ADDR_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof (__u32))
#ifdef __KERNEL__
# define EXT2_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits)
#else
# define EXT2_BLOCK_SIZE_BITS(s) ((s)->s_log_block_size + 10)
#endif
#ifdef __KERNEL__
#define EXT2_ADDR_PER_BLOCK_BITS(s) (EXT2_SB(s)->s_addr_per_block_bits)
#define EXT2_INODE_SIZE(s) (EXT2_SB(s)->s_inode_size)
#define EXT2_FIRST_INO(s) (EXT2_SB(s)->s_first_ino)
#else
#define EXT2_INODE_SIZE(s) (((s)->s_rev_level == EXT2_GOOD_OLD_REV) ? \
EXT2_GOOD_OLD_INODE_SIZE : \
(s)->s_inode_size)
#define EXT2_FIRST_INO(s) (((s)->s_rev_level == EXT2_GOOD_OLD_REV) ? \
EXT2_GOOD_OLD_FIRST_INO : \
(s)->s_first_ino)
#endif
#define EXT2_SB_MAGIC_OFFSET 0x38
#define EXT2_SB_BLOCKS_OFFSET 0x04
#define EXT2_SB_BSIZE_OFFSET 0x18
/*
* Macro-instructions used to manage fragments
*/
#define EXT2_MIN_FRAG_SIZE 1024
#define EXT2_MAX_FRAG_SIZE 4096
#define EXT2_MIN_FRAG_LOG_SIZE 10
#ifdef __KERNEL__
# define EXT2_FRAG_SIZE(s) (EXT2_SB(s)->s_frag_size)
# define EXT2_FRAGS_PER_BLOCK(s) (EXT2_SB(s)->s_frags_per_block)
#else
# define EXT2_FRAG_SIZE(s) (EXT2_MIN_FRAG_SIZE << (s)->s_log_frag_size)
# define EXT2_FRAGS_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / EXT2_FRAG_SIZE(s))
#endif
/*
* Structure of a blocks group descriptor
*/
struct ext2_group_desc
static inline u64 ext2_image_size(void *ext2_sb)
{
__le32 bg_block_bitmap; /* Blocks bitmap block */
__le32 bg_inode_bitmap; /* Inodes bitmap block */
__le32 bg_inode_table; /* Inodes table block */
__le16 bg_free_blocks_count; /* Free blocks count */
__le16 bg_free_inodes_count; /* Free inodes count */
__le16 bg_used_dirs_count; /* Directories count */
__le16 bg_pad;
__le32 bg_reserved[3];
};
/*
* Macro-instructions used to manage group descriptors
*/
#ifdef __KERNEL__
# define EXT2_BLOCKS_PER_GROUP(s) (EXT2_SB(s)->s_blocks_per_group)
# define EXT2_DESC_PER_BLOCK(s) (EXT2_SB(s)->s_desc_per_block)
# define EXT2_INODES_PER_GROUP(s) (EXT2_SB(s)->s_inodes_per_group)
# define EXT2_DESC_PER_BLOCK_BITS(s) (EXT2_SB(s)->s_desc_per_block_bits)
#else
# define EXT2_BLOCKS_PER_GROUP(s) ((s)->s_blocks_per_group)
# define EXT2_DESC_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof (struct ext2_group_desc))
# define EXT2_INODES_PER_GROUP(s) ((s)->s_inodes_per_group)
#endif
/*
* Constants relative to the data blocks
*/
#define EXT2_NDIR_BLOCKS 12
#define EXT2_IND_BLOCK EXT2_NDIR_BLOCKS
#define EXT2_DIND_BLOCK (EXT2_IND_BLOCK + 1)
#define EXT2_TIND_BLOCK (EXT2_DIND_BLOCK + 1)
#define EXT2_N_BLOCKS (EXT2_TIND_BLOCK + 1)
/*
* Inode flags (GETFLAGS/SETFLAGS)
*/
#define EXT2_SECRM_FL FS_SECRM_FL /* Secure deletion */
#define EXT2_UNRM_FL FS_UNRM_FL /* Undelete */
#define EXT2_COMPR_FL FS_COMPR_FL /* Compress file */
#define EXT2_SYNC_FL FS_SYNC_FL /* Synchronous updates */
#define EXT2_IMMUTABLE_FL FS_IMMUTABLE_FL /* Immutable file */
#define EXT2_APPEND_FL FS_APPEND_FL /* writes to file may only append */
#define EXT2_NODUMP_FL FS_NODUMP_FL /* do not dump file */
#define EXT2_NOATIME_FL FS_NOATIME_FL /* do not update atime */
/* Reserved for compression usage... */
#define EXT2_DIRTY_FL FS_DIRTY_FL
#define EXT2_COMPRBLK_FL FS_COMPRBLK_FL /* One or more compressed clusters */
#define EXT2_NOCOMP_FL FS_NOCOMP_FL /* Don't compress */
#define EXT2_ECOMPR_FL FS_ECOMPR_FL /* Compression error */
/* End compression flags --- maybe not all used */
#define EXT2_BTREE_FL FS_BTREE_FL /* btree format dir */
#define EXT2_INDEX_FL FS_INDEX_FL /* hash-indexed directory */
#define EXT2_IMAGIC_FL FS_IMAGIC_FL /* AFS directory */
#define EXT2_JOURNAL_DATA_FL FS_JOURNAL_DATA_FL /* Reserved for ext3 */
#define EXT2_NOTAIL_FL FS_NOTAIL_FL /* file tail should not be merged */
#define EXT2_DIRSYNC_FL FS_DIRSYNC_FL /* dirsync behaviour (directories only) */
#define EXT2_TOPDIR_FL FS_TOPDIR_FL /* Top of directory hierarchies*/
#define EXT2_RESERVED_FL FS_RESERVED_FL /* reserved for ext2 lib */
#define EXT2_FL_USER_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */
#define EXT2_FL_USER_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */
/* Flags that should be inherited by new inodes from their parent. */
#define EXT2_FL_INHERITED (EXT2_SECRM_FL | EXT2_UNRM_FL | EXT2_COMPR_FL |\
EXT2_SYNC_FL | EXT2_NODUMP_FL |\
EXT2_NOATIME_FL | EXT2_COMPRBLK_FL |\
EXT2_NOCOMP_FL | EXT2_JOURNAL_DATA_FL |\
EXT2_NOTAIL_FL | EXT2_DIRSYNC_FL)
/* Flags that are appropriate for regular files (all but dir-specific ones). */
#define EXT2_REG_FLMASK (~(EXT2_DIRSYNC_FL | EXT2_TOPDIR_FL))
/* Flags that are appropriate for non-directories/regular files. */
#define EXT2_OTHER_FLMASK (EXT2_NODUMP_FL | EXT2_NOATIME_FL)
/* Mask out flags that are inappropriate for the given type of inode. */
static inline __u32 ext2_mask_flags(umode_t mode, __u32 flags)
{
if (S_ISDIR(mode))
return flags;
else if (S_ISREG(mode))
return flags & EXT2_REG_FLMASK;
else
		return flags & EXT2_OTHER_FLMASK;
}
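/*
 * Illustrative use (hypothetical helper, mirroring what inode
 * allocation typically does): a new inode first inherits the parent
 * directory's inheritable flags, then masks off those its type cannot
 * carry.
 */
static inline __u32 ext2_sketch_new_inode_flags(__u32 dir_flags, umode_t mode)
{
	return ext2_mask_flags(mode, dir_flags & EXT2_FL_INHERITED);
}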
/*
* ioctl commands
*/
#define EXT2_IOC_GETFLAGS FS_IOC_GETFLAGS
#define EXT2_IOC_SETFLAGS FS_IOC_SETFLAGS
#define EXT2_IOC_GETVERSION FS_IOC_GETVERSION
#define EXT2_IOC_SETVERSION FS_IOC_SETVERSION
#define EXT2_IOC_GETRSVSZ _IOR('f', 5, long)
#define EXT2_IOC_SETRSVSZ _IOW('f', 6, long)
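/*
 * Userspace usage sketch (illustrative; assumes an fd open on an ext2
 * file and <sys/ioctl.h>):
 *
 *	long rsv;
 *	if (ioctl(fd, EXT2_IOC_GETRSVSZ, &rsv) == 0)
 *		printf("reservation window: %ld blocks\n", rsv);
 */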
/*
* ioctl commands in 32 bit emulation
*/
#define EXT2_IOC32_GETFLAGS FS_IOC32_GETFLAGS
#define EXT2_IOC32_SETFLAGS FS_IOC32_SETFLAGS
#define EXT2_IOC32_GETVERSION FS_IOC32_GETVERSION
#define EXT2_IOC32_SETVERSION FS_IOC32_SETVERSION
/*
* Structure of an inode on the disk
*/
struct ext2_inode {
__le16 i_mode; /* File mode */
__le16 i_uid; /* Low 16 bits of Owner Uid */
__le32 i_size; /* Size in bytes */
__le32 i_atime; /* Access time */
__le32 i_ctime; /* Creation time */
__le32 i_mtime; /* Modification time */
__le32 i_dtime; /* Deletion Time */
__le16 i_gid; /* Low 16 bits of Group Id */
__le16 i_links_count; /* Links count */
__le32 i_blocks; /* Blocks count */
__le32 i_flags; /* File flags */
union {
struct {
__le32 l_i_reserved1;
} linux1;
struct {
__le32 h_i_translator;
} hurd1;
struct {
__le32 m_i_reserved1;
} masix1;
} osd1; /* OS dependent 1 */
__le32 i_block[EXT2_N_BLOCKS];/* Pointers to blocks */
__le32 i_generation; /* File version (for NFS) */
__le32 i_file_acl; /* File ACL */
__le32 i_dir_acl; /* Directory ACL */
__le32 i_faddr; /* Fragment address */
union {
struct {
__u8 l_i_frag; /* Fragment number */
__u8 l_i_fsize; /* Fragment size */
__u16 i_pad1;
__le16 l_i_uid_high; /* these 2 fields */
__le16 l_i_gid_high; /* were reserved2[0] */
__u32 l_i_reserved2;
} linux2;
struct {
__u8 h_i_frag; /* Fragment number */
__u8 h_i_fsize; /* Fragment size */
__le16 h_i_mode_high;
__le16 h_i_uid_high;
__le16 h_i_gid_high;
__le32 h_i_author;
} hurd2;
struct {
__u8 m_i_frag; /* Fragment number */
__u8 m_i_fsize; /* Fragment size */
__u16 m_pad1;
__u32 m_i_reserved2[2];
} masix2;
} osd2; /* OS dependent 2 */
};
#define i_size_high i_dir_acl
#if defined(__KERNEL__) || defined(__linux__)
#define i_reserved1 osd1.linux1.l_i_reserved1
#define i_frag osd2.linux2.l_i_frag
#define i_fsize osd2.linux2.l_i_fsize
#define i_uid_low i_uid
#define i_gid_low i_gid
#define i_uid_high osd2.linux2.l_i_uid_high
#define i_gid_high osd2.linux2.l_i_gid_high
#define i_reserved2 osd2.linux2.l_i_reserved2
#endif
#ifdef __hurd__
#define i_translator osd1.hurd1.h_i_translator
#define i_frag osd2.hurd2.h_i_frag
#define i_fsize osd2.hurd2.h_i_fsize
#define i_uid_high osd2.hurd2.h_i_uid_high
#define i_gid_high osd2.hurd2.h_i_gid_high
#define i_author osd2.hurd2.h_i_author
#endif
#ifdef __masix__
#define i_reserved1 osd1.masix1.m_i_reserved1
#define i_frag osd2.masix2.m_i_frag
#define i_fsize osd2.masix2.m_i_fsize
#define i_reserved2 osd2.masix2.m_i_reserved2
#endif
/*
* File system states
*/
#define EXT2_VALID_FS 0x0001 /* Unmounted cleanly */
#define EXT2_ERROR_FS 0x0002 /* Errors detected */
/*
* Mount flags
*/
#define EXT2_MOUNT_CHECK 0x000001 /* Do mount-time checks */
#define EXT2_MOUNT_OLDALLOC 0x000002 /* Don't use the new Orlov allocator */
#define EXT2_MOUNT_GRPID 0x000004 /* Create files with directory's group */
#define EXT2_MOUNT_DEBUG 0x000008 /* Some debugging messages */
#define EXT2_MOUNT_ERRORS_CONT 0x000010 /* Continue on errors */
#define EXT2_MOUNT_ERRORS_RO 0x000020 /* Remount fs ro on errors */
#define EXT2_MOUNT_ERRORS_PANIC 0x000040 /* Panic on errors */
#define EXT2_MOUNT_MINIX_DF 0x000080 /* Mimics the Minix statfs */
#define EXT2_MOUNT_NOBH 0x000100 /* No buffer_heads */
#define EXT2_MOUNT_NO_UID32 0x000200 /* Disable 32-bit UIDs */
#define EXT2_MOUNT_XATTR_USER 0x004000 /* Extended user attributes */
#define EXT2_MOUNT_POSIX_ACL 0x008000 /* POSIX Access Control Lists */
#define EXT2_MOUNT_XIP 0x010000 /* Execute in place */
#define EXT2_MOUNT_USRQUOTA 0x020000 /* user quota */
#define EXT2_MOUNT_GRPQUOTA 0x040000 /* group quota */
#define EXT2_MOUNT_RESERVATION 0x080000 /* Preallocation */
#define clear_opt(o, opt) o &= ~EXT2_MOUNT_##opt
#define set_opt(o, opt) o |= EXT2_MOUNT_##opt
#define test_opt(sb, opt) (EXT2_SB(sb)->s_mount_opt & \
EXT2_MOUNT_##opt)
/*
* Maximal mount counts between two filesystem checks
*/
#define EXT2_DFL_MAX_MNT_COUNT 20 /* Allow 20 mounts */
#define EXT2_DFL_CHECKINTERVAL 0 /* Don't use interval check */
/*
* Behaviour when detecting errors
*/
#define EXT2_ERRORS_CONTINUE 1 /* Continue execution */
#define EXT2_ERRORS_RO 2 /* Remount fs read-only */
#define EXT2_ERRORS_PANIC 3 /* Panic */
#define EXT2_ERRORS_DEFAULT EXT2_ERRORS_CONTINUE
/*
* Structure of the super block
*/
struct ext2_super_block {
__le32 s_inodes_count; /* Inodes count */
__le32 s_blocks_count; /* Blocks count */
__le32 s_r_blocks_count; /* Reserved blocks count */
__le32 s_free_blocks_count; /* Free blocks count */
__le32 s_free_inodes_count; /* Free inodes count */
__le32 s_first_data_block; /* First Data Block */
__le32 s_log_block_size; /* Block size */
__le32 s_log_frag_size; /* Fragment size */
__le32 s_blocks_per_group; /* # Blocks per group */
__le32 s_frags_per_group; /* # Fragments per group */
__le32 s_inodes_per_group; /* # Inodes per group */
__le32 s_mtime; /* Mount time */
__le32 s_wtime; /* Write time */
__le16 s_mnt_count; /* Mount count */
__le16 s_max_mnt_count; /* Maximal mount count */
__le16 s_magic; /* Magic signature */
__le16 s_state; /* File system state */
__le16 s_errors; /* Behaviour when detecting errors */
__le16 s_minor_rev_level; /* minor revision level */
__le32 s_lastcheck; /* time of last check */
__le32 s_checkinterval; /* max. time between checks */
__le32 s_creator_os; /* OS */
__le32 s_rev_level; /* Revision level */
__le16 s_def_resuid; /* Default uid for reserved blocks */
__le16 s_def_resgid; /* Default gid for reserved blocks */
/*
* These fields are for EXT2_DYNAMIC_REV superblocks only.
*
* Note: the difference between the compatible feature set and
* the incompatible feature set is that if there is a bit set
* in the incompatible feature set that the kernel doesn't
* know about, it should refuse to mount the filesystem.
*
* e2fsck's requirements are more strict; if it doesn't know
* about a feature in either the compatible or incompatible
* feature set, it must abort and not try to meddle with
* things it doesn't understand...
*/
__le32 s_first_ino; /* First non-reserved inode */
__le16 s_inode_size; /* size of inode structure */
__le16 s_block_group_nr; /* block group # of this superblock */
__le32 s_feature_compat; /* compatible feature set */
__le32 s_feature_incompat; /* incompatible feature set */
__le32 s_feature_ro_compat; /* readonly-compatible feature set */
__u8 s_uuid[16]; /* 128-bit uuid for volume */
char s_volume_name[16]; /* volume name */
char s_last_mounted[64]; /* directory where last mounted */
__le32 s_algorithm_usage_bitmap; /* For compression */
/*
* Performance hints. Directory preallocation should only
* happen if the EXT2_FEATURE_COMPAT_DIR_PREALLOC flag is on.
*/
__u8 s_prealloc_blocks; /* Nr of blocks to try to preallocate*/
__u8 s_prealloc_dir_blocks; /* Nr to preallocate for dirs */
__u16 s_padding1;
/*
* Journaling support valid if EXT3_FEATURE_COMPAT_HAS_JOURNAL set.
*/
__u8 s_journal_uuid[16]; /* uuid of journal superblock */
__u32 s_journal_inum; /* inode number of journal file */
__u32 s_journal_dev; /* device number of journal file */
__u32 s_last_orphan; /* start of list of inodes to delete */
__u32 s_hash_seed[4]; /* HTREE hash seed */
__u8 s_def_hash_version; /* Default hash version to use */
__u8 s_reserved_char_pad;
__u16 s_reserved_word_pad;
__le32 s_default_mount_opts;
__le32 s_first_meta_bg; /* First metablock block group */
__u32 s_reserved[190]; /* Padding to the end of the block */
};
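/*
 * Layout note (illustrative): the superblock starts 1024 bytes into the
 * device, and within it s_blocks_count sits at byte 0x04,
 * s_log_block_size at 0x18 and s_magic at 0x38, matching the
 * EXT2_SB_*_OFFSET constants defined earlier in this header.
 */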
/*
* Codes for operating systems
*/
#define EXT2_OS_LINUX 0
#define EXT2_OS_HURD 1
#define EXT2_OS_MASIX 2
#define EXT2_OS_FREEBSD 3
#define EXT2_OS_LITES 4
/*
* Revision levels
*/
#define EXT2_GOOD_OLD_REV 0 /* The good old (original) format */
#define EXT2_DYNAMIC_REV 1 /* V2 format w/ dynamic inode sizes */
#define EXT2_CURRENT_REV EXT2_GOOD_OLD_REV
#define EXT2_MAX_SUPP_REV EXT2_DYNAMIC_REV
#define EXT2_GOOD_OLD_INODE_SIZE 128
/*
* Feature set definitions
*/
#define EXT2_HAS_COMPAT_FEATURE(sb,mask) \
( EXT2_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask) )
#define EXT2_HAS_RO_COMPAT_FEATURE(sb,mask) \
( EXT2_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask) )
#define EXT2_HAS_INCOMPAT_FEATURE(sb,mask) \
( EXT2_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask) )
#define EXT2_SET_COMPAT_FEATURE(sb,mask) \
EXT2_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
#define EXT2_SET_RO_COMPAT_FEATURE(sb,mask) \
EXT2_SB(sb)->s_es->s_feature_ro_compat |= cpu_to_le32(mask)
#define EXT2_SET_INCOMPAT_FEATURE(sb,mask) \
EXT2_SB(sb)->s_es->s_feature_incompat |= cpu_to_le32(mask)
#define EXT2_CLEAR_COMPAT_FEATURE(sb,mask) \
EXT2_SB(sb)->s_es->s_feature_compat &= ~cpu_to_le32(mask)
#define EXT2_CLEAR_RO_COMPAT_FEATURE(sb,mask) \
EXT2_SB(sb)->s_es->s_feature_ro_compat &= ~cpu_to_le32(mask)
#define EXT2_CLEAR_INCOMPAT_FEATURE(sb,mask) \
EXT2_SB(sb)->s_es->s_feature_incompat &= ~cpu_to_le32(mask)
#define EXT2_FEATURE_COMPAT_DIR_PREALLOC 0x0001
#define EXT2_FEATURE_COMPAT_IMAGIC_INODES 0x0002
#define EXT3_FEATURE_COMPAT_HAS_JOURNAL 0x0004
#define EXT2_FEATURE_COMPAT_EXT_ATTR 0x0008
#define EXT2_FEATURE_COMPAT_RESIZE_INO 0x0010
#define EXT2_FEATURE_COMPAT_DIR_INDEX 0x0020
#define EXT2_FEATURE_COMPAT_ANY 0xffffffff
#define EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001
#define EXT2_FEATURE_RO_COMPAT_LARGE_FILE 0x0002
#define EXT2_FEATURE_RO_COMPAT_BTREE_DIR 0x0004
#define EXT2_FEATURE_RO_COMPAT_ANY 0xffffffff
#define EXT2_FEATURE_INCOMPAT_COMPRESSION 0x0001
#define EXT2_FEATURE_INCOMPAT_FILETYPE 0x0002
#define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004
#define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008
#define EXT2_FEATURE_INCOMPAT_META_BG 0x0010
#define EXT2_FEATURE_INCOMPAT_ANY 0xffffffff
#define EXT2_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
#define EXT2_FEATURE_INCOMPAT_SUPP (EXT2_FEATURE_INCOMPAT_FILETYPE| \
EXT2_FEATURE_INCOMPAT_META_BG)
#define EXT2_FEATURE_RO_COMPAT_SUPP (EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER| \
EXT2_FEATURE_RO_COMPAT_LARGE_FILE| \
EXT2_FEATURE_RO_COMPAT_BTREE_DIR)
#define EXT2_FEATURE_RO_COMPAT_UNSUPPORTED ~EXT2_FEATURE_RO_COMPAT_SUPP
#define EXT2_FEATURE_INCOMPAT_UNSUPPORTED ~EXT2_FEATURE_INCOMPAT_SUPP
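/*
 * Sketch (hypothetical helper) of the mount-time policy described in
 * the superblock comment above: unknown incompat bits must fail the
 * mount, unknown ro-compat bits permit at most a read-only mount, and
 * unknown compat bits are harmless.
 */
static inline int ext2_sketch_feature_check(struct super_block *sb, int readonly)
{
	if (EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_UNSUPPORTED))
		return -1;	/* refuse the mount entirely */
	if (!readonly &&
	    EXT2_HAS_RO_COMPAT_FEATURE(sb, EXT2_FEATURE_RO_COMPAT_UNSUPPORTED))
		return -1;	/* fall back to read-only or refuse */
	return 0;
}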
/*
* Default values for user and/or group using reserved blocks
*/
#define EXT2_DEF_RESUID 0
#define EXT2_DEF_RESGID 0
/*
* Default mount options
*/
#define EXT2_DEFM_DEBUG 0x0001
#define EXT2_DEFM_BSDGROUPS 0x0002
#define EXT2_DEFM_XATTR_USER 0x0004
#define EXT2_DEFM_ACL 0x0008
#define EXT2_DEFM_UID16 0x0010
/* Not used by ext2, but reserved for use by ext3 */
#define EXT3_DEFM_JMODE 0x0060
#define EXT3_DEFM_JMODE_DATA 0x0020
#define EXT3_DEFM_JMODE_ORDERED 0x0040
#define EXT3_DEFM_JMODE_WBACK 0x0060
/*
* Structure of a directory entry
*/
#define EXT2_NAME_LEN 255
struct ext2_dir_entry {
__le32 inode; /* Inode number */
__le16 rec_len; /* Directory entry length */
__le16 name_len; /* Name length */
char name[EXT2_NAME_LEN]; /* File name */
};
/*
* The new version of the directory entry. Since EXT2 structures are
* stored in intel byte order, and the name_len field could never be
* bigger than 255 chars, it's safe to reclaim the extra byte for the
* file_type field.
*/
struct ext2_dir_entry_2 {
__le32 inode; /* Inode number */
__le16 rec_len; /* Directory entry length */
__u8 name_len; /* Name length */
__u8 file_type;
char name[EXT2_NAME_LEN]; /* File name */
};
/*
* Ext2 directory file types. Only the low 3 bits are used. The
* other bits are reserved for now.
*/
enum {
EXT2_FT_UNKNOWN = 0,
EXT2_FT_REG_FILE = 1,
EXT2_FT_DIR = 2,
EXT2_FT_CHRDEV = 3,
EXT2_FT_BLKDEV = 4,
EXT2_FT_FIFO = 5,
EXT2_FT_SOCK = 6,
EXT2_FT_SYMLINK = 7,
EXT2_FT_MAX
};
/*
* EXT2_DIR_PAD defines the directory entries boundaries
*
* NOTE: It must be a multiple of 4
*/
#define EXT2_DIR_PAD 4
#define EXT2_DIR_ROUND (EXT2_DIR_PAD - 1)
#define EXT2_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT2_DIR_ROUND) & \
~EXT2_DIR_ROUND)
#define EXT2_MAX_REC_LEN ((1<<16)-1)
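/*
 * Worked example (illustrative): each entry carries an 8-byte fixed
 * header (inode, rec_len, name_len, file_type), so a 5-character name
 * needs EXT2_DIR_REC_LEN(5) = (5 + 8 + 3) & ~3 = 16 bytes on disk.
 */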
#endif /* _LINUX_EXT2_FS_H */

View File

@@ -1,126 +0,0 @@
/*
* linux/include/linux/ext2_fs_sb.h
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/include/linux/minix_fs_sb.h
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#ifndef _LINUX_EXT2_FS_SB
#define _LINUX_EXT2_FS_SB
#include <linux/blockgroup_lock.h>
#include <linux/percpu_counter.h>
#include <linux/rbtree.h>
/* XXX Here for now... not interested in restructuring headers JUST now */
/* data type for block offset of block group */
typedef int ext2_grpblk_t;
/* data type for filesystem-wide blocks number */
typedef unsigned long ext2_fsblk_t;
#define E2FSBLK "%lu"
struct ext2_reserve_window {
ext2_fsblk_t _rsv_start; /* First byte reserved */
ext2_fsblk_t _rsv_end; /* Last byte reserved or 0 */
};
struct ext2_reserve_window_node {
struct rb_node rsv_node;
__u32 rsv_goal_size;
__u32 rsv_alloc_hit;
struct ext2_reserve_window rsv_window;
};
struct ext2_block_alloc_info {
/* information about reservation window */
struct ext2_reserve_window_node rsv_window_node;
/*
* Was i_next_alloc_block in ext2_inode_info. This is the logical
* (file-relative) number of the most-recently-allocated block in this
* file.
* We use this for detecting linearly ascending allocation requests.
*/
__u32 last_alloc_logical_block;
/*
* Was i_next_alloc_goal in ext2_inode_info. This is the *physical*
* companion to i_next_alloc_block: the physical block number of the
* block which was most recently allocated to this file. This gives us
* the goal (target) for the next allocation when we detect linearly
* ascending requests.
*/
ext2_fsblk_t last_alloc_physical_block;
};
#define rsv_start rsv_window._rsv_start
#define rsv_end rsv_window._rsv_end
/*
* second extended-fs super-block data in memory
*/
struct ext2_sb_info {
unsigned long s_frag_size; /* Size of a fragment in bytes */
unsigned long s_frags_per_block;/* Number of fragments per block */
unsigned long s_inodes_per_block;/* Number of inodes per block */
unsigned long s_frags_per_group;/* Number of fragments in a group */
unsigned long s_blocks_per_group;/* Number of blocks in a group */
unsigned long s_inodes_per_group;/* Number of inodes in a group */
unsigned long s_itb_per_group; /* Number of inode table blocks per group */
unsigned long s_gdb_count; /* Number of group descriptor blocks */
unsigned long s_desc_per_block; /* Number of group descriptors per block */
unsigned long s_groups_count; /* Number of groups in the fs */
unsigned long s_overhead_last; /* Last calculated overhead */
unsigned long s_blocks_last; /* Last seen block count */
struct buffer_head * s_sbh; /* Buffer containing the super block */
struct ext2_super_block * s_es; /* Pointer to the super block in the buffer */
struct buffer_head ** s_group_desc;
unsigned long s_mount_opt;
unsigned long s_sb_block;
uid_t s_resuid;
gid_t s_resgid;
unsigned short s_mount_state;
unsigned short s_pad;
int s_addr_per_block_bits;
int s_desc_per_block_bits;
int s_inode_size;
int s_first_ino;
spinlock_t s_next_gen_lock;
u32 s_next_generation;
unsigned long s_dir_count;
u8 *s_debts;
struct percpu_counter s_freeblocks_counter;
struct percpu_counter s_freeinodes_counter;
struct percpu_counter s_dirs_counter;
struct blockgroup_lock *s_blockgroup_lock;
/* root of the per fs reservation window tree */
spinlock_t s_rsv_window_lock;
struct rb_root s_rsv_window_root;
struct ext2_reserve_window_node s_rsv_window_head;
/*
* s_lock protects against concurrent modifications of s_mount_state,
* s_blocks_last, s_overhead_last and the content of superblock's
* buffer pointed to by sbi->s_es.
*
* Note: It is used in ext2_show_options() to provide a consistent view
* of the mount options.
*/
spinlock_t s_lock;
};
static inline spinlock_t *
sb_bgl_lock(struct ext2_sb_info *sbi, unsigned int block_group)
{
return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
}
#endif /* _LINUX_EXT2_FS_SB */

View File

@@ -1,979 +0,0 @@
/*
* linux/include/linux/ext3_fs.h
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/include/linux/minix_fs.h
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#ifndef _LINUX_EXT3_FS_H
#define _LINUX_EXT3_FS_H
#include <linux/types.h>
#include <linux/magic.h>
/*
* The second extended filesystem constants/structures
*/
/*
* Define EXT3FS_DEBUG to produce debug messages
*/
#undef EXT3FS_DEBUG
/*
* Define EXT3_RESERVATION to reserve data blocks for expanding files
*/
#define EXT3_DEFAULT_RESERVE_BLOCKS 8
/*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
#define EXT3_MAX_RESERVE_BLOCKS 1027
#define EXT3_RESERVE_WINDOW_NOT_ALLOCATED 0
/*
* Debug code
*/
#ifdef EXT3FS_DEBUG
#define ext3_debug(f, a...) \
do { \
printk (KERN_DEBUG "EXT3-fs DEBUG (%s, %d): %s:", \
__FILE__, __LINE__, __func__); \
printk (KERN_DEBUG f, ## a); \
} while (0)
#else
#define ext3_debug(f, a...) do {} while (0)
#endif
/*
* Special inodes numbers
*/
#define EXT3_BAD_INO 1 /* Bad blocks inode */
#define EXT3_ROOT_INO 2 /* Root inode */
#define EXT3_BOOT_LOADER_INO 5 /* Boot loader inode */
#define EXT3_UNDEL_DIR_INO 6 /* Undelete directory inode */
#define EXT3_RESIZE_INO 7 /* Reserved group descriptors inode */
#define EXT3_JOURNAL_INO 8 /* Journal inode */
/* First non-reserved inode for old ext3 filesystems */
#define EXT3_GOOD_OLD_FIRST_INO 11
/*
* Maximal count of links to a file
*/
#define EXT3_LINK_MAX 32000
/*
* Macro-instructions used to manage several block sizes
*/
#define EXT3_MIN_BLOCK_SIZE 1024
#define EXT3_MAX_BLOCK_SIZE 65536
#define EXT3_MIN_BLOCK_LOG_SIZE 10
#ifdef __KERNEL__
# define EXT3_BLOCK_SIZE(s) ((s)->s_blocksize)
#else
# define EXT3_BLOCK_SIZE(s) (EXT3_MIN_BLOCK_SIZE << (s)->s_log_block_size)
#endif
#define EXT3_ADDR_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / sizeof (__u32))
#ifdef __KERNEL__
# define EXT3_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits)
#else
# define EXT3_BLOCK_SIZE_BITS(s) ((s)->s_log_block_size + 10)
#endif
#ifdef __KERNEL__
#define EXT3_ADDR_PER_BLOCK_BITS(s) (EXT3_SB(s)->s_addr_per_block_bits)
#define EXT3_INODE_SIZE(s) (EXT3_SB(s)->s_inode_size)
#define EXT3_FIRST_INO(s) (EXT3_SB(s)->s_first_ino)
#else
#define EXT3_INODE_SIZE(s) (((s)->s_rev_level == EXT3_GOOD_OLD_REV) ? \
EXT3_GOOD_OLD_INODE_SIZE : \
(s)->s_inode_size)
#define EXT3_FIRST_INO(s) (((s)->s_rev_level == EXT3_GOOD_OLD_REV) ? \
EXT3_GOOD_OLD_FIRST_INO : \
(s)->s_first_ino)
#endif
/*
* Macro-instructions used to manage fragments
*/
#define EXT3_MIN_FRAG_SIZE 1024
#define EXT3_MAX_FRAG_SIZE 4096
#define EXT3_MIN_FRAG_LOG_SIZE 10
#ifdef __KERNEL__
# define EXT3_FRAG_SIZE(s) (EXT3_SB(s)->s_frag_size)
# define EXT3_FRAGS_PER_BLOCK(s) (EXT3_SB(s)->s_frags_per_block)
#else
# define EXT3_FRAG_SIZE(s) (EXT3_MIN_FRAG_SIZE << (s)->s_log_frag_size)
# define EXT3_FRAGS_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / EXT3_FRAG_SIZE(s))
#endif
/*
* Structure of a blocks group descriptor
*/
struct ext3_group_desc
{
__le32 bg_block_bitmap; /* Blocks bitmap block */
__le32 bg_inode_bitmap; /* Inodes bitmap block */
__le32 bg_inode_table; /* Inodes table block */
__le16 bg_free_blocks_count; /* Free blocks count */
__le16 bg_free_inodes_count; /* Free inodes count */
__le16 bg_used_dirs_count; /* Directories count */
__u16 bg_pad;
__le32 bg_reserved[3];
};
/*
* Macro-instructions used to manage group descriptors
*/
#ifdef __KERNEL__
# define EXT3_BLOCKS_PER_GROUP(s) (EXT3_SB(s)->s_blocks_per_group)
# define EXT3_DESC_PER_BLOCK(s) (EXT3_SB(s)->s_desc_per_block)
# define EXT3_INODES_PER_GROUP(s) (EXT3_SB(s)->s_inodes_per_group)
# define EXT3_DESC_PER_BLOCK_BITS(s) (EXT3_SB(s)->s_desc_per_block_bits)
#else
# define EXT3_BLOCKS_PER_GROUP(s) ((s)->s_blocks_per_group)
# define EXT3_DESC_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / sizeof (struct ext3_group_desc))
# define EXT3_INODES_PER_GROUP(s) ((s)->s_inodes_per_group)
#endif
/*
* Constants relative to the data blocks
*/
#define EXT3_NDIR_BLOCKS 12
#define EXT3_IND_BLOCK EXT3_NDIR_BLOCKS
#define EXT3_DIND_BLOCK (EXT3_IND_BLOCK + 1)
#define EXT3_TIND_BLOCK (EXT3_DIND_BLOCK + 1)
#define EXT3_N_BLOCKS (EXT3_TIND_BLOCK + 1)
/*
* Inode flags
*/
#define EXT3_SECRM_FL 0x00000001 /* Secure deletion */
#define EXT3_UNRM_FL 0x00000002 /* Undelete */
#define EXT3_COMPR_FL 0x00000004 /* Compress file */
#define EXT3_SYNC_FL 0x00000008 /* Synchronous updates */
#define EXT3_IMMUTABLE_FL 0x00000010 /* Immutable file */
#define EXT3_APPEND_FL 0x00000020 /* writes to file may only append */
#define EXT3_NODUMP_FL 0x00000040 /* do not dump file */
#define EXT3_NOATIME_FL 0x00000080 /* do not update atime */
/* Reserved for compression usage... */
#define EXT3_DIRTY_FL 0x00000100
#define EXT3_COMPRBLK_FL 0x00000200 /* One or more compressed clusters */
#define EXT3_NOCOMPR_FL 0x00000400 /* Don't compress */
#define EXT3_ECOMPR_FL 0x00000800 /* Compression error */
/* End compression flags --- maybe not all used */
#define EXT3_INDEX_FL 0x00001000 /* hash-indexed directory */
#define EXT3_IMAGIC_FL 0x00002000 /* AFS directory */
#define EXT3_JOURNAL_DATA_FL 0x00004000 /* file data should be journaled */
#define EXT3_NOTAIL_FL 0x00008000 /* file tail should not be merged */
#define EXT3_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
#define EXT3_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
#define EXT3_RESERVED_FL 0x80000000 /* reserved for ext3 lib */
#define EXT3_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
#define EXT3_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
/* Flags that should be inherited by new inodes from their parent. */
#define EXT3_FL_INHERITED (EXT3_SECRM_FL | EXT3_UNRM_FL | EXT3_COMPR_FL |\
EXT3_SYNC_FL | EXT3_NODUMP_FL |\
EXT3_NOATIME_FL | EXT3_COMPRBLK_FL |\
EXT3_NOCOMPR_FL | EXT3_JOURNAL_DATA_FL |\
EXT3_NOTAIL_FL | EXT3_DIRSYNC_FL)
/* Flags that are appropriate for regular files (all but dir-specific ones). */
#define EXT3_REG_FLMASK (~(EXT3_DIRSYNC_FL | EXT3_TOPDIR_FL))
/* Flags that are appropriate for non-directories/regular files. */
#define EXT3_OTHER_FLMASK (EXT3_NODUMP_FL | EXT3_NOATIME_FL)
/* Mask out flags that are inappropriate for the given type of inode. */
static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags)
{
if (S_ISDIR(mode))
return flags;
else if (S_ISREG(mode))
return flags & EXT3_REG_FLMASK;
else
return flags & EXT3_OTHER_FLMASK;
}
/* Used to pass group descriptor data when online resize is done */
struct ext3_new_group_input {
__u32 group; /* Group number for this data */
__u32 block_bitmap; /* Absolute block number of block bitmap */
__u32 inode_bitmap; /* Absolute block number of inode bitmap */
__u32 inode_table; /* Absolute block number of inode table start */
__u32 blocks_count; /* Total number of blocks in this group */
__u16 reserved_blocks; /* Number of reserved blocks in this group */
__u16 unused;
};
/* The struct ext3_new_group_input in kernel space, with free_blocks_count */
struct ext3_new_group_data {
__u32 group;
__u32 block_bitmap;
__u32 inode_bitmap;
__u32 inode_table;
__u32 blocks_count;
__u16 reserved_blocks;
__u16 unused;
__u32 free_blocks_count;
};
/*
* ioctl commands
*/
#define EXT3_IOC_GETFLAGS FS_IOC_GETFLAGS
#define EXT3_IOC_SETFLAGS FS_IOC_SETFLAGS
#define EXT3_IOC_GETVERSION _IOR('f', 3, long)
#define EXT3_IOC_SETVERSION _IOW('f', 4, long)
#define EXT3_IOC_GROUP_EXTEND _IOW('f', 7, unsigned long)
#define EXT3_IOC_GROUP_ADD _IOW('f', 8,struct ext3_new_group_input)
#define EXT3_IOC_GETVERSION_OLD FS_IOC_GETVERSION
#define EXT3_IOC_SETVERSION_OLD FS_IOC_SETVERSION
#ifdef CONFIG_JBD_DEBUG
#define EXT3_IOC_WAIT_FOR_READONLY _IOR('f', 99, long)
#endif
#define EXT3_IOC_GETRSVSZ _IOR('f', 5, long)
#define EXT3_IOC_SETRSVSZ _IOW('f', 6, long)
/*
* ioctl commands in 32 bit emulation
*/
#define EXT3_IOC32_GETFLAGS FS_IOC32_GETFLAGS
#define EXT3_IOC32_SETFLAGS FS_IOC32_SETFLAGS
#define EXT3_IOC32_GETVERSION _IOR('f', 3, int)
#define EXT3_IOC32_SETVERSION _IOW('f', 4, int)
#define EXT3_IOC32_GETRSVSZ _IOR('f', 5, int)
#define EXT3_IOC32_SETRSVSZ _IOW('f', 6, int)
#define EXT3_IOC32_GROUP_EXTEND _IOW('f', 7, unsigned int)
#ifdef CONFIG_JBD_DEBUG
#define EXT3_IOC32_WAIT_FOR_READONLY _IOR('f', 99, int)
#endif
#define EXT3_IOC32_GETVERSION_OLD FS_IOC32_GETVERSION
#define EXT3_IOC32_SETVERSION_OLD FS_IOC32_SETVERSION
/*
* Mount options
*/
struct ext3_mount_options {
unsigned long s_mount_opt;
uid_t s_resuid;
gid_t s_resgid;
unsigned long s_commit_interval;
#ifdef CONFIG_QUOTA
int s_jquota_fmt;
char *s_qf_names[MAXQUOTAS];
#endif
};
/*
* Structure of an inode on the disk
*/
struct ext3_inode {
__le16 i_mode; /* File mode */
__le16 i_uid; /* Low 16 bits of Owner Uid */
__le32 i_size; /* Size in bytes */
__le32 i_atime; /* Access time */
__le32 i_ctime; /* Creation time */
__le32 i_mtime; /* Modification time */
__le32 i_dtime; /* Deletion Time */
__le16 i_gid; /* Low 16 bits of Group Id */
__le16 i_links_count; /* Links count */
__le32 i_blocks; /* Blocks count */
__le32 i_flags; /* File flags */
union {
struct {
__u32 l_i_reserved1;
} linux1;
struct {
__u32 h_i_translator;
} hurd1;
struct {
__u32 m_i_reserved1;
} masix1;
} osd1; /* OS dependent 1 */
__le32 i_block[EXT3_N_BLOCKS];/* Pointers to blocks */
__le32 i_generation; /* File version (for NFS) */
__le32 i_file_acl; /* File ACL */
__le32 i_dir_acl; /* Directory ACL */
__le32 i_faddr; /* Fragment address */
union {
struct {
__u8 l_i_frag; /* Fragment number */
__u8 l_i_fsize; /* Fragment size */
__u16 i_pad1;
__le16 l_i_uid_high; /* these 2 fields */
__le16 l_i_gid_high; /* were reserved2[0] */
__u32 l_i_reserved2;
} linux2;
struct {
__u8 h_i_frag; /* Fragment number */
__u8 h_i_fsize; /* Fragment size */
__u16 h_i_mode_high;
__u16 h_i_uid_high;
__u16 h_i_gid_high;
__u32 h_i_author;
} hurd2;
struct {
__u8 m_i_frag; /* Fragment number */
__u8 m_i_fsize; /* Fragment size */
__u16 m_pad1;
__u32 m_i_reserved2[2];
} masix2;
} osd2; /* OS dependent 2 */
__le16 i_extra_isize;
__le16 i_pad1;
};
#define i_size_high i_dir_acl
#if defined(__KERNEL__) || defined(__linux__)
#define i_reserved1 osd1.linux1.l_i_reserved1
#define i_frag osd2.linux2.l_i_frag
#define i_fsize osd2.linux2.l_i_fsize
#define i_uid_low i_uid
#define i_gid_low i_gid
#define i_uid_high osd2.linux2.l_i_uid_high
#define i_gid_high osd2.linux2.l_i_gid_high
#define i_reserved2 osd2.linux2.l_i_reserved2
#elif defined(__GNU__)
#define i_translator osd1.hurd1.h_i_translator
#define i_frag osd2.hurd2.h_i_frag
#define i_fsize osd2.hurd2.h_i_fsize
#define i_uid_high osd2.hurd2.h_i_uid_high
#define i_gid_high osd2.hurd2.h_i_gid_high
#define i_author osd2.hurd2.h_i_author
#elif defined(__masix__)
#define i_reserved1 osd1.masix1.m_i_reserved1
#define i_frag osd2.masix2.m_i_frag
#define i_fsize osd2.masix2.m_i_fsize
#define i_reserved2 osd2.masix2.m_i_reserved2
#endif /* defined(__KERNEL__) || defined(__linux__) */
/*
* File system states
*/
#define EXT3_VALID_FS 0x0001 /* Unmounted cleanly */
#define EXT3_ERROR_FS 0x0002 /* Errors detected */
#define EXT3_ORPHAN_FS 0x0004 /* Orphans being recovered */
/*
* Misc. filesystem flags
*/
#define EXT2_FLAGS_SIGNED_HASH 0x0001 /* Signed dirhash in use */
#define EXT2_FLAGS_UNSIGNED_HASH 0x0002 /* Unsigned dirhash in use */
#define EXT2_FLAGS_TEST_FILESYS 0x0004 /* to test development code */
/*
* Mount flags
*/
#define EXT3_MOUNT_CHECK 0x00001 /* Do mount-time checks */
/* EXT3_MOUNT_OLDALLOC was there */
#define EXT3_MOUNT_GRPID 0x00004 /* Create files with directory's group */
#define EXT3_MOUNT_DEBUG 0x00008 /* Some debugging messages */
#define EXT3_MOUNT_ERRORS_CONT 0x00010 /* Continue on errors */
#define EXT3_MOUNT_ERRORS_RO 0x00020 /* Remount fs ro on errors */
#define EXT3_MOUNT_ERRORS_PANIC 0x00040 /* Panic on errors */
#define EXT3_MOUNT_MINIX_DF 0x00080 /* Mimics the Minix statfs */
#define EXT3_MOUNT_NOLOAD 0x00100 /* Don't use existing journal*/
#define EXT3_MOUNT_ABORT 0x00200 /* Fatal error detected */
#define EXT3_MOUNT_DATA_FLAGS 0x00C00 /* Mode for data writes: */
#define EXT3_MOUNT_JOURNAL_DATA 0x00400 /* Write data to journal */
#define EXT3_MOUNT_ORDERED_DATA 0x00800 /* Flush data before commit */
#define EXT3_MOUNT_WRITEBACK_DATA 0x00C00 /* No data ordering */
#define EXT3_MOUNT_UPDATE_JOURNAL 0x01000 /* Update the journal format */
#define EXT3_MOUNT_NO_UID32 0x02000 /* Disable 32-bit UIDs */
#define EXT3_MOUNT_XATTR_USER 0x04000 /* Extended user attributes */
#define EXT3_MOUNT_POSIX_ACL 0x08000 /* POSIX Access Control Lists */
#define EXT3_MOUNT_RESERVATION 0x10000 /* Preallocation */
#define EXT3_MOUNT_BARRIER 0x20000 /* Use block barriers */
#define EXT3_MOUNT_QUOTA 0x80000 /* Some quota option set */
#define EXT3_MOUNT_USRQUOTA 0x100000 /* "old" user quota */
#define EXT3_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */
#define EXT3_MOUNT_DATA_ERR_ABORT 0x400000 /* Abort on file data write
* error in ordered mode */
/* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
#ifndef _LINUX_EXT2_FS_H
#define clear_opt(o, opt) o &= ~EXT3_MOUNT_##opt
#define set_opt(o, opt) o |= EXT3_MOUNT_##opt
#define test_opt(sb, opt) (EXT3_SB(sb)->s_mount_opt & \
EXT3_MOUNT_##opt)
#else
#define EXT2_MOUNT_NOLOAD EXT3_MOUNT_NOLOAD
#define EXT2_MOUNT_ABORT EXT3_MOUNT_ABORT
#define EXT2_MOUNT_DATA_FLAGS EXT3_MOUNT_DATA_FLAGS
#endif
#define ext3_set_bit __set_bit_le
#define ext3_set_bit_atomic ext2_set_bit_atomic
#define ext3_clear_bit __clear_bit_le
#define ext3_clear_bit_atomic ext2_clear_bit_atomic
#define ext3_test_bit test_bit_le
#define ext3_find_next_zero_bit find_next_zero_bit_le
/*
* Maximal mount counts between two filesystem checks
*/
#define EXT3_DFL_MAX_MNT_COUNT 20 /* Allow 20 mounts */
#define EXT3_DFL_CHECKINTERVAL 0 /* Don't use interval check */
/*
* Behaviour when detecting errors
*/
#define EXT3_ERRORS_CONTINUE 1 /* Continue execution */
#define EXT3_ERRORS_RO 2 /* Remount fs read-only */
#define EXT3_ERRORS_PANIC 3 /* Panic */
#define EXT3_ERRORS_DEFAULT EXT3_ERRORS_CONTINUE
/*
* Structure of the super block
*/
struct ext3_super_block {
/*00*/ __le32 s_inodes_count; /* Inodes count */
__le32 s_blocks_count; /* Blocks count */
__le32 s_r_blocks_count; /* Reserved blocks count */
__le32 s_free_blocks_count; /* Free blocks count */
/*10*/ __le32 s_free_inodes_count; /* Free inodes count */
__le32 s_first_data_block; /* First Data Block */
__le32 s_log_block_size; /* Block size */
__le32 s_log_frag_size; /* Fragment size */
/*20*/ __le32 s_blocks_per_group; /* # Blocks per group */
__le32 s_frags_per_group; /* # Fragments per group */
__le32 s_inodes_per_group; /* # Inodes per group */
__le32 s_mtime; /* Mount time */
/*30*/ __le32 s_wtime; /* Write time */
__le16 s_mnt_count; /* Mount count */
__le16 s_max_mnt_count; /* Maximal mount count */
__le16 s_magic; /* Magic signature */
__le16 s_state; /* File system state */
__le16 s_errors; /* Behaviour when detecting errors */
__le16 s_minor_rev_level; /* minor revision level */
/*40*/ __le32 s_lastcheck; /* time of last check */
__le32 s_checkinterval; /* max. time between checks */
__le32 s_creator_os; /* OS */
__le32 s_rev_level; /* Revision level */
/*50*/ __le16 s_def_resuid; /* Default uid for reserved blocks */
__le16 s_def_resgid; /* Default gid for reserved blocks */
/*
* These fields are for EXT3_DYNAMIC_REV superblocks only.
*
* Note: the difference between the compatible feature set and
* the incompatible feature set is that if there is a bit set
* in the incompatible feature set that the kernel doesn't
* know about, it should refuse to mount the filesystem.
*
* e2fsck's requirements are more strict; if it doesn't know
* about a feature in either the compatible or incompatible
* feature set, it must abort and not try to meddle with
* things it doesn't understand...
*/
__le32 s_first_ino; /* First non-reserved inode */
__le16 s_inode_size; /* size of inode structure */
__le16 s_block_group_nr; /* block group # of this superblock */
__le32 s_feature_compat; /* compatible feature set */
/*60*/ __le32 s_feature_incompat; /* incompatible feature set */
__le32 s_feature_ro_compat; /* readonly-compatible feature set */
/*68*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */
/*78*/ char s_volume_name[16]; /* volume name */
/*88*/ char s_last_mounted[64]; /* directory where last mounted */
/*C8*/ __le32 s_algorithm_usage_bitmap; /* For compression */
/*
* Performance hints. Directory preallocation should only
* happen if the EXT3_FEATURE_COMPAT_DIR_PREALLOC flag is on.
*/
__u8 s_prealloc_blocks; /* Nr of blocks to try to preallocate*/
__u8 s_prealloc_dir_blocks; /* Nr to preallocate for dirs */
__le16 s_reserved_gdt_blocks; /* Per group desc for online growth */
/*
* Journaling support valid if EXT3_FEATURE_COMPAT_HAS_JOURNAL set.
*/
/*D0*/ __u8 s_journal_uuid[16]; /* uuid of journal superblock */
/*E0*/ __le32 s_journal_inum; /* inode number of journal file */
__le32 s_journal_dev; /* device number of journal file */
__le32 s_last_orphan; /* start of list of inodes to delete */
__le32 s_hash_seed[4]; /* HTREE hash seed */
__u8 s_def_hash_version; /* Default hash version to use */
__u8 s_reserved_char_pad;
__u16 s_reserved_word_pad;
__le32 s_default_mount_opts;
__le32 s_first_meta_bg; /* First metablock block group */
__le32 s_mkfs_time; /* When the filesystem was created */
__le32 s_jnl_blocks[17]; /* Backup of the journal inode */
/* 64bit support valid if EXT4_FEATURE_COMPAT_64BIT */
/*150*/ __le32 s_blocks_count_hi; /* Blocks count */
__le32 s_r_blocks_count_hi; /* Reserved blocks count */
__le32 s_free_blocks_count_hi; /* Free blocks count */
__le16 s_min_extra_isize; /* All inodes have at least # bytes */
__le16 s_want_extra_isize; /* New inodes should reserve # bytes */
__le32 s_flags; /* Miscellaneous flags */
__le16 s_raid_stride; /* RAID stride */
__le16 s_mmp_interval; /* # seconds to wait in MMP checking */
__le64 s_mmp_block; /* Block for multi-mount protection */
__le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/
__u8 s_log_groups_per_flex; /* FLEX_BG group size */
__u8 s_reserved_char_pad2;
__le16 s_reserved_pad;
__u32 s_reserved[162]; /* Padding to the end of the block */
};
#ifdef __KERNEL__
#include <linux/ext3_fs_i.h>
#include <linux/ext3_fs_sb.h>
static inline struct ext3_sb_info * EXT3_SB(struct super_block *sb)
{
return sb->s_fs_info;
}
static inline struct ext3_inode_info *EXT3_I(struct inode *inode)
{
return container_of(inode, struct ext3_inode_info, vfs_inode);
}
static inline int ext3_valid_inum(struct super_block *sb, unsigned long ino)
{
return ino == EXT3_ROOT_INO ||
ino == EXT3_JOURNAL_INO ||
ino == EXT3_RESIZE_INO ||
(ino >= EXT3_FIRST_INO(sb) &&
ino <= le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count));
}
/*
* Inode dynamic state flags
*/
enum {
EXT3_STATE_JDATA, /* journaled data exists */
EXT3_STATE_NEW, /* inode is newly created */
EXT3_STATE_XATTR, /* has in-inode xattrs */
EXT3_STATE_FLUSH_ON_CLOSE, /* flush dirty pages on close */
};
static inline int ext3_test_inode_state(struct inode *inode, int bit)
{
return test_bit(bit, &EXT3_I(inode)->i_state_flags);
}
static inline void ext3_set_inode_state(struct inode *inode, int bit)
{
set_bit(bit, &EXT3_I(inode)->i_state_flags);
}
static inline void ext3_clear_inode_state(struct inode *inode, int bit)
{
clear_bit(bit, &EXT3_I(inode)->i_state_flags);
}
#else
/* Assume that user mode programs are passing in an ext3fs superblock, not
* a kernel struct super_block. This will allow us to call the feature-test
* macros from user land. */
#define EXT3_SB(sb) (sb)
#endif
#define NEXT_ORPHAN(inode) EXT3_I(inode)->i_dtime
/*
* Codes for operating systems
*/
#define EXT3_OS_LINUX 0
#define EXT3_OS_HURD 1
#define EXT3_OS_MASIX 2
#define EXT3_OS_FREEBSD 3
#define EXT3_OS_LITES 4
/*
* Revision levels
*/
#define EXT3_GOOD_OLD_REV 0 /* The good old (original) format */
#define EXT3_DYNAMIC_REV 1 /* V2 format w/ dynamic inode sizes */
#define EXT3_CURRENT_REV EXT3_GOOD_OLD_REV
#define EXT3_MAX_SUPP_REV EXT3_DYNAMIC_REV
#define EXT3_GOOD_OLD_INODE_SIZE 128
/*
* Feature set definitions
*/
#define EXT3_HAS_COMPAT_FEATURE(sb,mask) \
( EXT3_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask) )
#define EXT3_HAS_RO_COMPAT_FEATURE(sb,mask) \
( EXT3_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask) )
#define EXT3_HAS_INCOMPAT_FEATURE(sb,mask) \
( EXT3_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask) )
#define EXT3_SET_COMPAT_FEATURE(sb,mask) \
EXT3_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
#define EXT3_SET_RO_COMPAT_FEATURE(sb,mask) \
EXT3_SB(sb)->s_es->s_feature_ro_compat |= cpu_to_le32(mask)
#define EXT3_SET_INCOMPAT_FEATURE(sb,mask) \
EXT3_SB(sb)->s_es->s_feature_incompat |= cpu_to_le32(mask)
#define EXT3_CLEAR_COMPAT_FEATURE(sb,mask) \
EXT3_SB(sb)->s_es->s_feature_compat &= ~cpu_to_le32(mask)
#define EXT3_CLEAR_RO_COMPAT_FEATURE(sb,mask) \
EXT3_SB(sb)->s_es->s_feature_ro_compat &= ~cpu_to_le32(mask)
#define EXT3_CLEAR_INCOMPAT_FEATURE(sb,mask) \
EXT3_SB(sb)->s_es->s_feature_incompat &= ~cpu_to_le32(mask)
#define EXT3_FEATURE_COMPAT_DIR_PREALLOC 0x0001
#define EXT3_FEATURE_COMPAT_IMAGIC_INODES 0x0002
#define EXT3_FEATURE_COMPAT_HAS_JOURNAL 0x0004
#define EXT3_FEATURE_COMPAT_EXT_ATTR 0x0008
#define EXT3_FEATURE_COMPAT_RESIZE_INODE 0x0010
#define EXT3_FEATURE_COMPAT_DIR_INDEX 0x0020
#define EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001
#define EXT3_FEATURE_RO_COMPAT_LARGE_FILE 0x0002
#define EXT3_FEATURE_RO_COMPAT_BTREE_DIR 0x0004
#define EXT3_FEATURE_INCOMPAT_COMPRESSION 0x0001
#define EXT3_FEATURE_INCOMPAT_FILETYPE 0x0002
#define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */
#define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Journal device */
#define EXT3_FEATURE_INCOMPAT_META_BG 0x0010
#define EXT3_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
#define EXT3_FEATURE_INCOMPAT_SUPP (EXT3_FEATURE_INCOMPAT_FILETYPE| \
EXT3_FEATURE_INCOMPAT_RECOVER| \
EXT3_FEATURE_INCOMPAT_META_BG)
#define EXT3_FEATURE_RO_COMPAT_SUPP (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
/*
* Default values for user and/or group using reserved blocks
*/
#define EXT3_DEF_RESUID 0
#define EXT3_DEF_RESGID 0
/*
* Default mount options
*/
#define EXT3_DEFM_DEBUG 0x0001
#define EXT3_DEFM_BSDGROUPS 0x0002
#define EXT3_DEFM_XATTR_USER 0x0004
#define EXT3_DEFM_ACL 0x0008
#define EXT3_DEFM_UID16 0x0010
#define EXT3_DEFM_JMODE 0x0060
#define EXT3_DEFM_JMODE_DATA 0x0020
#define EXT3_DEFM_JMODE_ORDERED 0x0040
#define EXT3_DEFM_JMODE_WBACK 0x0060
/*
* Structure of a directory entry
*/
#define EXT3_NAME_LEN 255
struct ext3_dir_entry {
__le32 inode; /* Inode number */
__le16 rec_len; /* Directory entry length */
__le16 name_len; /* Name length */
char name[EXT3_NAME_LEN]; /* File name */
};
/*
* The new version of the directory entry. Since EXT3 structures are
* stored in intel byte order, and the name_len field could never be
* bigger than 255 chars, it's safe to reclaim the extra byte for the
* file_type field.
*/
struct ext3_dir_entry_2 {
__le32 inode; /* Inode number */
__le16 rec_len; /* Directory entry length */
__u8 name_len; /* Name length */
__u8 file_type;
char name[EXT3_NAME_LEN]; /* File name */
};
/*
* Ext3 directory file types. Only the low 3 bits are used. The
* other bits are reserved for now.
*/
#define EXT3_FT_UNKNOWN 0
#define EXT3_FT_REG_FILE 1
#define EXT3_FT_DIR 2
#define EXT3_FT_CHRDEV 3
#define EXT3_FT_BLKDEV 4
#define EXT3_FT_FIFO 5
#define EXT3_FT_SOCK 6
#define EXT3_FT_SYMLINK 7
#define EXT3_FT_MAX 8
/*
* EXT3_DIR_PAD defines the directory entries boundaries
*
* NOTE: It must be a multiple of 4
*/
#define EXT3_DIR_PAD 4
#define EXT3_DIR_ROUND (EXT3_DIR_PAD - 1)
#define EXT3_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT3_DIR_ROUND) & \
~EXT3_DIR_ROUND)
#define EXT3_MAX_REC_LEN ((1<<16)-1)
/*
* Tests against MAX_REC_LEN etc were put in place for 64k block
* sizes; if that is not possible on this arch, we can skip
* those tests and speed things up.
*/
static inline unsigned ext3_rec_len_from_disk(__le16 dlen)
{
unsigned len = le16_to_cpu(dlen);
#if (PAGE_CACHE_SIZE >= 65536)
if (len == EXT3_MAX_REC_LEN)
return 1 << 16;
#endif
return len;
}
static inline __le16 ext3_rec_len_to_disk(unsigned len)
{
#if (PAGE_CACHE_SIZE >= 65536)
if (len == (1 << 16))
return cpu_to_le16(EXT3_MAX_REC_LEN);
else if (len > (1 << 16))
BUG();
#endif
return cpu_to_le16(len);
}
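/*
 * Worked example (illustrative): on a 64 KiB-block filesystem a single
 * directory entry may span the whole block, i.e. rec_len == 1 << 16.
 * That value does not fit in __le16, so it is stored on disk as
 * EXT3_MAX_REC_LEN (65535) and converted back by the helpers above.
 */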
/*
* Hash Tree Directory indexing
* (c) Daniel Phillips, 2001
*/
#define is_dx(dir) (EXT3_HAS_COMPAT_FEATURE(dir->i_sb, \
EXT3_FEATURE_COMPAT_DIR_INDEX) && \
(EXT3_I(dir)->i_flags & EXT3_INDEX_FL))
#define EXT3_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXT3_LINK_MAX)
#define EXT3_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1)
/* Legal values for the dx_root hash_version field: */
#define DX_HASH_LEGACY 0
#define DX_HASH_HALF_MD4 1
#define DX_HASH_TEA 2
#define DX_HASH_LEGACY_UNSIGNED 3
#define DX_HASH_HALF_MD4_UNSIGNED 4
#define DX_HASH_TEA_UNSIGNED 5
#ifdef __KERNEL__
/* hash info structure used by the directory hash */
struct dx_hash_info
{
u32 hash;
u32 minor_hash;
int hash_version;
u32 *seed;
};
#define EXT3_HTREE_EOF 0x7fffffff
/*
* Control parameters used by ext3_htree_next_block
*/
#define HASH_NB_ALWAYS 1
/*
* Describe an inode's exact location on disk and in memory
*/
struct ext3_iloc
{
struct buffer_head *bh;
unsigned long offset;
unsigned long block_group;
};
static inline struct ext3_inode *ext3_raw_inode(struct ext3_iloc *iloc)
{
return (struct ext3_inode *) (iloc->bh->b_data + iloc->offset);
}
/*
* This structure is stuffed into the struct file's private_data field
* for directories. It is where we put information so that we can do
* readdir operations in hash tree order.
*/
struct dir_private_info {
struct rb_root root;
struct rb_node *curr_node;
struct fname *extra_fname;
loff_t last_pos;
__u32 curr_hash;
__u32 curr_minor_hash;
__u32 next_hash;
};
/* calculate the first block number of the group */
static inline ext3_fsblk_t
ext3_group_first_block_no(struct super_block *sb, unsigned long group_no)
{
return group_no * (ext3_fsblk_t)EXT3_BLOCKS_PER_GROUP(sb) +
le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
}
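/*
 * Worked example (illustrative): with 1 KiB blocks a block bitmap
 * covers 8192 blocks per group and s_first_data_block is 1, so group 2
 * starts at block 2 * 8192 + 1 = 16385.
 */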
/*
* Special error return code only used by dx_probe() and its callers.
*/
#define ERR_BAD_DX_DIR -75000
/*
* Function prototypes
*/
/*
* Ok, these declarations are also in <linux/kernel.h> but none of the
* ext3 source programs needs to include it so they are duplicated here.
*/
# define NORET_TYPE /**/
# define ATTRIB_NORET __attribute__((noreturn))
# define NORET_AND noreturn,
/* balloc.c */
extern int ext3_bg_has_super(struct super_block *sb, int group);
extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group);
extern ext3_fsblk_t ext3_new_block (handle_t *handle, struct inode *inode,
ext3_fsblk_t goal, int *errp);
extern ext3_fsblk_t ext3_new_blocks (handle_t *handle, struct inode *inode,
ext3_fsblk_t goal, unsigned long *count, int *errp);
extern void ext3_free_blocks (handle_t *handle, struct inode *inode,
ext3_fsblk_t block, unsigned long count);
extern void ext3_free_blocks_sb (handle_t *handle, struct super_block *sb,
ext3_fsblk_t block, unsigned long count,
unsigned long *pdquot_freed_blocks);
extern ext3_fsblk_t ext3_count_free_blocks (struct super_block *);
extern void ext3_check_blocks_bitmap (struct super_block *);
extern struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
unsigned int block_group,
struct buffer_head ** bh);
extern int ext3_should_retry_alloc(struct super_block *sb, int *retries);
extern void ext3_init_block_alloc_info(struct inode *);
extern void ext3_rsv_window_add(struct super_block *sb, struct ext3_reserve_window_node *rsv);
extern int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range);
/* dir.c */
extern int ext3_check_dir_entry(const char *, struct inode *,
struct ext3_dir_entry_2 *,
struct buffer_head *, unsigned long);
extern int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
__u32 minor_hash,
struct ext3_dir_entry_2 *dirent);
extern void ext3_htree_free_dir_info(struct dir_private_info *p);
/* fsync.c */
extern int ext3_sync_file(struct file *, loff_t, loff_t, int);
/* hash.c */
extern int ext3fs_dirhash(const char *name, int len, struct
dx_hash_info *hinfo);
/* ialloc.c */
extern struct inode * ext3_new_inode (handle_t *, struct inode *,
const struct qstr *, umode_t);
extern void ext3_free_inode (handle_t *, struct inode *);
extern struct inode * ext3_orphan_get (struct super_block *, unsigned long);
extern unsigned long ext3_count_free_inodes (struct super_block *);
extern unsigned long ext3_count_dirs (struct super_block *);
extern void ext3_check_inodes_bitmap (struct super_block *);
extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
/* inode.c */
int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
struct buffer_head *bh, ext3_fsblk_t blocknr);
struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result,
int create);
extern struct inode *ext3_iget(struct super_block *, unsigned long);
extern int ext3_write_inode (struct inode *, struct writeback_control *);
extern int ext3_setattr (struct dentry *, struct iattr *);
extern void ext3_evict_inode (struct inode *);
extern int ext3_sync_inode (handle_t *, struct inode *);
extern void ext3_discard_reservation (struct inode *);
extern void ext3_dirty_inode(struct inode *, int);
extern int ext3_change_inode_journal_flag(struct inode *, int);
extern int ext3_get_inode_loc(struct inode *, struct ext3_iloc *);
extern int ext3_can_truncate(struct inode *inode);
extern void ext3_truncate(struct inode *inode);
extern void ext3_set_inode_flags(struct inode *);
extern void ext3_get_inode_flags(struct ext3_inode_info *);
extern void ext3_set_aops(struct inode *inode);
extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
/* ioctl.c */
extern long ext3_ioctl(struct file *, unsigned int, unsigned long);
extern long ext3_compat_ioctl(struct file *, unsigned int, unsigned long);
/* namei.c */
extern int ext3_orphan_add(handle_t *, struct inode *);
extern int ext3_orphan_del(handle_t *, struct inode *);
extern int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
__u32 start_minor_hash, __u32 *next_hash);
/* resize.c */
extern int ext3_group_add(struct super_block *sb,
struct ext3_new_group_data *input);
extern int ext3_group_extend(struct super_block *sb,
struct ext3_super_block *es,
ext3_fsblk_t n_blocks_count);
/* super.c */
extern __printf(3, 4)
void ext3_error(struct super_block *, const char *, const char *, ...);
extern void __ext3_std_error (struct super_block *, const char *, int);
extern __printf(3, 4)
void ext3_abort(struct super_block *, const char *, const char *, ...);
extern __printf(3, 4)
void ext3_warning(struct super_block *, const char *, const char *, ...);
extern __printf(3, 4)
void ext3_msg(struct super_block *, const char *, const char *, ...);
extern void ext3_update_dynamic_rev (struct super_block *sb);
#define ext3_std_error(sb, errno) \
do { \
if ((errno)) \
__ext3_std_error((sb), __func__, (errno)); \
} while (0)
/*
* Inodes and files operations
*/
/* dir.c */
extern const struct file_operations ext3_dir_operations;
/* file.c */
extern const struct inode_operations ext3_file_inode_operations;
extern const struct file_operations ext3_file_operations;
/* namei.c */
extern const struct inode_operations ext3_dir_inode_operations;
extern const struct inode_operations ext3_special_inode_operations;
/* symlink.c */
extern const struct inode_operations ext3_symlink_inode_operations;
extern const struct inode_operations ext3_fast_symlink_inode_operations;
#endif /* __KERNEL__ */
#endif /* _LINUX_EXT3_FS_H */

View File

@@ -1,151 +0,0 @@
/*
* linux/include/linux/ext3_fs_i.h
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/include/linux/minix_fs_i.h
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#ifndef _LINUX_EXT3_FS_I
#define _LINUX_EXT3_FS_I
#include <linux/rwsem.h>
#include <linux/rbtree.h>
#include <linux/seqlock.h>
#include <linux/mutex.h>
/* data type for block offset of block group */
typedef int ext3_grpblk_t;
/* data type for filesystem-wide blocks number */
typedef unsigned long ext3_fsblk_t;
#define E3FSBLK "%lu"
struct ext3_reserve_window {
ext3_fsblk_t _rsv_start; /* First byte reserved */
ext3_fsblk_t _rsv_end; /* Last byte reserved or 0 */
};
struct ext3_reserve_window_node {
struct rb_node rsv_node;
__u32 rsv_goal_size;
__u32 rsv_alloc_hit;
struct ext3_reserve_window rsv_window;
};
struct ext3_block_alloc_info {
/* information about reservation window */
struct ext3_reserve_window_node rsv_window_node;
/*
* Was i_next_alloc_block in ext3_inode_info. This is the logical
* (file-relative) number of the most-recently-allocated block in this
* file.
* We use this for detecting linearly ascending allocation requests.
*/
__u32 last_alloc_logical_block;
/*
* Was i_next_alloc_goal in ext3_inode_info. This is the *physical*
* companion to i_next_alloc_block: the physical block number of the
* block which was most recently allocated to this file. This gives us
* the goal (target) for the next allocation when we detect linearly
* ascending requests.
*/
ext3_fsblk_t last_alloc_physical_block;
};
#define rsv_start rsv_window._rsv_start
#define rsv_end rsv_window._rsv_end
/*
* third extended file system inode data in memory
*/
struct ext3_inode_info {
__le32 i_data[15]; /* unconverted */
__u32 i_flags;
#ifdef EXT3_FRAGMENTS
__u32 i_faddr;
__u8 i_frag_no;
__u8 i_frag_size;
#endif
ext3_fsblk_t i_file_acl;
__u32 i_dir_acl;
__u32 i_dtime;
/*
* i_block_group is the number of the block group which contains
* this file's inode. Constant across the lifetime of the inode,
* it is used for making block allocation decisions - we try to
* place a file's data blocks near its inode block, and new inodes
* near to their parent directory's inode.
*/
__u32 i_block_group;
unsigned long i_state_flags; /* Dynamic state flags for ext3 */
/* block reservation info */
struct ext3_block_alloc_info *i_block_alloc_info;
__u32 i_dir_start_lookup;
#ifdef CONFIG_EXT3_FS_XATTR
/*
* Extended attributes can be read independently of the main file
* data. Taking i_mutex even when reading would cause contention
* between readers of EAs and writers of regular file data, so
* instead we synchronize on xattr_sem when reading or changing
* EAs.
*/
struct rw_semaphore xattr_sem;
#endif
struct list_head i_orphan; /* unlinked but open inodes */
/*
* i_disksize keeps track of what the inode size is ON DISK, not
* in memory. During truncate, i_size is set to the new size by
* the VFS prior to calling ext3_truncate(), but the filesystem won't
* set i_disksize to 0 until the truncate is actually under way.
*
* The intent is that i_disksize always represents the blocks which
* are used by this file. This allows recovery to restart truncate
* on orphans if we crash during truncate. We actually write i_disksize
* into the on-disk inode when writing inodes out, instead of i_size.
*
* The only time when i_disksize and i_size may be different is when
* a truncate is in progress. The only things which change i_disksize
* are ext3_get_block (growth) and ext3_truncate (shrinkth).
*/
loff_t i_disksize;
/* on-disk additional length */
__u16 i_extra_isize;
/*
* truncate_mutex is for serialising ext3_truncate() against
* ext3_getblock(). In the 2.4 ext2 design, great chunks of inode's
* data tree are chopped off during truncate. We can't do that in
* ext3 because whenever we perform intermediate commits during
* truncate, the inode and all the metadata blocks *must* be in a
* consistent state which allows truncation of the orphans to restart
* during recovery. Hence we must fix the get_block-vs-truncate race
* by other means, so we have truncate_mutex.
*/
struct mutex truncate_mutex;
/*
* Transactions that contain inode's metadata needed to complete
* fsync and fdatasync, respectively.
*/
atomic_t i_sync_tid;
atomic_t i_datasync_tid;
struct inode vfs_inode;
};
#endif /* _LINUX_EXT3_FS_I */

View File

@@ -1,91 +0,0 @@
/*
* linux/include/linux/ext3_fs_sb.h
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/include/linux/minix_fs_sb.h
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#ifndef _LINUX_EXT3_FS_SB
#define _LINUX_EXT3_FS_SB
#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/blockgroup_lock.h>
#include <linux/percpu_counter.h>
#endif
#include <linux/rbtree.h>
/*
* third extended-fs super-block data in memory
*/
struct ext3_sb_info {
unsigned long s_frag_size; /* Size of a fragment in bytes */
unsigned long s_frags_per_block;/* Number of fragments per block */
unsigned long s_inodes_per_block;/* Number of inodes per block */
unsigned long s_frags_per_group;/* Number of fragments in a group */
unsigned long s_blocks_per_group;/* Number of blocks in a group */
unsigned long s_inodes_per_group;/* Number of inodes in a group */
unsigned long s_itb_per_group; /* Number of inode table blocks per group */
unsigned long s_gdb_count; /* Number of group descriptor blocks */
unsigned long s_desc_per_block; /* Number of group descriptors per block */
unsigned long s_groups_count; /* Number of groups in the fs */
unsigned long s_overhead_last; /* Last calculated overhead */
unsigned long s_blocks_last; /* Last seen block count */
struct buffer_head * s_sbh; /* Buffer containing the super block */
struct ext3_super_block * s_es; /* Pointer to the super block in the buffer */
struct buffer_head ** s_group_desc;
unsigned long s_mount_opt;
ext3_fsblk_t s_sb_block;
uid_t s_resuid;
gid_t s_resgid;
unsigned short s_mount_state;
unsigned short s_pad;
int s_addr_per_block_bits;
int s_desc_per_block_bits;
int s_inode_size;
int s_first_ino;
spinlock_t s_next_gen_lock;
u32 s_next_generation;
u32 s_hash_seed[4];
int s_def_hash_version;
int s_hash_unsigned; /* 3 if hash should be unsigned, 0 if not */
struct percpu_counter s_freeblocks_counter;
struct percpu_counter s_freeinodes_counter;
struct percpu_counter s_dirs_counter;
struct blockgroup_lock *s_blockgroup_lock;
/* root of the per fs reservation window tree */
spinlock_t s_rsv_window_lock;
struct rb_root s_rsv_window_root;
struct ext3_reserve_window_node s_rsv_window_head;
/* Journaling */
struct inode * s_journal_inode;
struct journal_s * s_journal;
struct list_head s_orphan;
struct mutex s_orphan_lock;
struct mutex s_resize_lock;
unsigned long s_commit_interval;
struct block_device *journal_bdev;
#ifdef CONFIG_QUOTA
char *s_qf_names[MAXQUOTAS]; /* Names of quota files with journalled quota */
int s_jquota_fmt; /* Format of quota to use */
#endif
};
static inline spinlock_t *
sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group)
{
return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
}
#endif /* _LINUX_EXT3_FS_SB */

View File

@@ -1,229 +0,0 @@
/*
* linux/include/linux/ext3_jbd.h
*
* Written by Stephen C. Tweedie <sct@redhat.com>, 1999
*
* Copyright 1998--1999 Red Hat corp --- All Rights Reserved
*
* This file is part of the Linux kernel and is made available under
* the terms of the GNU General Public License, version 2, or at your
* option, any later version, incorporated herein by reference.
*
* Ext3-specific journaling extensions.
*/
#ifndef _LINUX_EXT3_JBD_H
#define _LINUX_EXT3_JBD_H
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/ext3_fs.h>
#define EXT3_JOURNAL(inode) (EXT3_SB((inode)->i_sb)->s_journal)
/* Define the number of blocks we need to account to a transaction to
* modify one block of data.
*
* We may have to touch one inode, one bitmap buffer, up to three
* indirection blocks, the group and superblock summaries, and the data
* block to complete the transaction. */
#define EXT3_SINGLEDATA_TRANS_BLOCKS 8U
/* Extended attribute operations touch at most two data buffers,
* two bitmap buffers, and two group summaries, in addition to the inode
* and the superblock, which are already accounted for. */
#define EXT3_XATTR_TRANS_BLOCKS 6U
/* Define the minimum size for a transaction which modifies data. This
* needs to take into account the fact that we may end up modifying two
* quota files too (one for the group, one for the user quota). The
* superblock only gets updated once, of course, so don't bother
* counting that again for the quota updates. */
#define EXT3_DATA_TRANS_BLOCKS(sb) (EXT3_SINGLEDATA_TRANS_BLOCKS + \
EXT3_XATTR_TRANS_BLOCKS - 2 + \
EXT3_MAXQUOTAS_TRANS_BLOCKS(sb))
/* Delete operations potentially hit one directory's namespace plus an
* entire inode, plus arbitrary amounts of bitmap/indirection data. Be
* generous. We can grow the delete transaction later if necessary. */
#define EXT3_DELETE_TRANS_BLOCKS(sb) (EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) + 64)
/* Define an arbitrary limit for the amount of data we will anticipate
* writing to any given transaction. For unbounded transactions such as
* write(2) and truncate(2) we can write more than this, but we always
* start off at the maximum transaction size and grow the transaction
* optimistically as we go. */
#define EXT3_MAX_TRANS_DATA 64U
/* We break up a large truncate or write transaction once the handle's
 * buffer credits get this low; we then need either to extend the
* transaction or to start a new one. Reserve enough space here for
* inode, bitmap, superblock, group and indirection updates for at least
* one block, plus two quota updates. Quota allocations are not
* needed. */
#define EXT3_RESERVE_TRANS_BLOCKS 12U
#define EXT3_INDEX_EXTRA_TRANS_BLOCKS 8
#ifdef CONFIG_QUOTA
/* Number of blocks needed for a quota update - we know that the structure was
* allocated so we need to update only inode+data */
#define EXT3_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
/* Number of blocks needed for a quota insert/delete - we do some block writes
* but inode, sb and group updates are done only once */
#define EXT3_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
(EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_INIT_REWRITE) : 0)
#define EXT3_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
(EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_DEL_REWRITE) : 0)
#else
#define EXT3_QUOTA_TRANS_BLOCKS(sb) 0
#define EXT3_QUOTA_INIT_BLOCKS(sb) 0
#define EXT3_QUOTA_DEL_BLOCKS(sb) 0
#endif
#define EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_TRANS_BLOCKS(sb))
#define EXT3_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_INIT_BLOCKS(sb))
#define EXT3_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_DEL_BLOCKS(sb))
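As a rough usage sketch (not part of this diff; error handling abbreviated, and "inode" is assumed to be a regular ext3 inode), the credit macros above feed straight into handle creation:

	handle_t *handle;

	/* Reserve enough credits to modify one block of data, including
	 * xattr and quota overhead; the handle can be extended later. */
	handle = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	/* ... journalled modifications go here ... */
	ext3_journal_stop(handle);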
int
ext3_mark_iloc_dirty(handle_t *handle,
struct inode *inode,
struct ext3_iloc *iloc);
/*
 * On success, we end up with an outstanding reference count against
* iloc->bh. This _must_ be cleaned up later.
*/
int ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
struct ext3_iloc *iloc);
int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode);
/*
* Wrapper functions with which ext3 calls into JBD. The intent here is
* to allow these to be turned into appropriate stubs so ext3 can control
 * ext2 filesystems, so ext2+ext3 systems only need one fs. This work hasn't
* been done yet.
*/
static inline void ext3_journal_release_buffer(handle_t *handle,
struct buffer_head *bh)
{
journal_release_buffer(handle, bh);
}
void ext3_journal_abort_handle(const char *caller, const char *err_fn,
struct buffer_head *bh, handle_t *handle, int err);
int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
struct buffer_head *bh);
int __ext3_journal_get_write_access(const char *where, handle_t *handle,
struct buffer_head *bh);
int __ext3_journal_forget(const char *where, handle_t *handle,
struct buffer_head *bh);
int __ext3_journal_revoke(const char *where, handle_t *handle,
unsigned long blocknr, struct buffer_head *bh);
int __ext3_journal_get_create_access(const char *where,
handle_t *handle, struct buffer_head *bh);
int __ext3_journal_dirty_metadata(const char *where,
handle_t *handle, struct buffer_head *bh);
#define ext3_journal_get_undo_access(handle, bh) \
__ext3_journal_get_undo_access(__func__, (handle), (bh))
#define ext3_journal_get_write_access(handle, bh) \
__ext3_journal_get_write_access(__func__, (handle), (bh))
#define ext3_journal_revoke(handle, blocknr, bh) \
__ext3_journal_revoke(__func__, (handle), (blocknr), (bh))
#define ext3_journal_get_create_access(handle, bh) \
__ext3_journal_get_create_access(__func__, (handle), (bh))
#define ext3_journal_dirty_metadata(handle, bh) \
__ext3_journal_dirty_metadata(__func__, (handle), (bh))
#define ext3_journal_forget(handle, bh) \
__ext3_journal_forget(__func__, (handle), (bh))
int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh);
handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks);
int __ext3_journal_stop(const char *where, handle_t *handle);
static inline handle_t *ext3_journal_start(struct inode *inode, int nblocks)
{
return ext3_journal_start_sb(inode->i_sb, nblocks);
}
#define ext3_journal_stop(handle) \
__ext3_journal_stop(__func__, (handle))
static inline handle_t *ext3_journal_current_handle(void)
{
return journal_current_handle();
}
static inline int ext3_journal_extend(handle_t *handle, int nblocks)
{
return journal_extend(handle, nblocks);
}
static inline int ext3_journal_restart(handle_t *handle, int nblocks)
{
return journal_restart(handle, nblocks);
}
static inline int ext3_journal_blocks_per_page(struct inode *inode)
{
return journal_blocks_per_page(inode);
}
static inline int ext3_journal_force_commit(journal_t *journal)
{
return journal_force_commit(journal);
}
/* super.c */
int ext3_force_commit(struct super_block *sb);
static inline int ext3_should_journal_data(struct inode *inode)
{
if (!S_ISREG(inode->i_mode))
return 1;
if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA)
return 1;
if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
return 1;
return 0;
}
static inline int ext3_should_order_data(struct inode *inode)
{
if (!S_ISREG(inode->i_mode))
return 0;
if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
return 0;
if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA)
return 1;
return 0;
}
static inline int ext3_should_writeback_data(struct inode *inode)
{
if (!S_ISREG(inode->i_mode))
return 0;
if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
return 0;
if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)
return 1;
return 0;
}
#endif /* _LINUX_EXT3_JBD_H */

View File

@@ -407,7 +407,6 @@ struct fb_cursor {
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/list.h>
@@ -1003,6 +1002,7 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
/* drivers/video/fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
extern int unregister_framebuffer(struct fb_info *fb_info);
extern int unlink_framebuffer(struct fb_info *fb_info);
extern void remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary);
extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);

View File

@@ -12,7 +12,6 @@
struct file;
extern void fput(struct file *);
extern void drop_file_write_access(struct file *file);
struct file_operations;
struct vfsmount;

View File

@@ -207,12 +207,16 @@ struct fw_cdev_event_request2 {
* @closure: See &fw_cdev_event_common;
* set by %FW_CDEV_CREATE_ISO_CONTEXT ioctl
* @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_ISO_INTERRUPT
* @cycle: Cycle counter of the interrupt packet
* @cycle: Cycle counter of the last completed packet
* @header_length: Total length of following headers, in bytes
* @header: Stripped headers, if any
*
* This event is sent when the controller has completed an &fw_cdev_iso_packet
* with the %FW_CDEV_ISO_INTERRUPT bit set.
* with the %FW_CDEV_ISO_INTERRUPT bit set, when explicitly requested with
* %FW_CDEV_IOC_FLUSH_ISO, or when there have been so many completed packets
* without the interrupt bit set that the kernel's internal buffer for @header
* is about to overflow. (In the last case, kernels with ABI version < 5 drop
* header data up to the next interrupt packet.)
*
* Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT):
*
@@ -267,9 +271,9 @@ struct fw_cdev_event_iso_interrupt {
*
* This event is sent in multichannel contexts (context type
* %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL) for &fw_cdev_iso_packet buffer
* chunks that have the %FW_CDEV_ISO_INTERRUPT bit set. Whether this happens
* when a packet is completed and/or when a buffer chunk is completed depends
* on the hardware implementation.
* chunks that have been completely filled and that have the
* %FW_CDEV_ISO_INTERRUPT bit set, or when explicitly requested with
* %FW_CDEV_IOC_FLUSH_ISO.
*
* The buffer is continuously filled with the following data, per packet:
* - the 1394 iso packet header as described at &fw_cdev_event_iso_interrupt,
@@ -419,6 +423,9 @@ union fw_cdev_event {
#define FW_CDEV_IOC_RECEIVE_PHY_PACKETS _IOW('#', 0x16, struct fw_cdev_receive_phy_packets)
#define FW_CDEV_IOC_SET_ISO_CHANNELS _IOW('#', 0x17, struct fw_cdev_set_iso_channels)
/* available since kernel version 3.4 */
#define FW_CDEV_IOC_FLUSH_ISO _IOW('#', 0x18, struct fw_cdev_flush_iso)
/*
* ABI version history
* 1 (2.6.22) - initial version
@@ -441,6 +448,9 @@ union fw_cdev_event {
* - added %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL,
* %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL, and
* %FW_CDEV_IOC_SET_ISO_CHANNELS
* 5 (3.4) - send %FW_CDEV_EVENT_ISO_INTERRUPT events when needed to
* avoid dropping data
* - added %FW_CDEV_IOC_FLUSH_ISO
*/
/**
@@ -850,6 +860,25 @@ struct fw_cdev_stop_iso {
__u32 handle;
};
/**
* struct fw_cdev_flush_iso - flush completed iso packets
* @handle: handle of isochronous context to flush
*
* For %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE contexts,
* report any completed packets.
*
* For %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL contexts, report the current
* offset in the receive buffer, if it has changed; this is typically in the
* middle of some buffer chunk.
*
* Any %FW_CDEV_EVENT_ISO_INTERRUPT or %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL
* events generated by this ioctl are sent synchronously, i.e., are available
* for reading from the file descriptor when this ioctl returns.
*/
struct fw_cdev_flush_iso {
__u32 handle;
};
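A minimal user-space sketch of the new ioctl (assuming "fd" is an open firewire character device and "handle" identifies an already-created iso context; both names are placeholders):

	#include <sys/ioctl.h>
	#include <linux/firewire-cdev.h>

	struct fw_cdev_flush_iso flush = { .handle = handle };

	/* Any resulting completion events are readable from fd
	 * immediately after this call returns. */
	if (ioctl(fd, FW_CDEV_IOC_FLUSH_ISO, &flush) < 0)
		perror("FW_CDEV_IOC_FLUSH_ISO");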
/**
* struct fw_cdev_get_cycle_timer - read cycle timer register
* @local_time: system time, in microseconds since the Epoch

View File

@@ -17,9 +17,6 @@
#include <linux/atomic.h>
#include <asm/byteorder.h>
#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
#define CSR_REGISTER_BASE 0xfffff0000000ULL
/* register offsets are relative to CSR_REGISTER_BASE */
@@ -203,18 +200,6 @@ static inline int fw_device_is_shutdown(struct fw_device *device)
return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
}
static inline struct fw_device *fw_device_get(struct fw_device *device)
{
get_device(&device->device);
return device;
}
static inline void fw_device_put(struct fw_device *device)
{
put_device(&device->device);
}
int fw_device_enable_phys_dma(struct fw_device *device);
/*
@@ -441,6 +426,7 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_buffer *buffer,
unsigned long payload);
void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
int fw_iso_context_flush_completions(struct fw_iso_context *ctx);
int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags);
int fw_iso_context_stop(struct fw_iso_context *ctx);

View File

@@ -92,6 +92,10 @@ struct inodes_stat_t {
/* File is opened using open(.., 3, ..) and is writeable only for ioctls
(special hack for floppy.c) */
#define FMODE_WRITE_IOCTL ((__force fmode_t)0x100)
/* 32bit hashes as llseek() offset (for directories) */
#define FMODE_32BITHASH ((__force fmode_t)0x200)
/* 64bit hashes as llseek() offset (for directories) */
#define FMODE_64BITHASH ((__force fmode_t)0x400)
/*
* Don't update ctime and mtime.
@@ -389,6 +393,7 @@ struct inodes_stat_t {
#include <linux/prio_tree.h>
#include <linux/init.h>
#include <linux/pid.h>
#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/semaphore.h>
@@ -1210,6 +1215,7 @@ extern int vfs_setlease(struct file *, long, struct file_lock **);
extern int lease_modify(struct file_lock **, int);
extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
extern void locks_delete_block(struct file_lock *waiter);
extern void lock_flocks(void);
extern void unlock_flocks(void);
#else /* !CONFIG_FILE_LOCKING */
@@ -1354,6 +1360,10 @@ static inline int lock_may_write(struct inode *inode, loff_t start,
return 1;
}
static inline void locks_delete_block(struct file_lock *waiter)
{
}
static inline void lock_flocks(void)
{
}
@@ -1459,6 +1469,7 @@ struct super_block {
u8 s_uuid[16]; /* UUID */
void *s_fs_info; /* Filesystem private info */
unsigned int s_max_links;
fmode_t s_mode;
/* Granularity of c/m/atime in ns.
@@ -1811,11 +1822,11 @@ static inline void inode_inc_iversion(struct inode *inode)
spin_unlock(&inode->i_lock);
}
extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry);
extern void touch_atime(struct path *);
static inline void file_accessed(struct file *file)
{
if (!(file->f_flags & O_NOATIME))
touch_atime(file->f_path.mnt, file->f_path.dentry);
touch_atime(&file->f_path);
}
int sync_inode(struct inode *inode, struct writeback_control *wbc);
@@ -1870,19 +1881,6 @@ extern struct dentry *mount_pseudo(struct file_system_type *, char *,
const struct dentry_operations *dops,
unsigned long);
static inline void sb_mark_dirty(struct super_block *sb)
{
sb->s_dirt = 1;
}
static inline void sb_mark_clean(struct super_block *sb)
{
sb->s_dirt = 0;
}
static inline int sb_is_dirty(struct super_block *sb)
{
return sb->s_dirt;
}
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
#define fops_get(fops) \
(((fops) && try_module_get((fops)->owner) ? (fops) : NULL))
@@ -2304,7 +2302,10 @@ extern struct inode * igrab(struct inode *);
extern ino_t iunique(struct super_block *, ino_t);
extern int inode_needs_sync(struct inode *inode);
extern int generic_delete_inode(struct inode *inode);
extern int generic_drop_inode(struct inode *inode);
static inline int generic_drop_inode(struct inode *inode)
{
return !inode->i_nlink || inode_unhashed(inode);
}
extern struct inode *ilookup5_nowait(struct super_block *sb,
unsigned long hashval, int (*test)(struct inode *, void *),
@@ -2510,6 +2511,7 @@ extern int dcache_readdir(struct file *, void *, filldir_t);
extern int simple_setattr(struct dentry *, struct iattr *);
extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
extern int simple_statfs(struct dentry *, struct kstatfs *);
extern int simple_open(struct inode *inode, struct file *file);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);

View File

@@ -0,0 +1,28 @@
/*
* Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __MACH_MXS_DMA_H__
#define __MACH_MXS_DMA_H__
#include <linux/dmaengine.h>
struct mxs_dma_data {
int chan_irq;
};
static inline int mxs_dma_is_apbh(struct dma_chan *chan)
{
return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbh");
}
static inline int mxs_dma_is_apbx(struct dma_chan *chan)
{
return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbx");
}
#endif /* __MACH_MXS_DMA_H__ */

View File

@@ -14,6 +14,7 @@
#include <linux/fsnotify_backend.h>
#include <linux/audit.h>
#include <linux/slab.h>
#include <linux/bug.h>
/*
* fsnotify_d_instantiate - instantiate a dentry for inode

View File

@@ -31,16 +31,33 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
/*
* FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
* set in the flags member.
*
* ENABLED - set/unset when ftrace_ops is registered/unregistered
 * GLOBAL - set manually by the ftrace_ops user to denote that the ftrace_ops
 *          is part of the global tracers sharing the same filter
 *          via set_ftrace_* debugfs files.
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * CONTROL - set manually by the ftrace_ops user to denote that the ftrace_ops
 *           can be controlled by the following calls:
* ftrace_function_local_enable
* ftrace_function_local_disable
*/
enum {
FTRACE_OPS_FL_ENABLED = 1 << 0,
FTRACE_OPS_FL_GLOBAL = 1 << 1,
FTRACE_OPS_FL_DYNAMIC = 1 << 2,
FTRACE_OPS_FL_CONTROL = 1 << 3,
};
struct ftrace_ops {
ftrace_func_t func;
struct ftrace_ops *next;
unsigned long flags;
int __percpu *disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
struct ftrace_hash *notrace_hash;
struct ftrace_hash *filter_hash;
@@ -97,6 +114,55 @@ int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);
/**
* ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
*
* This function enables tracing on current cpu by decreasing
* the per cpu control variable.
* It must be called with preemption disabled and only on ftrace_ops
* registered with FTRACE_OPS_FL_CONTROL. If called without preemption
* disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
*/
static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
return;
(*this_cpu_ptr(ops->disabled))--;
}
/**
 * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
 *
 * This function disables tracing on current cpu by increasing
 * the per cpu control variable.
* It must be called with preemption disabled and only on ftrace_ops
* registered with FTRACE_OPS_FL_CONTROL. If called without preemption
* disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
*/
static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
{
if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
return;
(*this_cpu_ptr(ops->disabled))++;
}
/**
* ftrace_function_local_disabled - returns ftrace_ops disabled value
* on current cpu
*
* This function returns value of ftrace_ops::disabled on current cpu.
* It must be called with preemption disabled and only on ftrace_ops
* registered with FTRACE_OPS_FL_CONTROL. If called without preemption
* disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
*/
static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
return *this_cpu_ptr(ops->disabled);
}
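A sketch of the intended calling pattern (hypothetical "my_ops", assumed to be registered with FTRACE_OPS_FL_CONTROL; preemption must bracket the per-cpu accesses):

	preempt_disable();
	ftrace_function_local_disable(&my_ops);	/* mute tracing on this cpu */
	/* ... code that must not recurse into the tracer ... */
	ftrace_function_local_enable(&my_ops);	/* restore tracing */
	preempt_enable();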
extern void ftrace_stub(unsigned long a0, unsigned long a1);
#else /* !CONFIG_FUNCTION_TRACER */
@@ -178,12 +244,13 @@ struct dyn_ftrace {
};
int ftrace_force_update(void);
void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset);
void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);
@@ -314,9 +381,6 @@ extern void ftrace_enable_daemon(void);
#else
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
}
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) {}
@@ -340,6 +404,9 @@ static inline int ftrace_text_reserved(void *start, void *end)
*/
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
size_t cnt, loff_t *ppos) { return -ENODEV; }

View File

@@ -144,8 +144,14 @@ struct event_filter;
enum trace_reg {
TRACE_REG_REGISTER,
TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
TRACE_REG_PERF_REGISTER,
TRACE_REG_PERF_UNREGISTER,
TRACE_REG_PERF_OPEN,
TRACE_REG_PERF_CLOSE,
TRACE_REG_PERF_ADD,
TRACE_REG_PERF_DEL,
#endif
};
struct ftrace_event_call;
@@ -157,7 +163,7 @@ struct ftrace_event_class {
void *perf_probe;
#endif
int (*reg)(struct ftrace_event_call *event,
enum trace_reg type);
enum trace_reg type, void *data);
int (*define_fields)(struct ftrace_event_call *);
struct list_head *(*get_fields)(struct ftrace_event_call *);
struct list_head fields;
@@ -165,7 +171,7 @@ struct ftrace_event_class {
};
extern int ftrace_event_reg(struct ftrace_event_call *event,
enum trace_reg type);
enum trace_reg type, void *data);
enum {
TRACE_EVENT_FL_ENABLED_BIT,
@@ -241,6 +247,7 @@ enum {
FILTER_STATIC_STRING,
FILTER_DYN_STRING,
FILTER_PTR_STRING,
FILTER_TRACE_FN,
};
#define EVENT_STORAGE_SIZE 128

View File

@@ -596,6 +596,7 @@ extern char *disk_name (struct gendisk *hd, int partno, char *buf);
extern int disk_expand_part_tbl(struct gendisk *disk, int target);
extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
extern int invalidate_partitions(struct gendisk *disk, struct block_device *bdev);
extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
int partno, sector_t start,
sector_t len, int flags,

View File

@@ -168,6 +168,7 @@ struct gfs2_rindex {
#define GFS2_RGF_METAONLY 0x00000002
#define GFS2_RGF_DATAONLY 0x00000004
#define GFS2_RGF_NOALLOC 0x00000008
#define GFS2_RGF_TRIMMED 0x00000010
struct gfs2_rgrp {
struct gfs2_meta_header rg_header;

View File

@@ -14,6 +14,12 @@
#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
/* Gpio pin is open drain */
#define GPIOF_OPEN_DRAIN (1 << 2)
/* Gpio pin is open source */
#define GPIOF_OPEN_SOURCE (1 << 3)
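For illustration, the new flags combine with the existing direction and initial-value flags in a single request; GPIO number and label below are placeholders, and gpio_request_one() is the existing one-shot request helper:

	int err;

	/* Drive the pin open-drain: it is actively pulled low, while
	 * "high" means high-impedance, relying on an external pull-up. */
	err = gpio_request_one(42, GPIOF_OUT_INIT_HIGH | GPIOF_OPEN_DRAIN,
			       "recovery-clk");
	if (err)
		pr_err("gpio request failed: %d\n", err);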
/**
* struct gpio - a structure describing a GPIO with configuration
* @gpio: the GPIO number
@@ -34,6 +40,7 @@ struct gpio {
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
struct device;
struct gpio_chip;

View File

@@ -6,7 +6,7 @@ struct device;
struct gpio_keys_button {
/* Configuration parameters */
unsigned int code; /* input event code (KEY_*, SW_*) */
int gpio;
int gpio; /* -1 if this key does not support gpio */
int active_low;
const char *desc;
unsigned int type; /* input event type (EV_KEY, EV_SW, EV_ABS) */
@@ -14,6 +14,7 @@ struct gpio_keys_button {
int debounce_interval; /* debounce ticks interval in msecs */
bool can_disable;
int value; /* axis value for EV_ABS */
unsigned int irq; /* Irq number in case of interrupt keys */
};
struct gpio_keys_platform_data {

View File

@@ -3,6 +3,7 @@
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
@@ -55,12 +56,12 @@ static inline void kunmap(struct page *page)
{
}
static inline void *__kmap_atomic(struct page *page)
static inline void *kmap_atomic(struct page *page)
{
pagefault_disable();
return page_address(page);
}
#define kmap_atomic_prot(page, prot) __kmap_atomic(page)
#define kmap_atomic_prot(page, prot) kmap_atomic(page)
static inline void __kunmap_atomic(void *addr)
{
@@ -109,27 +110,62 @@ static inline void kmap_atomic_idx_pop(void)
#endif
/*
* Make both: kmap_atomic(page, idx) and kmap_atomic(page) work.
* NOTE:
* kmap_atomic() and kunmap_atomic() with two arguments are deprecated.
 * We only keep them for backward compatibility; any use of them
 * now triggers a deprecation warning.
*/
#define kmap_atomic(page, args...) __kmap_atomic(page)
#define PASTE(a, b) a ## b
#define PASTE2(a, b) PASTE(a, b)
#define NARG_(_2, _1, n, ...) n
#define NARG(...) NARG_(__VA_ARGS__, 2, 1, :)
static inline void __deprecated *kmap_atomic_deprecated(struct page *page,
enum km_type km)
{
return kmap_atomic(page);
}
#define kmap_atomic1(...) kmap_atomic(__VA_ARGS__)
#define kmap_atomic2(...) kmap_atomic_deprecated(__VA_ARGS__)
#define kmap_atomic(...) PASTE2(kmap_atomic, NARG(__VA_ARGS__)(__VA_ARGS__))
static inline void __deprecated __kunmap_atomic_deprecated(void *addr,
enum km_type km)
{
__kunmap_atomic(addr);
}
/*
* Prevent people trying to call kunmap_atomic() as if it were kunmap()
* kunmap_atomic() should get the return value of kmap_atomic, not the page.
*/
#define kunmap_atomic(addr, args...) \
do { \
BUILD_BUG_ON(__same_type((addr), struct page *)); \
__kunmap_atomic(addr); \
#define kunmap_atomic_deprecated(addr, km) \
do { \
BUILD_BUG_ON(__same_type((addr), struct page *)); \
__kunmap_atomic_deprecated(addr, km); \
} while (0)
#define kunmap_atomic_withcheck(addr) \
do { \
BUILD_BUG_ON(__same_type((addr), struct page *)); \
__kunmap_atomic(addr); \
} while (0)
#define kunmap_atomic1(...) kunmap_atomic_withcheck(__VA_ARGS__)
#define kunmap_atomic2(...) kunmap_atomic_deprecated(__VA_ARGS__)
#define kunmap_atomic(...) PASTE2(kunmap_atomic, NARG(__VA_ARGS__)(__VA_ARGS__))
/**** End of C pre-processor tricks for deprecated macros ****/
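To make the dispatch concrete, here is how the macros above resolve for each arity (an expansion trace, not new code):

/*
 *   kmap_atomic(page)
 *     -> NARG(page) expands to 1
 *     -> PASTE2(kmap_atomic, 1)(page) == kmap_atomic1(page)
 *     -> the one-argument inline kmap_atomic(page)
 *
 *   kmap_atomic(page, KM_USER0)
 *     -> NARG(page, KM_USER0) expands to 2
 *     -> kmap_atomic2(page, KM_USER0)
 *     -> kmap_atomic_deprecated(page, KM_USER0), which is marked
 *        __deprecated and therefore warns at compile time
 */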
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *addr = kmap_atomic(page, KM_USER0);
void *addr = kmap_atomic(page);
clear_user_page(addr, vaddr, page);
kunmap_atomic(addr, KM_USER0);
kunmap_atomic(addr);
}
#endif
@@ -180,16 +216,16 @@ alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
static inline void clear_highpage(struct page *page)
{
void *kaddr = kmap_atomic(page, KM_USER0);
void *kaddr = kmap_atomic(page);
clear_page(kaddr);
kunmap_atomic(kaddr, KM_USER0);
kunmap_atomic(kaddr);
}
static inline void zero_user_segments(struct page *page,
unsigned start1, unsigned end1,
unsigned start2, unsigned end2)
{
void *kaddr = kmap_atomic(page, KM_USER0);
void *kaddr = kmap_atomic(page);
BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);
@@ -199,7 +235,7 @@ static inline void zero_user_segments(struct page *page,
if (end2 > start2)
memset(kaddr + start2, 0, end2 - start2);
kunmap_atomic(kaddr, KM_USER0);
kunmap_atomic(kaddr);
flush_dcache_page(page);
}
@@ -228,11 +264,11 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
{
char *vfrom, *vto;
vfrom = kmap_atomic(from, KM_USER0);
vto = kmap_atomic(to, KM_USER1);
vfrom = kmap_atomic(from);
vto = kmap_atomic(to);
copy_user_page(vto, vfrom, vaddr, to);
kunmap_atomic(vto, KM_USER1);
kunmap_atomic(vfrom, KM_USER0);
kunmap_atomic(vto);
kunmap_atomic(vfrom);
}
#endif
@@ -241,11 +277,11 @@ static inline void copy_highpage(struct page *to, struct page *from)
{
char *vfrom, *vto;
vfrom = kmap_atomic(from, KM_USER0);
vto = kmap_atomic(to, KM_USER1);
vfrom = kmap_atomic(from);
vto = kmap_atomic(to);
copy_page(vto, vfrom);
kunmap_atomic(vto, KM_USER1);
kunmap_atomic(vfrom, KM_USER0);
kunmap_atomic(vto);
kunmap_atomic(vfrom);
}
#endif /* _LINUX_HIGHMEM_H */

include/linux/hsi/Kbuild Normal file
View File

@@ -0,0 +1 @@
header-y += hsi_char.h

include/linux/hsi/hsi.h Normal file
View File

@@ -0,0 +1,410 @@
/*
* HSI core header file.
*
* Copyright (C) 2010 Nokia Corporation. All rights reserved.
*
* Contact: Carlos Chinea <carlos.chinea@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#ifndef __LINUX_HSI_H__
#define __LINUX_HSI_H__
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/module.h>
/* HSI message ttype */
#define HSI_MSG_READ 0
#define HSI_MSG_WRITE 1
/* HSI configuration values */
enum {
HSI_MODE_STREAM = 1,
HSI_MODE_FRAME,
};
enum {
HSI_FLOW_SYNC, /* Synchronized flow */
HSI_FLOW_PIPE, /* Pipelined flow */
};
enum {
HSI_ARB_RR, /* Round-robin arbitration */
HSI_ARB_PRIO, /* Channel priority arbitration */
};
#define HSI_MAX_CHANNELS 16
/* HSI message status codes */
enum {
HSI_STATUS_COMPLETED, /* Message transfer is completed */
HSI_STATUS_PENDING, /* Message pending to be read/write (POLL) */
HSI_STATUS_PROCEEDING, /* Message transfer is ongoing */
HSI_STATUS_QUEUED, /* Message waiting to be served */
HSI_STATUS_ERROR, /* Error when message transfer was ongoing */
};
/* HSI port event codes */
enum {
HSI_EVENT_START_RX,
HSI_EVENT_STOP_RX,
};
/**
* struct hsi_config - Configuration for RX/TX HSI modules
* @mode: Bit transmission mode (STREAM or FRAME)
* @channels: Number of channels to use [1..16]
* @speed: Max bit transmission speed (Kbit/s)
* @flow: RX flow type (SYNCHRONIZED or PIPELINE)
* @arb_mode: Arbitration mode for TX frame (Round robin, priority)
*/
struct hsi_config {
unsigned int mode;
unsigned int channels;
unsigned int speed;
union {
unsigned int flow; /* RX only */
unsigned int arb_mode; /* TX only */
};
};
/**
* struct hsi_board_info - HSI client board info
* @name: Name for the HSI device
* @hsi_id: HSI controller id where the client sits
* @port: Port number in the controller where the client sits
* @tx_cfg: HSI TX configuration
* @rx_cfg: HSI RX configuration
* @platform_data: Platform related data
* @archdata: Architecture-dependent device data
*/
struct hsi_board_info {
const char *name;
unsigned int hsi_id;
unsigned int port;
struct hsi_config tx_cfg;
struct hsi_config rx_cfg;
void *platform_data;
struct dev_archdata *archdata;
};
#ifdef CONFIG_HSI_BOARDINFO
extern int hsi_register_board_info(struct hsi_board_info const *info,
unsigned int len);
#else
static inline int hsi_register_board_info(struct hsi_board_info const *info,
unsigned int len)
{
return 0;
}
#endif /* CONFIG_HSI_BOARDINFO */
/**
* struct hsi_client - HSI client attached to an HSI port
* @device: Driver model representation of the device
* @tx_cfg: HSI TX configuration
* @rx_cfg: HSI RX configuration
* @hsi_start_rx: Called after incoming wake line goes high
* @hsi_stop_rx: Called after incoming wake line goes low
*/
struct hsi_client {
struct device device;
struct hsi_config tx_cfg;
struct hsi_config rx_cfg;
void (*hsi_start_rx)(struct hsi_client *cl);
void (*hsi_stop_rx)(struct hsi_client *cl);
/* private: */
unsigned int pclaimed:1;
struct list_head link;
};
#define to_hsi_client(dev) container_of(dev, struct hsi_client, device)
static inline void hsi_client_set_drvdata(struct hsi_client *cl, void *data)
{
dev_set_drvdata(&cl->device, data);
}
static inline void *hsi_client_drvdata(struct hsi_client *cl)
{
return dev_get_drvdata(&cl->device);
}
/**
* struct hsi_client_driver - Driver associated to an HSI client
* @driver: Driver model representation of the driver
*/
struct hsi_client_driver {
struct device_driver driver;
};
#define to_hsi_client_driver(drv) container_of(drv, struct hsi_client_driver,\
driver)
int hsi_register_client_driver(struct hsi_client_driver *drv);
static inline void hsi_unregister_client_driver(struct hsi_client_driver *drv)
{
driver_unregister(&drv->driver);
}
/**
* struct hsi_msg - HSI message descriptor
* @link: Free to use by the current descriptor owner
* @cl: HSI device client that issues the transfer
* @sgt: Head of the scatterlist array
* @context: Client context data associated to the transfer
* @complete: Transfer completion callback
* @destructor: Destructor to free resources when flushing
* @status: Status of the transfer when completed
 * @actual_len: Actual length of data transferred on completion
 * @channel: Channel where to TX/RX the message
* @ttype: Transfer type (TX if set, RX otherwise)
* @break_frame: if true HSI will send/receive a break frame. Data buffers are
* ignored in the request.
*/
struct hsi_msg {
struct list_head link;
struct hsi_client *cl;
struct sg_table sgt;
void *context;
void (*complete)(struct hsi_msg *msg);
void (*destructor)(struct hsi_msg *msg);
int status;
unsigned int actual_len;
unsigned int channel;
unsigned int ttype:1;
unsigned int break_frame:1;
};
struct hsi_msg *hsi_alloc_msg(unsigned int n_frag, gfp_t flags);
void hsi_free_msg(struct hsi_msg *msg);
/**
* struct hsi_port - HSI port device
* @device: Driver model representation of the device
* @tx_cfg: Current TX path configuration
* @rx_cfg: Current RX path configuration
* @num: Port number
* @shared: Set when port can be shared by different clients
* @claimed: Reference count of clients which claimed the port
* @lock: Serialize port claim
* @async: Asynchronous transfer callback
* @setup: Callback to set the HSI client configuration
* @flush: Callback to clean the HW state and destroy all pending transfers
* @start_tx: Callback to inform that a client wants to TX data
* @stop_tx: Callback to inform that a client no longer wishes to TX data
* @release: Callback to inform that a client no longer uses the port
* @clients: List of hsi_clients using the port.
* @clock: Lock to serialize access to the clients list.
*/
struct hsi_port {
struct device device;
struct hsi_config tx_cfg;
struct hsi_config rx_cfg;
unsigned int num;
unsigned int shared:1;
int claimed;
struct mutex lock;
int (*async)(struct hsi_msg *msg);
int (*setup)(struct hsi_client *cl);
int (*flush)(struct hsi_client *cl);
int (*start_tx)(struct hsi_client *cl);
int (*stop_tx)(struct hsi_client *cl);
int (*release)(struct hsi_client *cl);
struct list_head clients;
spinlock_t clock;
};
#define to_hsi_port(dev) container_of(dev, struct hsi_port, device)
#define hsi_get_port(cl) to_hsi_port((cl)->device.parent)
void hsi_event(struct hsi_port *port, unsigned int event);
int hsi_claim_port(struct hsi_client *cl, unsigned int share);
void hsi_release_port(struct hsi_client *cl);
static inline int hsi_port_claimed(struct hsi_client *cl)
{
return cl->pclaimed;
}
static inline void hsi_port_set_drvdata(struct hsi_port *port, void *data)
{
dev_set_drvdata(&port->device, data);
}
static inline void *hsi_port_drvdata(struct hsi_port *port)
{
return dev_get_drvdata(&port->device);
}
/**
* struct hsi_controller - HSI controller device
* @device: Driver model representation of the device
* @owner: Pointer to the module owning the controller
* @id: HSI controller ID
* @num_ports: Number of ports in the HSI controller
* @port: Array of HSI ports
*/
struct hsi_controller {
struct device device;
struct module *owner;
unsigned int id;
unsigned int num_ports;
struct hsi_port *port;
};
#define to_hsi_controller(dev) container_of(dev, struct hsi_controller, device)
struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags);
void hsi_free_controller(struct hsi_controller *hsi);
int hsi_register_controller(struct hsi_controller *hsi);
void hsi_unregister_controller(struct hsi_controller *hsi);
static inline void hsi_controller_set_drvdata(struct hsi_controller *hsi,
void *data)
{
dev_set_drvdata(&hsi->device, data);
}
static inline void *hsi_controller_drvdata(struct hsi_controller *hsi)
{
return dev_get_drvdata(&hsi->device);
}
static inline struct hsi_port *hsi_find_port_num(struct hsi_controller *hsi,
unsigned int num)
{
return (num < hsi->num_ports) ? &hsi->port[num] : NULL;
}
/*
* API for HSI clients
*/
int hsi_async(struct hsi_client *cl, struct hsi_msg *msg);
/**
* hsi_id - Get HSI controller ID associated to a client
* @cl: Pointer to a HSI client
*
* Return the controller id where the client is attached to
*/
static inline unsigned int hsi_id(struct hsi_client *cl)
{
return to_hsi_controller(cl->device.parent->parent)->id;
}
/**
* hsi_port_id - Gets the port number a client is attached to
* @cl: Pointer to HSI client
*
* Return the port number associated to the client
*/
static inline unsigned int hsi_port_id(struct hsi_client *cl)
{
return to_hsi_port(cl->device.parent)->num;
}
/**
* hsi_setup - Configure the client's port
* @cl: Pointer to the HSI client
*
 * When sharing ports, clients should either rely on a single
 * client's setup or have the same setup for all of them.
*
* Return -errno on failure, 0 on success
*/
static inline int hsi_setup(struct hsi_client *cl)
{
if (!hsi_port_claimed(cl))
return -EACCES;
return hsi_get_port(cl)->setup(cl);
}
/**
* hsi_flush - Flush all pending transactions on the client's port
* @cl: Pointer to the HSI client
*
* This function will destroy all pending hsi_msg in the port and reset
* the HW port so it is ready to receive and transmit from a clean state.
*
* Return -errno on failure, 0 on success
*/
static inline int hsi_flush(struct hsi_client *cl)
{
if (!hsi_port_claimed(cl))
return -EACCES;
return hsi_get_port(cl)->flush(cl);
}
/**
* hsi_async_read - Submit a read transfer
* @cl: Pointer to the HSI client
* @msg: HSI message descriptor of the transfer
*
* Return -errno on failure, 0 on success
*/
static inline int hsi_async_read(struct hsi_client *cl, struct hsi_msg *msg)
{
msg->ttype = HSI_MSG_READ;
return hsi_async(cl, msg);
}
/**
* hsi_async_write - Submit a write transfer
* @cl: Pointer to the HSI client
* @msg: HSI message descriptor of the transfer
*
* Return -errno on failure, 0 on success
*/
static inline int hsi_async_write(struct hsi_client *cl, struct hsi_msg *msg)
{
msg->ttype = HSI_MSG_WRITE;
return hsi_async(cl, msg);
}
/**
* hsi_start_tx - Signal the port that the client wants to start a TX
* @cl: Pointer to the HSI client
*
* Return -errno on failure, 0 on success
*/
static inline int hsi_start_tx(struct hsi_client *cl)
{
if (!hsi_port_claimed(cl))
return -EACCES;
return hsi_get_port(cl)->start_tx(cl);
}
/**
* hsi_stop_tx - Signal the port that the client no longer wants to transmit
* @cl: Pointer to the HSI client
*
* Return -errno on failure, 0 on success
*/
static inline int hsi_stop_tx(struct hsi_client *cl)
{
if (!hsi_port_claimed(cl))
return -EACCES;
return hsi_get_port(cl)->stop_tx(cl);
}
#endif /* __LINUX_HSI_H__ */
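As an orientation aid, a minimal (hypothetical) client read path built on the API above might look as follows; my_complete() and the error handling are placeholders, and a real driver would also implement the hsi_start_rx/hsi_stop_rx callbacks:

static void my_complete(struct hsi_msg *msg)
{
	/* called on completion; inspect msg->status / msg->actual_len */
	hsi_free_msg(msg);
}

static int my_read(struct hsi_client *cl, void *buf, size_t len)
{
	struct hsi_msg *msg;
	int err;

	err = hsi_claim_port(cl, 0);		/* exclusive claim */
	if (err)
		return err;
	err = hsi_setup(cl);			/* apply tx_cfg / rx_cfg */
	if (err)
		goto release;

	msg = hsi_alloc_msg(1, GFP_KERNEL);	/* one sg fragment */
	if (!msg) {
		err = -ENOMEM;
		goto release;
	}
	sg_init_one(msg->sgt.sgl, buf, len);
	msg->cl = cl;
	msg->complete = my_complete;
	err = hsi_async_read(cl, msg);		/* completes via callback */
	if (!err)
		return 0;			/* port released elsewhere */
	hsi_free_msg(msg);
release:
	hsi_release_port(cl);
	return err;
}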

View File

@@ -0,0 +1,63 @@
/*
* Part of the HSI character device driver.
*
* Copyright (C) 2010 Nokia Corporation. All rights reserved.
*
* Contact: Andras Domokos <andras.domokos at nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#ifndef __HSI_CHAR_H
#define __HSI_CHAR_H
#define HSI_CHAR_MAGIC 'k'
#define HSC_IOW(num, dtype) _IOW(HSI_CHAR_MAGIC, num, dtype)
#define HSC_IOR(num, dtype) _IOR(HSI_CHAR_MAGIC, num, dtype)
#define HSC_IOWR(num, dtype) _IOWR(HSI_CHAR_MAGIC, num, dtype)
#define HSC_IO(num) _IO(HSI_CHAR_MAGIC, num)
#define HSC_RESET HSC_IO(16)
#define HSC_SET_PM HSC_IO(17)
#define HSC_SEND_BREAK HSC_IO(18)
#define HSC_SET_RX HSC_IOW(19, struct hsc_rx_config)
#define HSC_GET_RX HSC_IOW(20, struct hsc_rx_config)
#define HSC_SET_TX HSC_IOW(21, struct hsc_tx_config)
#define HSC_GET_TX HSC_IOW(22, struct hsc_tx_config)
#define HSC_PM_DISABLE 0
#define HSC_PM_ENABLE 1
#define HSC_MODE_STREAM 1
#define HSC_MODE_FRAME 2
#define HSC_FLOW_SYNC 0
#define HSC_ARB_RR 0
#define HSC_ARB_PRIO 1
struct hsc_rx_config {
uint32_t mode;
uint32_t flow;
uint32_t channels;
};
struct hsc_tx_config {
uint32_t mode;
uint32_t channels;
uint32_t speed;
uint32_t arb_mode;
};
#endif /* __HSI_CHAR_H */

View File

@@ -51,6 +51,9 @@ extern pmd_t *page_check_address_pmd(struct page *page,
unsigned long address,
enum page_check_address_pmd_flag flag);
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT HPAGE_SHIFT
#define HPAGE_PMD_MASK HPAGE_MASK
@@ -102,8 +105,6 @@ extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
BUG_ON(pmd_trans_splitting(*____pmd) || \
pmd_trans_huge(*____pmd)); \
} while (0)
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
#if HPAGE_PMD_ORDER > MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
@@ -113,6 +114,18 @@ extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
long adjust_next);
extern int __pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
static inline int pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
if (pmd_trans_huge(*pmd))
return __pmd_trans_huge_lock(pmd, vma);
else
return 0;
}
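The calling convention, modeled on the in-tree users (a sketch; the caller holds mmap_sem as required above):

	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		/* *pmd is a stable huge pmd; page_table_lock is held */
		/* ... operate on the huge pmd ... */
		spin_unlock(&vma->vm_mm->page_table_lock);
	}
	/* a return of 0 means not huge (or it was split): fall back to ptes */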
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
@@ -146,9 +159,9 @@ static inline struct page *compound_trans_head(struct page *page)
return page;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUG(); 0; })
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })
#define hpage_nr_pages(x) 1
@@ -176,6 +189,11 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
long adjust_next)
{
}
static inline int pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* _LINUX_HUGE_MM_H */

View File

@@ -14,6 +14,15 @@ struct user_struct;
#include <linux/shm.h>
#include <asm/tlbflush.h>
struct hugepage_subpool {
spinlock_t lock;
long count;
long max_hpages, used_hpages;
};
struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
void hugepage_put_subpool(struct hugepage_subpool *spool);
int PageHuge(struct page *page);
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
@@ -128,35 +137,14 @@ enum {
};
#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_config {
uid_t uid;
gid_t gid;
umode_t mode;
long nr_blocks;
long nr_inodes;
struct hstate *hstate;
};
struct hugetlbfs_sb_info {
long max_blocks; /* blocks allowed */
long free_blocks; /* blocks free */
long max_inodes; /* inodes allowed */
long free_inodes; /* inodes free */
spinlock_t stat_lock;
struct hstate *hstate;
struct hugepage_subpool *spool;
};
struct hugetlbfs_inode_info {
struct shared_policy policy;
struct inode vfs_inode;
};
static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}
static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
return sb->s_fs_info;
@@ -164,10 +152,9 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
struct file *hugetlb_file_setup(const char *name, unsigned long addr,
size_t size, vm_flags_t acct,
struct user_struct **user, int creat_flags);
int hugetlb_get_quota(struct address_space *mapping, long delta);
void hugetlb_put_quota(struct address_space *mapping, long delta);
static inline int is_file_hugepages(struct file *file)
{
@@ -179,15 +166,11 @@ static inline int is_file_hugepages(struct file *file)
return 0;
}
static inline void set_file_hugepages(struct file *file)
{
file->f_op = &hugetlbfs_file_operations;
}
#else /* !CONFIG_HUGETLBFS */
#define is_file_hugepages(file) 0
#define set_file_hugepages(file) BUG()
static inline struct file *hugetlb_file_setup(const char *name, size_t size,
static inline struct file *
hugetlb_file_setup(const char *name, unsigned long addr, size_t size,
vm_flags_t acctflag, struct user_struct **user, int creat_flags)
{
return ERR_PTR(-ENOSYS);

View File

@@ -20,6 +20,8 @@
#ifndef _LINUX_HWMON_SYSFS_H
#define _LINUX_HWMON_SYSFS_H
#include <linux/device.h>
struct sensor_device_attribute{
struct device_attribute dev_attr;
int index;

View File

@@ -14,7 +14,7 @@
#ifndef _HWMON_H_
#define _HWMON_H_
#include <linux/device.h>
struct device;
struct device *hwmon_device_register(struct device *dev);

View File

@@ -20,12 +20,12 @@
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/device.h>
/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
struct device;
struct hwspinlock;
struct hwspinlock_device;
struct hwspinlock_ops;

View File

@@ -25,6 +25,173 @@
#ifndef _HYPERV_H
#define _HYPERV_H
#include <linux/types.h>
/*
* An implementation of HyperV key value pair (KVP) functionality for Linux.
*
*
* Copyright (C) 2010, Novell, Inc.
* Author : K. Y. Srinivasan <ksrinivasan@novell.com>
*
*/
/*
* Maximum value size - used for both key names and value data, and includes
* any applicable NULL terminators.
*
* Note: This limit is somewhat arbitrary, but falls easily within what is
* supported for all native guests (back to Win 2000) and what is reasonable
* for the IC KVP exchange functionality. Note that Windows Me/98/95 are
* limited to 255 character key names.
*
* MSDN recommends not storing data values larger than 2048 bytes in the
* registry.
*
* Note: This value is used in defining the KVP exchange message - this value
* cannot be modified without affecting the message size and compatibility.
*/
/*
* bytes, including any null terminators
*/
#define HV_KVP_EXCHANGE_MAX_VALUE_SIZE (2048)
/*
* Maximum key size - the registry limit for the length of an entry name
* is 256 characters, including the null terminator
*/
#define HV_KVP_EXCHANGE_MAX_KEY_SIZE (512)
/*
* In Linux, we implement the KVP functionality in two components:
* 1) The kernel component which is packaged as part of the hv_utils driver
* is responsible for communicating with the host and responsible for
* implementing the host/guest protocol. 2) A user level daemon that is
* responsible for data gathering.
*
* Host/Guest Protocol: The host iterates over an index and expects the guest
* to assign a key name to the index and also return the value corresponding to
 * the key. The host will have at most one KVP transaction outstanding at any
* given point in time. The host side iteration stops when the guest returns
* an error. Microsoft has specified the following mapping of key names to
* host specified index:
*
* Index Key Name
* 0 FullyQualifiedDomainName
* 1 IntegrationServicesVersion
* 2 NetworkAddressIPv4
* 3 NetworkAddressIPv6
* 4 OSBuildNumber
* 5 OSName
* 6 OSMajorVersion
* 7 OSMinorVersion
* 8 OSVersion
* 9 ProcessorArchitecture
*
* The Windows host expects the Key Name and Key Value to be encoded in utf16.
*
* Guest Kernel/KVP Daemon Protocol: As noted earlier, we implement all of the
 * data gathering functionality in a user mode daemon. The user-level daemon
 * is also responsible for binding the key name to the index. The
* kernel and user-level daemon communicate using a connector channel.
*
 * The user mode component first registers with the
 * kernel component. Subsequently, the kernel component requests data
* for the specified keys. In response to this message the user mode component
* fills in the value corresponding to the specified key. We overload the
* sequence field in the cn_msg header to define our KVP message types.
*
*
* The kernel component simply acts as a conduit for communication between the
* Windows host and the user-level daemon. The kernel component passes up the
* index received from the Host to the user-level daemon. If the index is
* valid (supported), the corresponding key as well as its
* value (both are strings) is returned. If the index is invalid
* (not supported), a NULL key string is returned.
*/
/*
* Registry value types.
*/
#define REG_SZ 1
#define REG_U32 4
#define REG_U64 8
enum hv_kvp_exchg_op {
KVP_OP_GET = 0,
KVP_OP_SET,
KVP_OP_DELETE,
KVP_OP_ENUMERATE,
KVP_OP_REGISTER,
KVP_OP_COUNT /* Number of operations, must be last. */
};
enum hv_kvp_exchg_pool {
KVP_POOL_EXTERNAL = 0,
KVP_POOL_GUEST,
KVP_POOL_AUTO,
KVP_POOL_AUTO_EXTERNAL,
KVP_POOL_AUTO_INTERNAL,
KVP_POOL_COUNT /* Number of pools, must be last. */
};
struct hv_kvp_hdr {
__u8 operation;
__u8 pool;
__u16 pad;
} __attribute__((packed));
struct hv_kvp_exchg_msg_value {
__u32 value_type;
__u32 key_size;
__u32 value_size;
__u8 key[HV_KVP_EXCHANGE_MAX_KEY_SIZE];
union {
__u8 value[HV_KVP_EXCHANGE_MAX_VALUE_SIZE];
__u32 value_u32;
__u64 value_u64;
};
} __attribute__((packed));
struct hv_kvp_msg_enumerate {
__u32 index;
struct hv_kvp_exchg_msg_value data;
} __attribute__((packed));
struct hv_kvp_msg_get {
struct hv_kvp_exchg_msg_value data;
};
struct hv_kvp_msg_set {
struct hv_kvp_exchg_msg_value data;
};
struct hv_kvp_msg_delete {
__u32 key_size;
__u8 key[HV_KVP_EXCHANGE_MAX_KEY_SIZE];
};
struct hv_kvp_register {
__u8 version[HV_KVP_EXCHANGE_MAX_KEY_SIZE];
};
struct hv_kvp_msg {
struct hv_kvp_hdr kvp_hdr;
union {
struct hv_kvp_msg_get kvp_get;
struct hv_kvp_msg_set kvp_set;
struct hv_kvp_msg_delete kvp_delete;
struct hv_kvp_msg_enumerate kvp_enum_data;
struct hv_kvp_register kvp_register;
} body;
} __attribute__((packed));
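To illustrate the layout, a hypothetical daemon-side reply to an enumerate request might be filled in as below; real code additionally converts the strings to utf16 for the host and exchanges the message over the connector channel:

	struct hv_kvp_msg msg = {
		.kvp_hdr = {
			.operation = KVP_OP_ENUMERATE,
			.pool      = KVP_POOL_AUTO,
		},
	};
	struct hv_kvp_exchg_msg_value *val = &msg.body.kvp_enum_data.data;

	/* Index 0 maps to FullyQualifiedDomainName per the table above. */
	msg.body.kvp_enum_data.index = 0;
	val->value_type = REG_SZ;
	val->key_size = sizeof("FullyQualifiedDomainName");
	memcpy(val->key, "FullyQualifiedDomainName", val->key_size);
	val->value_size = sizeof("guest.example.com");
	memcpy(val->value, "guest.example.com", val->value_size);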
#ifdef __KERNEL__
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/uuid.h>
@@ -785,6 +952,7 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver);
#define HV_S_OK 0x00000000
#define HV_E_FAIL 0x80004005
#define HV_S_CONT 0x80070103
#define HV_ERROR_NOT_SUPPORTED 0x80070032
#define HV_ERROR_MACHINE_LOCKED 0x800704F7
@@ -870,4 +1038,9 @@ struct hyperv_service_callback {
extern void vmbus_prep_negotiate_resp(struct icmsg_hdr *,
struct icmsg_negotiate *, u8 *);
int hv_kvp_init(struct hv_util_service *);
void hv_kvp_deinit(void);
void hv_kvp_onchannelcallback(void *);
#endif /* __KERNEL__ */
#endif /* _HYPERV_H */

Some files were not shown because too many files have changed in this diff.