Merge commit 'origin/master'

Manual merge of:

	arch/powerpc/Kconfig
	arch/powerpc/kernel/stacktrace.c
	arch/powerpc/mm/slice.c
	arch/ppc/kernel/smp.c
Benjamin Herrenschmidt
2008-07-16 11:07:59 +10:00
941 changed files with 48176 additions and 76671 deletions


@@ -651,7 +651,6 @@ extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
-extern void blk_end_sync_rq(struct request *rq, int error);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);


@@ -3,6 +3,7 @@
#include <asm/atomic.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/rcupdate.h>


@@ -1,18 +1,39 @@
#ifndef _LINUX_FIRMWARE_H
#define _LINUX_FIRMWARE_H
#include <linux/module.h>
#include <linux/types.h>
#include <linux/compiler.h>
#define FIRMWARE_NAME_MAX 30
#define FW_ACTION_NOHOTPLUG 0
#define FW_ACTION_HOTPLUG 1
struct firmware {
size_t size;
-u8 *data;
+const u8 *data;
};
struct device;
struct builtin_fw {
char *name;
void *data;
unsigned long size;
};
/* We have to play tricks here much like stringify() to get the
__COUNTER__ macro to be expanded as we want it */
#define __fw_concat1(x, y) x##y
#define __fw_concat(x, y) __fw_concat1(x, y)
#define DECLARE_BUILTIN_FIRMWARE(name, blob) \
DECLARE_BUILTIN_FIRMWARE_SIZE(name, &(blob), sizeof(blob))
#define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size) \
static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \
__used __section(.builtin_fw) = { name, blob, size }
#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE))
int request_firmware(const struct firmware **fw, const char *name,
struct device *device);


@@ -1740,6 +1740,8 @@ extern int wait_on_page_writeback_range(struct address_space *mapping,
pgoff_t start, pgoff_t end);
extern int __filemap_fdatawrite_range(struct address_space *mapping,
loff_t start, loff_t end, int sync_mode);
extern int filemap_fdatawrite_range(struct address_space *mapping,
loff_t start, loff_t end);
extern long do_fsync(struct file *file, int datasync);
extern void sync_supers(void);


@@ -33,9 +33,11 @@ struct i2c_algo_pcf_data {
int (*getclock) (void *data);
void (*waitforpin) (void);
/* local settings */
int udelay;
int timeout;
/* Multi-master lost arbitration back-off delay (msecs)
* This should be set by the bus adapter or knowledgeable client
* if bus is multi-mastered, else zero
*/
unsigned long lab_mdelay;
};
int i2c_pcf_add_bus(struct i2c_adapter *);


@@ -91,8 +91,6 @@
#define I2C_DRIVERID_M52790 95 /* Mitsubishi M52790SP/FP AV switch */
#define I2C_DRIVERID_CS5345 96 /* cs5345 audio processor */
#define I2C_DRIVERID_I2CDEV 900
#define I2C_DRIVERID_OV7670 1048 /* Omnivision 7670 camera */
/*
@@ -111,7 +109,6 @@
#define I2C_HW_B_RIVA 0x010010 /* Riva based graphics cards */
#define I2C_HW_B_IOC 0x010011 /* IOC bit-wiggling */
#define I2C_HW_B_IXP2000 0x010016 /* GPIO on IXP2000 systems */
#define I2C_HW_B_S3VIA 0x010018 /* S3Via ProSavage adapter */
#define I2C_HW_B_ZR36067 0x010019 /* Zoran-36057/36067 based boards */
#define I2C_HW_B_PCILYNX 0x01001a /* TI PCILynx I2C adapter */
#define I2C_HW_B_CX2388x 0x01001b /* connexant 2388x based tv cards */


@@ -35,6 +35,8 @@
#include <linux/sched.h> /* for completion */
#include <linux/mutex.h>
extern struct bus_type i2c_bus_type;
/* --- General options ------------------------------------------------ */
struct i2c_msg;
@@ -43,6 +45,7 @@ struct i2c_adapter;
struct i2c_client;
struct i2c_driver;
union i2c_smbus_data;
struct i2c_board_info;
/*
* The master routines are the ones normally used to transmit data to devices
@@ -69,9 +72,8 @@ extern s32 i2c_smbus_xfer (struct i2c_adapter * adapter, u16 addr,
union i2c_smbus_data * data);
/* Now follow the 'nice' access routines. These also document the calling
-conventions of smbus_access. */
+conventions of i2c_smbus_xfer. */
extern s32 i2c_smbus_write_quick(struct i2c_client * client, u8 value);
extern s32 i2c_smbus_read_byte(struct i2c_client * client);
extern s32 i2c_smbus_write_byte(struct i2c_client * client, u8 value);
extern s32 i2c_smbus_read_byte_data(struct i2c_client * client, u8 command);
@@ -93,15 +95,33 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client * client,
u8 command, u8 length,
const u8 *values);
-/*
-* A driver is capable of handling one or more physical devices present on
-* I2C adapters. This information is used to inform the driver of adapter
-* events.
+/**
+* struct i2c_driver - represent an I2C device driver
+* @class: What kind of i2c device we instantiate (for detect)
+* @detect: Callback for device detection
+* @address_data: The I2C addresses to probe, ignore or force (for detect)
+* @clients: List of detected clients we created (for i2c-core use only)
*
* The driver.owner field should be set to the module owner of this driver.
* The driver.name field should be set to the name of this driver.
+*
+* For automatic device detection, both @detect and @address_data must
+* be defined. @class should also be set, otherwise only devices forced
+* with module parameters will be created. The detect function must
+* fill at least the name field of the i2c_board_info structure it is
+* handed upon successful detection, and possibly also the flags field.
+*
+* If @detect is missing, the driver will still work fine for enumerated
+* devices. Detected devices simply won't be supported. This is expected
+* for the many I2C/SMBus devices which can't be detected reliably, and
+* the ones which can always be enumerated in practice.
+*
+* The i2c_client structure which is handed to the @detect callback is
+* not a real i2c_client. It is initialized just enough so that you can
+* call i2c_smbus_read_byte_data and friends on it. Don't do anything
+* else with it. In particular, calling dev_dbg and friends on it is
+* not allowed.
*/
struct i2c_driver {
int id;
unsigned int class;
@@ -141,6 +161,11 @@ struct i2c_driver {
struct device_driver driver;
const struct i2c_device_id *id_table;
/* Device detection callback for automatic device creation */
int (*detect)(struct i2c_client *, int kind, struct i2c_board_info *);
const struct i2c_client_address_data *address_data;
struct list_head clients;
};
#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
@@ -156,6 +181,7 @@ struct i2c_driver {
* @dev: Driver model device node for the slave.
* @irq: indicates the IRQ generated by this device (if any)
* @list: list of active/busy clients (DEPRECATED)
* @detected: member of an i2c_driver.clients list
* @released: used to synchronize client releases & detaches and references
*
* An i2c_client identifies a single device (i.e. chip) connected to an
@@ -173,6 +199,7 @@ struct i2c_client {
struct device dev; /* the device structure */
int irq; /* irq issued by device */
struct list_head list; /* DEPRECATED */
struct list_head detected;
struct completion released;
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
@@ -350,10 +377,11 @@ static inline void i2c_set_adapdata (struct i2c_adapter *dev, void *data)
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
#define I2C_CLASS_TV_ANALOG (1<<1) /* bttv + friends */
#define I2C_CLASS_TV_DIGITAL (1<<2) /* dvb cards */
-#define I2C_CLASS_DDC (1<<3) /* i2c-matroxfb ? */
+#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
#define I2C_CLASS_CAM_ANALOG (1<<4) /* camera with analog CCD */
#define I2C_CLASS_CAM_DIGITAL (1<<5) /* most webcams */
#define I2C_CLASS_SOUND (1<<6) /* sound devices */
#define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */
#define I2C_CLASS_ALL (UINT_MAX) /* all of the above */
/* i2c_client_address_data is the struct for holding default client
@@ -537,7 +565,7 @@ union i2c_smbus_data {
/* and one more for user-space compatibility */
};
-/* smbus_access read or write markers */
+/* i2c_smbus_xfer read or write markers */
#define I2C_SMBUS_READ 1
#define I2C_SMBUS_WRITE 0

include/linux/i2c/at24.h (new file, 28 lines)

@@ -0,0 +1,28 @@
#ifndef _LINUX_AT24_H
#define _LINUX_AT24_H
#include <linux/types.h>
/*
* As seen through Linux I2C, differences between the most common types of I2C
* memory include:
* - How much memory is available (usually specified in bits)?
* - What write page size does it support?
* - Special flags (16 bit addresses, read_only, world readable...)?
*
* If you set up a custom eeprom type, please double-check the parameters.
* Especially page_size needs extra care, as you risk data loss if your value
* is bigger than what the chip actually supports!
*/
struct at24_platform_data {
u32 byte_len; /* size (sum of all addr) */
u16 page_size; /* for writes */
u8 flags;
#define AT24_FLAG_ADDR16 0x80 /* address pointer is 16 bit */
#define AT24_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */
#define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */
#define AT24_FLAG_TAKE8ADDR 0x10 /* always takes 8 addresses (24c00) */
};
#endif /* _LINUX_AT24_H */


@@ -364,7 +364,6 @@ typedef struct ide_drive_s {
u8 wcache; /* status of write cache */
u8 acoustic; /* acoustic management */
u8 media; /* disk, cdrom, tape, floppy, ... */
-u8 ctl; /* "normal" value for Control register */
u8 ready_stat; /* min status value for drive ready */
u8 mult_count; /* current multiple sector setting */
u8 mult_req; /* requested multiple sector setting */
@@ -493,7 +492,7 @@ typedef struct hwif_s {
void (*ide_dma_clear_irq)(ide_drive_t *drive);
void (*OUTB)(u8 addr, unsigned long port);
-void (*OUTBSYNC)(ide_drive_t *drive, u8 addr, unsigned long port);
+void (*OUTBSYNC)(struct hwif_s *hwif, u8 addr, unsigned long port);
u8 (*INB)(unsigned long port);
@@ -532,7 +531,6 @@ typedef struct hwif_s {
unsigned serialized : 1; /* serialized all channel operation */
unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */
unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */
-unsigned mmio : 1; /* host uses MMIO */
struct device gendev;
struct device *portdev;
@@ -604,12 +602,13 @@ enum {
PC_FLAG_SUPPRESS_ERROR = (1 << 1),
PC_FLAG_WAIT_FOR_DSC = (1 << 2),
PC_FLAG_DMA_OK = (1 << 3),
-PC_FLAG_DMA_RECOMMENDED = (1 << 4),
-PC_FLAG_DMA_IN_PROGRESS = (1 << 5),
-PC_FLAG_DMA_ERROR = (1 << 6),
-PC_FLAG_WRITING = (1 << 7),
+PC_FLAG_DMA_IN_PROGRESS = (1 << 4),
+PC_FLAG_DMA_ERROR = (1 << 5),
+PC_FLAG_WRITING = (1 << 6),
/* command timed out */
-PC_FLAG_TIMEDOUT = (1 << 8),
+PC_FLAG_TIMEDOUT = (1 << 7),
+PC_FLAG_ZIP_DRIVE = (1 << 8),
+PC_FLAG_DRQ_INTERRUPT = (1 << 9),
};
struct ide_atapi_pc {
@@ -642,8 +641,8 @@ struct ide_atapi_pc {
* to change/removal later.
*/
u8 pc_buf[256];
-void (*idefloppy_callback) (ide_drive_t *);
-ide_startstop_t (*idetape_callback) (ide_drive_t *);
+void (*callback)(ide_drive_t *);
/* idetape only */
struct idetape_bh *bh;
@@ -813,10 +812,6 @@ int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsig
#ifndef _IDE_C
extern ide_hwif_t ide_hwifs[]; /* master data repository */
#endif
-extern int ide_noacpi;
-extern int ide_acpigtf;
-extern int ide_acpionboot;
-extern int noautodma;
extern int ide_vlb_clk;
extern int ide_pci_clk;
@@ -857,25 +852,12 @@ int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
extern ide_startstop_t ide_do_reset (ide_drive_t *);
extern void ide_init_drive_cmd (struct request *rq);
/*
* "action" parameter type for ide_do_drive_cmd() below.
*/
typedef enum {
ide_wait, /* insert rq at end of list, and wait for it */
ide_preempt, /* insert rq in front of current request */
ide_head_wait, /* insert rq in front of current request and wait for it */
ide_end /* insert rq at end of list, but don't wait for it */
} ide_action_t;
-extern int ide_do_drive_cmd(ide_drive_t *, struct request *, ide_action_t);
+extern void ide_do_drive_cmd(ide_drive_t *, struct request *);
extern void ide_end_drive_cmd(ide_drive_t *, u8, u8);
enum {
IDE_TFLAG_LBA48 = (1 << 0),
IDE_TFLAG_NO_SELECT_MASK = (1 << 1),
IDE_TFLAG_FLAGGED = (1 << 2),
IDE_TFLAG_OUT_DATA = (1 << 3),
IDE_TFLAG_OUT_HOB_FEATURE = (1 << 4),
@@ -980,11 +962,23 @@ typedef struct ide_task_s {
void ide_tf_dump(const char *, struct ide_taskfile *);
extern void SELECT_DRIVE(ide_drive_t *);
void SELECT_MASK(ide_drive_t *, int);
extern int drive_is_ready(ide_drive_t *);
void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8);
ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry,
void (*update_buffers)(ide_drive_t *, struct ide_atapi_pc *),
void (*retry_pc)(ide_drive_t *), void (*dsc_handle)(ide_drive_t *),
void (*io_buffers)(ide_drive_t *, struct ide_atapi_pc *, unsigned int,
int));
ide_startstop_t ide_transfer_pc(ide_drive_t *, struct ide_atapi_pc *,
ide_handler_t *, unsigned int, ide_expiry_t *);
ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_atapi_pc *,
ide_handler_t *, unsigned int, ide_expiry_t *);
ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *);
void task_end_request(ide_drive_t *, struct request *, u8);
@@ -996,8 +990,6 @@ int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long);
int ide_cmd_ioctl(ide_drive_t *, unsigned int, unsigned long);
int ide_task_ioctl(ide_drive_t *, unsigned int, unsigned long);
extern int system_bus_clock(void);
extern int ide_driveid_update(ide_drive_t *);
extern int ide_config_drive_speed(ide_drive_t *, u8);
extern u8 eighty_ninty_three (ide_drive_t *);
@@ -1349,7 +1341,8 @@ static inline void ide_set_irq(ide_drive_t *drive, int on)
{
ide_hwif_t *hwif = drive->hwif;
-hwif->OUTB(drive->ctl | (on ? 0 : 2), hwif->io_ports.ctl_addr);
+hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS | (on ? 0 : 2),
+hwif->io_ports.ctl_addr);
}
static inline u8 ide_read_status(ide_drive_t *drive)

include/linux/ihex.h (new file, 74 lines)

@@ -0,0 +1,74 @@
/*
* Compact binary representation of ihex records. Some devices need their
* firmware loaded in strange orders rather than a single big blob, but
* actually parsing ihex-as-text within the kernel seems silly. Thus,...
*/
#ifndef __LINUX_IHEX_H__
#define __LINUX_IHEX_H__
#include <linux/types.h>
#include <linux/firmware.h>
#include <linux/device.h>
/* Intel HEX files actually limit the length to 256 bytes, but we have
drivers which would benefit from using separate records which are
longer than that, so we extend to 16 bits of length */
struct ihex_binrec {
__be32 addr;
__be16 len;
uint8_t data[0];
} __attribute__((aligned(4)));
/* Find the next record, taking into account the 4-byte alignment */
static inline const struct ihex_binrec *
ihex_next_binrec(const struct ihex_binrec *rec)
{
int next = ((be16_to_cpu(rec->len) + 5) & ~3) - 2;
rec = (void *)&rec->data[next];
return be16_to_cpu(rec->len) ? rec : NULL;
}
/* Check that ihex_next_binrec() won't take us off the end of the image... */
static inline int ihex_validate_fw(const struct firmware *fw)
{
const struct ihex_binrec *rec;
size_t ofs = 0;
while (ofs <= fw->size - sizeof(*rec)) {
rec = (void *)&fw->data[ofs];
/* Zero length marks end of records */
if (!be16_to_cpu(rec->len))
return 0;
/* Point to next record... */
ofs += (sizeof(*rec) + be16_to_cpu(rec->len) + 3) & ~3;
}
return -EINVAL;
}
/* Request firmware and validate it so that we can trust we won't
* run off the end while reading records... */
static inline int request_ihex_firmware(const struct firmware **fw,
const char *fw_name,
struct device *dev)
{
const struct firmware *lfw;
int ret;
ret = request_firmware(&lfw, fw_name, dev);
if (ret)
return ret;
ret = ihex_validate_fw(lfw);
if (ret) {
dev_err(dev, "Firmware \"%s\" not valid IHEX records\n",
fw_name);
release_firmware(lfw);
return ret;
}
*fw = lfw;
return 0;
}
#endif /* __LINUX_IHEX_H__ */


@@ -104,8 +104,11 @@ extern void enable_irq(unsigned int irq);
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
extern cpumask_t irq_default_affinity;
extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);
#else /* CONFIG_SMP */
@@ -119,6 +122,8 @@ static inline int irq_can_set_affinity(unsigned int irq)
return 0;
}
static inline int irq_select_affinity(unsigned int irq) { return 0; }
#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
#ifdef CONFIG_GENERIC_HARDIRQS


@@ -244,15 +244,6 @@ static inline void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
}
#endif
-#ifdef CONFIG_AUTO_IRQ_AFFINITY
-extern int select_smp_affinity(unsigned int irq);
-#else
-static inline int select_smp_affinity(unsigned int irq)
-{
-return 1;
-}
-#endif
extern int no_irq_affinity;
static inline int irq_balancing_disabled(unsigned int irq)


@@ -168,6 +168,8 @@ struct commit_header {
unsigned char h_chksum_size;
unsigned char h_padding[2];
__be32 h_chksum[JBD2_CHECKSUM_BYTES];
__be64 h_commit_sec;
__be32 h_commit_nsec;
};
/*
@@ -379,6 +381,38 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
bit_spin_unlock(BH_JournalHead, &bh->b_state);
}
/* Flags in jbd_inode->i_flags */
#define __JI_COMMIT_RUNNING 0
/* Commit of the inode data in progress. We use this flag to protect us from
* concurrent deletion of inode. We cannot use reference to inode for this
* since we cannot afford doing last iput() on behalf of kjournald
*/
#define JI_COMMIT_RUNNING (1 << __JI_COMMIT_RUNNING)
/**
* struct jbd2_inode is the structure linking inodes in ordered mode
* present in a transaction so that we can sync them during commit.
*/
struct jbd2_inode {
/* Which transaction does this inode belong to? Either the running
* transaction or the committing one. [j_list_lock] */
transaction_t *i_transaction;
/* Pointer to the running transaction modifying inode's data in case
* there is already a committing transaction touching it. [j_list_lock] */
transaction_t *i_next_transaction;
/* List of inodes in the i_transaction [j_list_lock] */
struct list_head i_list;
/* VFS inode this inode belongs to [constant during the lifetime
* of the structure] */
struct inode *i_vfs_inode;
/* Flags of inode [j_list_lock] */
unsigned int i_flags;
};
struct jbd2_revoke_table_s;
/**
@@ -508,24 +542,12 @@ struct transaction_s
*/
struct journal_head *t_reserved_list;
-/*
-* Doubly-linked circular list of all buffers under writeout during
-* commit [j_list_lock]
-*/
-struct journal_head *t_locked_list;
/*
* Doubly-linked circular list of all metadata buffers owned by this
* transaction [j_list_lock]
*/
struct journal_head *t_buffers;
-/*
-* Doubly-linked circular list of all data buffers still to be
-* flushed before this transaction can be committed [j_list_lock]
-*/
-struct journal_head *t_sync_datalist;
/*
* Doubly-linked circular list of all forget buffers (superseded
* buffers which we can un-checkpoint once this transaction commits)
@@ -564,6 +586,12 @@ struct transaction_s
*/
struct journal_head *t_log_list;
/*
* List of inodes whose data we've modified in data=ordered mode.
* [j_list_lock]
*/
struct list_head t_inode_list;
/*
* Protects info related to handles
*/
@@ -1004,7 +1032,6 @@ extern int jbd2_journal_extend (handle_t *, int nblocks);
extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *);
extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *);
extern int jbd2_journal_get_undo_access(handle_t *, struct buffer_head *);
-extern int jbd2_journal_dirty_data (handle_t *, struct buffer_head *);
extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
extern void jbd2_journal_release_buffer (handle_t *, struct buffer_head *);
extern int jbd2_journal_forget (handle_t *, struct buffer_head *);
@@ -1044,6 +1071,10 @@ extern void jbd2_journal_ack_err (journal_t *);
extern int jbd2_journal_clear_err (journal_t *);
extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *);
extern int jbd2_journal_force_commit(journal_t *);
extern int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *inode);
extern int jbd2_journal_begin_ordered_truncate(struct jbd2_inode *inode, loff_t new_size);
extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode);
extern void jbd2_journal_release_jbd_inode(journal_t *journal, struct jbd2_inode *jinode);
/*
* journal_head management
@@ -1179,15 +1210,13 @@ static inline int jbd_space_needed(journal_t *journal)
/* journaling buffer types */
#define BJ_None 0 /* Not journaled */
-#define BJ_SyncData 1 /* Normal data: flush before commit */
-#define BJ_Metadata 2 /* Normal journaled metadata */
-#define BJ_Forget 3 /* Buffer superseded by this transaction */
-#define BJ_IO 4 /* Buffer is for temporary IO use */
-#define BJ_Shadow 5 /* Buffer contents being shadowed to the log */
-#define BJ_LogCtl 6 /* Buffer contains log descriptors */
-#define BJ_Reserved 7 /* Buffer is reserved for access by journal */
-#define BJ_Locked 8 /* Locked for I/O during commit */
-#define BJ_Types 9
+#define BJ_Metadata 1 /* Normal journaled metadata */
+#define BJ_Forget 2 /* Buffer superseded by this transaction */
+#define BJ_IO 3 /* Buffer is for temporary IO use */
+#define BJ_Shadow 4 /* Buffer contents being shadowed to the log */
+#define BJ_LogCtl 5 /* Buffer contains log descriptors */
+#define BJ_Reserved 6 /* Buffer is reserved for access by journal */
+#define BJ_Types 7
extern int jbd_blocks_per_page(struct inode *inode);


@@ -27,6 +27,7 @@
#define __LINUX_LIBATA_H__
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
@@ -115,7 +116,7 @@ enum {
/* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
ATA_MAX_QUEUE = 32,
ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1,
-ATA_SHORT_PAUSE = (HZ >> 6) + 1,
+ATA_SHORT_PAUSE = 16,
ATAPI_MAX_DRAIN = 16 << 10,
@@ -168,6 +169,7 @@ enum {
ATA_LFLAG_ASSUME_CLASS = ATA_LFLAG_ASSUME_ATA | ATA_LFLAG_ASSUME_SEMB,
ATA_LFLAG_NO_RETRY = (1 << 5), /* don't retry this link */
ATA_LFLAG_DISABLED = (1 << 6), /* link is disabled */
ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */
/* struct ata_port flags */
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
@@ -190,6 +192,10 @@ enum {
ATA_FLAG_AN = (1 << 18), /* controller supports AN */
ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */
ATA_FLAG_IPM = (1 << 20), /* driver can handle IPM */
ATA_FLAG_EM = (1 << 21), /* driver supports enclosure
* management */
ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity
* led */
/* The following flag belongs to ap->pflags but is kept in
* ap->flags because it's referenced in many LLDs and will be
@@ -234,17 +240,16 @@ enum {
/* bits 24:31 of host->flags are reserved for LLD specific flags */
/* various lengths of time */
-ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */
-ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */
-ATA_TMOUT_INTERNAL = 30 * HZ,
-ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,
+ATA_TMOUT_BOOT = 30000, /* heuristic */
+ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */
+ATA_TMOUT_INTERNAL_QUICK = 5000,
/* FIXME: GoVault needs 2s but we can't afford that without
* parallel probing. 800ms is enough for iVDR disk
* HHD424020F7SV00. Increase to 2secs when parallel probing
* is in place.
*/
-ATA_TMOUT_FF_WAIT = 4 * HZ / 5,
+ATA_TMOUT_FF_WAIT = 800,
/* Spec mandates to wait for ">= 2ms" before checking status
* after reset. We wait 150ms, because that was the magic
@@ -256,14 +261,14 @@ enum {
*
* Old drivers/ide uses the 2mS rule and then waits for ready.
*/
-ATA_WAIT_AFTER_RESET_MSECS = 150,
+ATA_WAIT_AFTER_RESET = 150,
/* If PMP is supported, we have to do follow-up SRST. As some
* PMPs don't send D2H Reg FIS after hardreset, LLDs are
* advised to wait only for the following duration before
* doing SRST.
*/
-ATA_TMOUT_PMP_SRST_WAIT = 1 * HZ,
+ATA_TMOUT_PMP_SRST_WAIT = 1000,
/* ATA bus states */
BUS_UNKNOWN = 0,
@@ -340,6 +345,11 @@ enum {
SATA_PMP_RW_TIMEOUT = 3000, /* PMP read/write timeout */
/* This should match the actual table size of
* ata_eh_cmd_timeout_table in libata-eh.c.
*/
ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 5,
/* Horkage types. May be set by libata or controller on drives
(some horkage may be drive/controller pair dependent) */
@@ -441,6 +451,15 @@ enum link_pm {
MEDIUM_POWER,
};
extern struct device_attribute dev_attr_link_power_management_policy;
extern struct device_attribute dev_attr_em_message_type;
extern struct device_attribute dev_attr_em_message;
extern struct device_attribute dev_attr_sw_activity;
enum sw_activity {
OFF,
BLINK_ON,
BLINK_OFF,
};
#ifdef CONFIG_ATA_SFF
struct ata_ioports {
@@ -597,10 +616,14 @@ struct ata_eh_info {
struct ata_eh_context {
struct ata_eh_info i;
int tries[ATA_MAX_DEVICES];
int cmd_timeout_idx[ATA_MAX_DEVICES]
[ATA_EH_CMD_TIMEOUT_TABLE_SIZE];
unsigned int classes[ATA_MAX_DEVICES];
unsigned int did_probe_mask;
unsigned int saved_ncq_enabled;
u8 saved_xfer_mode[ATA_MAX_DEVICES];
/* timestamp for the last reset attempt or success */
unsigned long last_reset;
};
struct ata_acpi_drive
@@ -692,6 +715,7 @@ struct ata_port {
struct timer_list fastdrain_timer;
unsigned long fastdrain_cnt;
int em_message_type;
void *private_data;
#ifdef CONFIG_ATA_ACPI
@@ -783,6 +807,12 @@ struct ata_port_operations {
u8 (*bmdma_status)(struct ata_port *ap);
#endif /* CONFIG_ATA_SFF */
ssize_t (*em_show)(struct ata_port *ap, char *buf);
ssize_t (*em_store)(struct ata_port *ap, const char *message,
size_t size);
ssize_t (*sw_activity_show)(struct ata_device *dev, char *buf);
ssize_t (*sw_activity_store)(struct ata_device *dev,
enum sw_activity val);
/*
* Obsolete
*/
@@ -895,8 +925,7 @@ extern void ata_host_resume(struct ata_host *host);
#endif
extern int ata_ratelimit(void);
extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
-unsigned long interval_msec,
-unsigned long timeout_msec);
+unsigned long interval, unsigned long timeout);
extern int atapi_cmd_type(u8 opcode);
extern void ata_tf_to_fis(const struct ata_taskfile *tf,
u8 pmp, int is_cmd, u8 *fis);
@@ -1389,6 +1418,12 @@ static inline int ata_check_ready(u8 status)
return 0;
}
static inline unsigned long ata_deadline(unsigned long from_jiffies,
unsigned long timeout_msecs)
{
return from_jiffies + msecs_to_jiffies(timeout_msecs);
}
/**************************************************************************
* PMP - drivers/ata/libata-pmp.c


@@ -84,65 +84,6 @@ static inline void list_add_tail(struct list_head *new, struct list_head *head)
__list_add(new, head->prev, head);
}
/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static inline void __list_add_rcu(struct list_head * new,
struct list_head * prev, struct list_head * next)
{
new->next = next;
new->prev = prev;
smp_wmb();
next->prev = new;
prev->next = new;
}
/**
* list_add_rcu - add a new entry to rcu-protected list
* @new: new entry to be added
* @head: list head to add it after
*
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as list_add_rcu()
* or list_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* list_for_each_entry_rcu().
*/
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
__list_add_rcu(new, head, head->next);
}
/**
* list_add_tail_rcu - add a new entry to rcu-protected list
* @new: new entry to be added
* @head: list head to add it before
*
* Insert a new entry before the specified head.
* This is useful for implementing queues.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as list_add_tail_rcu()
* or list_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* list_for_each_entry_rcu().
*/
static inline void list_add_tail_rcu(struct list_head *new,
struct list_head *head)
{
__list_add_rcu(new, head->prev, head);
}
/*
* Delete a list entry by making the prev/next entries
* point to each other.
@@ -173,36 +114,6 @@ static inline void list_del(struct list_head *entry)
extern void list_del(struct list_head *entry);
#endif
/**
* list_del_rcu - deletes entry from list without re-initialization
* @entry: the element to delete from the list.
*
* Note: list_empty() on entry does not return true after this,
* the entry is in an undefined state. It is useful for RCU based
* lockfree traversal.
*
* In particular, it means that we can not poison the forward
* pointers that may still be used for walking the list.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as list_del_rcu()
* or list_add_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* list_for_each_entry_rcu().
*
* Note that the caller is not permitted to immediately free
* the newly deleted entry. Instead, either synchronize_rcu()
* or call_rcu() must be used to defer freeing until an RCU
* grace period has elapsed.
*/
static inline void list_del_rcu(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
entry->prev = LIST_POISON2;
}
/**
* list_replace - replace old entry by new one
* @old : the element to be replaced
@@ -226,25 +137,6 @@ static inline void list_replace_init(struct list_head *old,
INIT_LIST_HEAD(old);
}
/**
* list_replace_rcu - replace old entry by new one
* @old : the element to be replaced
* @new : the new element to insert
*
* The @old entry will be replaced with the @new entry atomically.
* Note: @old should not be empty.
*/
static inline void list_replace_rcu(struct list_head *old,
struct list_head *new)
{
new->next = old->next;
new->prev = old->prev;
smp_wmb();
new->next->prev = new;
new->prev->next = new;
old->prev = LIST_POISON2;
}
/**
* list_del_init - deletes entry from list and reinitialize it.
* @entry: the element to delete from the list.
@@ -368,62 +260,6 @@ static inline void list_splice_init(struct list_head *list,
}
}
/**
* list_splice_init_rcu - splice an RCU-protected list into an existing list.
* @list: the RCU-protected list to splice
* @head: the place in the list to splice the first list into
* @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
*
* @head can be RCU-read traversed concurrently with this function.
*
* Note that this function blocks.
*
* Important note: the caller must take whatever action is necessary to
* prevent any other updates to @head. In principle, it is possible
* to modify the list as soon as sync() begins execution.
* If this sort of thing becomes necessary, an alternative version
* based on call_rcu() could be created. But only if -really-
* needed -- there is no shortage of RCU API members.
*/
static inline void list_splice_init_rcu(struct list_head *list,
struct list_head *head,
void (*sync)(void))
{
struct list_head *first = list->next;
struct list_head *last = list->prev;
struct list_head *at = head->next;
if (list_empty(head))
return;
/* "first" and "last" tracking list, so initialize it. */
INIT_LIST_HEAD(list);
/*
* At this point, the list body still points to the source list.
* Wait for any readers to finish using the list before splicing
* the list body into the new list. Any new readers will see
* an empty list.
*/
sync();
/*
* Readers are finished with the source list, so perform splice.
* The order is important if the new list is global and accessible
* to concurrent RCU readers. Note that RCU readers are not
* permitted to traverse the prev pointers without excluding
* this function.
*/
last->next = at;
smp_wmb();
head->next = first;
first->prev = head;
at->prev = last;
}
/**
* list_entry - get the struct for this entry
* @ptr: the &struct list_head pointer.
@@ -629,57 +465,6 @@ static inline void list_splice_init_rcu(struct list_head *list,
&pos->member != (head); \
pos = n, n = list_entry(n->member.prev, typeof(*n), member))
/**
* list_for_each_rcu - iterate over an rcu-protected list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as list_add_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_rcu(pos, head) \
for (pos = rcu_dereference((head)->next); \
prefetch(pos->next), pos != (head); \
pos = rcu_dereference(pos->next))
#define __list_for_each_rcu(pos, head) \
for (pos = rcu_dereference((head)->next); \
pos != (head); \
pos = rcu_dereference(pos->next))
/**
* list_for_each_entry_rcu - iterate over rcu list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as list_add_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_entry_rcu(pos, head, member) \
for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \
prefetch(pos->member.next), &pos->member != (head); \
pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member))
/**
* list_for_each_continue_rcu
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*
* Iterate over an rcu-protected list, continuing after current point.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as list_add_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_continue_rcu(pos, head) \
for ((pos) = rcu_dereference((pos)->next); \
prefetch((pos)->next), (pos) != (head); \
(pos) = rcu_dereference((pos)->next))
/*
* Double linked lists with a single pointer list head.
* Mostly useful for hash tables where the two pointer list head is
@@ -730,31 +515,6 @@ static inline void hlist_del(struct hlist_node *n)
n->pprev = LIST_POISON2;
}
/**
* hlist_del_rcu - deletes entry from hash list without re-initialization
* @n: the element to delete from the hash list.
*
* Note: list_unhashed() on entry does not return true after this,
* the entry is in an undefined state. It is useful for RCU based
* lockfree traversal.
*
* In particular, it means that we can not poison the forward
* pointers that may still be used for walking the hash list.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_add_head_rcu()
* or hlist_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_for_each_entry().
*/
static inline void hlist_del_rcu(struct hlist_node *n)
{
__hlist_del(n);
n->pprev = LIST_POISON2;
}
static inline void hlist_del_init(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
@@ -763,27 +523,6 @@ static inline void hlist_del_init(struct hlist_node *n)
}
}
/**
* hlist_replace_rcu - replace old entry by new one
* @old : the element to be replaced
* @new : the new element to insert
*
* The @old entry will be replaced with the @new entry atomically.
*/
static inline void hlist_replace_rcu(struct hlist_node *old,
struct hlist_node *new)
{
struct hlist_node *next = old->next;
new->next = next;
new->pprev = old->pprev;
smp_wmb();
if (next)
new->next->pprev = &new->next;
*new->pprev = new;
old->pprev = LIST_POISON2;
}
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
@@ -794,38 +533,6 @@ static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
n->pprev = &h->first;
}
/**
* hlist_add_head_rcu
* @n: the element to add to the hash list.
* @h: the list to add to.
*
* Description:
* Adds the specified element to the specified hlist,
* while permitting racing traversals.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_add_head_rcu()
* or hlist_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs. Regardless of the type of CPU, the
* list-traversal primitive must be guarded by rcu_read_lock().
*/
static inline void hlist_add_head_rcu(struct hlist_node *n,
struct hlist_head *h)
{
struct hlist_node *first = h->first;
n->next = first;
n->pprev = &h->first;
smp_wmb();
if (first)
first->pprev = &n->next;
h->first = n;
}
/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
struct hlist_node *next)
@@ -847,63 +554,6 @@ static inline void hlist_add_after(struct hlist_node *n,
next->next->pprev = &next->next;
}
/**
* hlist_add_before_rcu
* @n: the new element to add to the hash list.
* @next: the existing element to add the new element before.
*
* Description:
* Adds the specified element to the specified hlist
* before the specified node while permitting racing traversals.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_add_head_rcu()
* or hlist_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs.
*/
static inline void hlist_add_before_rcu(struct hlist_node *n,
struct hlist_node *next)
{
n->pprev = next->pprev;
n->next = next;
smp_wmb();
next->pprev = &n->next;
*(n->pprev) = n;
}
/**
* hlist_add_after_rcu
* @prev: the existing element to add the new element after.
* @n: the new element to add to the hash list.
*
* Description:
* Adds the specified element to the specified hlist
* after the specified node while permitting racing traversals.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_add_head_rcu()
* or hlist_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs.
*/
static inline void hlist_add_after_rcu(struct hlist_node *prev,
struct hlist_node *n)
{
n->next = prev->next;
n->pprev = &prev->next;
smp_wmb();
prev->next = n;
if (n->next)
n->next->pprev = &n->next;
}
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_for_each(pos, head) \
@@ -964,21 +614,4 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = n)
/**
* hlist_for_each_entry_rcu - iterate over rcu list of given type
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_node to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as hlist_add_head_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
for (pos = rcu_dereference((head)->first); \
pos && ({ prefetch(pos->next); 1;}) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = rcu_dereference(pos->next))
#endif


@@ -122,11 +122,9 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data);
*/
#define LM_OUT_ST_MASK 0x00000003
#define LM_OUT_CACHEABLE 0x00000004
#define LM_OUT_CANCELED 0x00000008
#define LM_OUT_ASYNC 0x00000080
#define LM_OUT_ERROR 0x00000100
#define LM_OUT_CONV_DEADLK 0x00000200
/*
* lm_callback_t types
@@ -138,9 +136,6 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data);
* LM_CB_NEED_RECOVERY
* The given journal needs to be recovered.
*
-* LM_CB_DROPLOCKS
-* Reduce the number of cached locks.
-*
* LM_CB_ASYNC
* The given lock has been granted.
*/
@@ -149,7 +144,6 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data);
#define LM_CB_NEED_D 258
#define LM_CB_NEED_S 259
#define LM_CB_NEED_RECOVERY 260
-#define LM_CB_DROPLOCKS 261
#define LM_CB_ASYNC 262
/*


@@ -398,7 +398,8 @@ int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_waterm
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);
-int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);
+int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+int block_mcast_loopback);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,


@@ -11,11 +11,21 @@
*/
#ifdef CONFIG_BLOCK
struct mpage_data {
struct bio *bio;
sector_t last_block_in_bio;
get_block_t *get_block;
unsigned use_writepage;
};
struct writeback_control;
struct bio *mpage_bio_submit(int rw, struct bio *bio);
int mpage_readpages(struct address_space *mapping, struct list_head *pages,
unsigned nr_pages, get_block_t get_block);
int mpage_readpage(struct page *page, get_block_t get_block);
int __mpage_writepage(struct page *page, struct writeback_control *wbc,
void *data);
int mpage_writepages(struct address_space *mapping,
struct writeback_control *wbc, get_block_t get_block);
int mpage_writepage(struct page *page, get_block_t *get_block,


@@ -35,7 +35,7 @@ int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount);
void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
-s64 __percpu_counter_sum(struct percpu_counter *fbc);
+s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
@@ -44,13 +44,19 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
-s64 ret = __percpu_counter_sum(fbc);
+s64 ret = __percpu_counter_sum(fbc, 0);
return ret < 0 ? 0 : ret;
}
static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
{
return __percpu_counter_sum(fbc, 1);
}
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
-return __percpu_counter_sum(fbc);
+return __percpu_counter_sum(fbc, 0);
}
static inline s64 percpu_counter_read(struct percpu_counter *fbc)


@@ -151,7 +151,10 @@ extern struct lockdep_map rcu_lock_map;
#define __synchronize_sched() synchronize_rcu()
#define call_rcu_sched(head, func) call_rcu(head, func)
extern void __rcu_init(void);
#define rcu_init_sched() do { } while (0)
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);


@@ -1,6 +1,373 @@
#ifndef _LINUX_RCULIST_H
#define _LINUX_RCULIST_H
#include <linux/list.h>
#ifdef __KERNEL__
#endif /* _LINUX_RCULIST_H */
/*
* RCU-protected list version
*/
#include <linux/list.h>
#include <linux/rcupdate.h>
/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static inline void __list_add_rcu(struct list_head *new,
struct list_head *prev, struct list_head *next)
{
new->next = next;
new->prev = prev;
rcu_assign_pointer(prev->next, new);
next->prev = new;
}
/**
* list_add_rcu - add a new entry to rcu-protected list
* @new: new entry to be added
* @head: list head to add it after
*
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as list_add_rcu()
* or list_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* list_for_each_entry_rcu().
*/
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
__list_add_rcu(new, head, head->next);
}
/**
* list_add_tail_rcu - add a new entry to rcu-protected list
* @new: new entry to be added
* @head: list head to add it before
*
* Insert a new entry before the specified head.
* This is useful for implementing queues.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as list_add_tail_rcu()
* or list_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* list_for_each_entry_rcu().
*/
static inline void list_add_tail_rcu(struct list_head *new,
struct list_head *head)
{
__list_add_rcu(new, head->prev, head);
}
/**
* list_del_rcu - deletes entry from list without re-initialization
* @entry: the element to delete from the list.
*
* Note: list_empty() on entry does not return true after this,
* the entry is in an undefined state. It is useful for RCU based
* lockfree traversal.
*
* In particular, it means that we can not poison the forward
* pointers that may still be used for walking the list.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as list_del_rcu()
* or list_add_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* list_for_each_entry_rcu().
*
* Note that the caller is not permitted to immediately free
* the newly deleted entry. Instead, either synchronize_rcu()
* or call_rcu() must be used to defer freeing until an RCU
* grace period has elapsed.
*/
static inline void list_del_rcu(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
entry->prev = LIST_POISON2;
}
/**
* list_replace_rcu - replace old entry by new one
* @old : the element to be replaced
* @new : the new element to insert
*
* The @old entry will be replaced with the @new entry atomically.
* Note: @old should not be empty.
*/
static inline void list_replace_rcu(struct list_head *old,
struct list_head *new)
{
new->next = old->next;
new->prev = old->prev;
rcu_assign_pointer(new->prev->next, new);
new->next->prev = new;
old->prev = LIST_POISON2;
}
/**
* list_splice_init_rcu - splice an RCU-protected list into an existing list.
* @list: the RCU-protected list to splice
* @head: the place in the list to splice the first list into
* @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
*
* @head can be RCU-read traversed concurrently with this function.
*
* Note that this function blocks.
*
* Important note: the caller must take whatever action is necessary to
* prevent any other updates to @head. In principle, it is possible
* to modify the list as soon as sync() begins execution.
* If this sort of thing becomes necessary, an alternative version
* based on call_rcu() could be created. But only if -really-
* needed -- there is no shortage of RCU API members.
*/
static inline void list_splice_init_rcu(struct list_head *list,
struct list_head *head,
void (*sync)(void))
{
struct list_head *first = list->next;
struct list_head *last = list->prev;
struct list_head *at = head->next;
if (list_empty(head))
return;
/* "first" and "last" tracking list, so initialize it. */
INIT_LIST_HEAD(list);
/*
* At this point, the list body still points to the source list.
* Wait for any readers to finish using the list before splicing
* the list body into the new list. Any new readers will see
* an empty list.
*/
sync();
/*
* Readers are finished with the source list, so perform splice.
* The order is important if the new list is global and accessible
* to concurrent RCU readers. Note that RCU readers are not
* permitted to traverse the prev pointers without excluding
* this function.
*/
last->next = at;
rcu_assign_pointer(head->next, first);
first->prev = head;
at->prev = last;
}
/**
* list_for_each_rcu - iterate over an rcu-protected list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as list_add_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_rcu(pos, head) \
for (pos = rcu_dereference((head)->next); \
prefetch(pos->next), pos != (head); \
pos = rcu_dereference(pos->next))
#define __list_for_each_rcu(pos, head) \
for (pos = rcu_dereference((head)->next); \
pos != (head); \
pos = rcu_dereference(pos->next))
/**
* list_for_each_entry_rcu - iterate over rcu list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as list_add_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_entry_rcu(pos, head, member) \
for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \
prefetch(pos->member.next), &pos->member != (head); \
pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member))
/**
* list_for_each_continue_rcu
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*
* Iterate over an rcu-protected list, continuing after current point.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as list_add_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_continue_rcu(pos, head) \
for ((pos) = rcu_dereference((pos)->next); \
prefetch((pos)->next), (pos) != (head); \
(pos) = rcu_dereference((pos)->next))
/**
* hlist_del_rcu - deletes entry from hash list without re-initialization
* @n: the element to delete from the hash list.
*
* Note: list_unhashed() on entry does not return true after this,
* the entry is in an undefined state. It is useful for RCU based
* lockfree traversal.
*
* In particular, it means that we can not poison the forward
* pointers that may still be used for walking the hash list.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_add_head_rcu()
* or hlist_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_for_each_entry().
*/
static inline void hlist_del_rcu(struct hlist_node *n)
{
__hlist_del(n);
n->pprev = LIST_POISON2;
}
/**
* hlist_replace_rcu - replace old entry by new one
* @old : the element to be replaced
* @new : the new element to insert
*
* The @old entry will be replaced with the @new entry atomically.
*/
static inline void hlist_replace_rcu(struct hlist_node *old,
struct hlist_node *new)
{
struct hlist_node *next = old->next;
new->next = next;
new->pprev = old->pprev;
rcu_assign_pointer(*new->pprev, new);
if (next)
new->next->pprev = &new->next;
old->pprev = LIST_POISON2;
}
/**
* hlist_add_head_rcu
* @n: the element to add to the hash list.
* @h: the list to add to.
*
* Description:
* Adds the specified element to the specified hlist,
* while permitting racing traversals.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_add_head_rcu()
* or hlist_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs. Regardless of the type of CPU, the
* list-traversal primitive must be guarded by rcu_read_lock().
*/
static inline void hlist_add_head_rcu(struct hlist_node *n,
struct hlist_head *h)
{
struct hlist_node *first = h->first;
n->next = first;
n->pprev = &h->first;
rcu_assign_pointer(h->first, n);
if (first)
first->pprev = &n->next;
}
/**
* hlist_add_before_rcu
* @n: the new element to add to the hash list.
* @next: the existing element to add the new element before.
*
* Description:
* Adds the specified element to the specified hlist
* before the specified node while permitting racing traversals.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_add_head_rcu()
* or hlist_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs.
*/
static inline void hlist_add_before_rcu(struct hlist_node *n,
struct hlist_node *next)
{
n->pprev = next->pprev;
n->next = next;
rcu_assign_pointer(*(n->pprev), n);
next->pprev = &n->next;
}
/**
* hlist_add_after_rcu
* @prev: the existing element to add the new element after.
* @n: the new element to add to the hash list.
*
* Description:
* Adds the specified element to the specified hlist
* after the specified node while permitting racing traversals.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_add_head_rcu()
* or hlist_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs.
*/
static inline void hlist_add_after_rcu(struct hlist_node *prev,
struct hlist_node *n)
{
n->next = prev->next;
n->pprev = &prev->next;
rcu_assign_pointer(prev->next, n);
if (n->next)
n->next->pprev = &n->next;
}
/**
* hlist_for_each_entry_rcu - iterate over rcu list of given type
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_node to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as hlist_add_head_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
for (pos = rcu_dereference((head)->first); \
pos && ({ prefetch(pos->next); 1; }) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
pos = rcu_dereference(pos->next))
#endif /* __KERNEL__ */
#endif


@@ -40,6 +40,7 @@
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>
/**
* struct rcu_head - callback structure for use with RCU
@@ -168,6 +169,27 @@ struct rcu_head {
(p) = (v); \
})
/* Infrastructure to implement the synchronize_() primitives. */
struct rcu_synchronize {
struct rcu_head head;
struct completion completion;
};
extern void wakeme_after_rcu(struct rcu_head *head);
#define synchronize_rcu_xxx(name, func) \
void name(void) \
{ \
struct rcu_synchronize rcu; \
\
init_completion(&rcu.completion); \
/* Will wake me after RCU finished. */ \
func(&rcu.head, wakeme_after_rcu); \
/* Wait for it. */ \
wait_for_completion(&rcu.completion); \
}
/**
* synchronize_sched - block until all CPUs have exited any non-preemptive
* kernel code sequences.
@@ -224,8 +246,8 @@ extern void call_rcu_bh(struct rcu_head *head,
/* Exported common interfaces */
extern void synchronize_rcu(void);
extern void rcu_barrier(void);
-extern long rcu_batches_completed(void);
-extern long rcu_batches_completed_bh(void);
+extern void rcu_barrier_bh(void);
+extern void rcu_barrier_sched(void);
/* Internal to kernel */
extern void rcu_init(void);


@@ -40,10 +40,39 @@
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#define rcu_qsctr_inc(cpu)
struct rcu_dyntick_sched {
int dynticks;
int dynticks_snap;
int sched_qs;
int sched_qs_snap;
int sched_dynticks_snap;
};
DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
static inline void rcu_qsctr_inc(int cpu)
{
struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
rdssp->sched_qs++;
}
#define rcu_bh_qsctr_inc(cpu)
#define call_rcu_bh(head, rcu) call_rcu(head, rcu)
/**
* call_rcu_sched - Queue RCU callback for invocation after sched grace period.
* @head: structure to be used for queueing the RCU updates.
* @func: actual update function to be invoked after the grace period
*
* The update function will be invoked some time after a full
* synchronize_sched()-style grace period elapses, in other words after
* all currently executing preempt-disabled sections of code (including
* hardirq handlers, NMI handlers, and local_irq_save() blocks) have
* completed.
*/
extern void call_rcu_sched(struct rcu_head *head,
void (*func)(struct rcu_head *head));
extern void __rcu_read_lock(void) __acquires(RCU);
extern void __rcu_read_unlock(void) __releases(RCU);
extern int rcu_pending(int cpu);
@@ -55,6 +84,7 @@ extern int rcu_needs_cpu(int cpu);
extern void __synchronize_sched(void);
extern void __rcu_init(void);
extern void rcu_init_sched(void);
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);
extern long rcu_batches_completed(void);
@@ -81,20 +111,20 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
struct softirq_action;
#ifdef CONFIG_NO_HZ
-DECLARE_PER_CPU(long, dynticks_progress_counter);
+DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
static inline void rcu_enter_nohz(void)
{
smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
-__get_cpu_var(dynticks_progress_counter)++;
-WARN_ON(__get_cpu_var(dynticks_progress_counter) & 0x1);
+__get_cpu_var(rcu_dyntick_sched).dynticks++;
+WARN_ON(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1);
}
static inline void rcu_exit_nohz(void)
{
-__get_cpu_var(dynticks_progress_counter)++;
smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
-WARN_ON(!(__get_cpu_var(dynticks_progress_counter) & 0x1));
+__get_cpu_var(rcu_dyntick_sched).dynticks++;
+WARN_ON(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1));
}
#else /* CONFIG_NO_HZ */


@@ -7,9 +7,18 @@
*/
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/cpumask.h>
extern void cpu_idle(void);
struct call_single_data {
struct list_head list;
void (*func) (void *info);
void *info;
unsigned int flags;
};
#ifdef CONFIG_SMP
#include <linux/preempt.h>
@@ -52,15 +61,34 @@ extern void smp_cpus_done(unsigned int max_cpus);
/*
* Call a function on all other processors
*/
-int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
+int smp_call_function(void(*func)(void *info), void *info, int wait);
int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
int wait);
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
-int retry, int wait);
+int wait);
void __smp_call_function_single(int cpuid, struct call_single_data *data);
/*
* Generic and arch helpers
*/
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
void generic_smp_call_function_single_interrupt(void);
void generic_smp_call_function_interrupt(void);
void init_call_single_data(void);
void ipi_call_lock(void);
void ipi_call_unlock(void);
void ipi_call_lock_irq(void);
void ipi_call_unlock_irq(void);
#else
static inline void init_call_single_data(void)
{
}
#endif
/*
* Call a function on all processors
*/
-int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait);
+int on_each_cpu(void (*func) (void *info), void *info, int wait);
#define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */
#define MSG_ALL 0x8001
@@ -90,9 +118,9 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
{
return 0;
}
-#define smp_call_function(func, info, retry, wait) \
+#define smp_call_function(func, info, wait) \
(up_smp_call_function(func, info))
-#define on_each_cpu(func,info,retry,wait) \
+#define on_each_cpu(func,info,wait) \
({ \
local_irq_disable(); \
func(info); \
@@ -102,7 +130,7 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus() 1
#define smp_prepare_boot_cpu() do {} while (0)
-#define smp_call_function_single(cpuid, func, info, retry, wait) \
+#define smp_call_function_single(cpuid, func, info, wait) \
({ \
WARN_ON(cpuid != 0); \
local_irq_disable(); \
@@ -112,7 +140,9 @@ static inline void smp_send_reschedule(int cpu) { }
})
#define smp_call_function_mask(mask, func, info, wait) \
(up_smp_call_function(func, info))
static inline void init_call_single_data(void)
{
}
#endif /* !SMP */
/*


@@ -179,4 +179,17 @@ void arch_update_cpu_topology(void);
#endif
#endif /* CONFIG_NUMA */
#ifndef topology_physical_package_id
#define topology_physical_package_id(cpu) ((void)(cpu), -1)
#endif
#ifndef topology_core_id
#define topology_core_id(cpu) ((void)(cpu), 0)
#endif
#ifndef topology_thread_siblings
#define topology_thread_siblings(cpu) cpumask_of_cpu(cpu)
#endif
#ifndef topology_core_siblings
#define topology_core_siblings(cpu) cpumask_of_cpu(cpu)
#endif
#endif /* _LINUX_TOPOLOGY_H */


@@ -63,6 +63,7 @@ struct writeback_control {
unsigned for_writepages:1; /* This is a writepages() call */
unsigned range_cyclic:1; /* range_start is cyclic */
unsigned more_io:1; /* more io to be dispatched */
unsigned range_cont:1;
};
/*