Merge Linus' tree to be able to apply submitted patches to newer code than
current trivial.git base
@@ -24,14 +24,10 @@ struct super_block;
struct pacct_struct;
struct pid_namespace;
extern int acct_parm[]; /* for sysctl */
extern void acct_auto_close_mnt(struct vfsmount *m);
extern void acct_auto_close(struct super_block *sb);
extern void acct_collect(long exitcode, int group_dead);
extern void acct_process(void);
extern void acct_exit_ns(struct pid_namespace *);
#else
#define acct_auto_close_mnt(x) do { } while (0)
#define acct_auto_close(x) do { } while (0)
#define acct_collect(x,y) do { } while (0)
#define acct_process() do { } while (0)
#define acct_exit_ns(ns) do { } while (0)

@@ -364,6 +364,17 @@ extern bool osc_sb_apei_support_acked;
#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010
#define OSC_PCI_CONTROL_MASKS 0x0000001f

#define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002
#define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004
#define ACPI_GSB_ACCESS_ATTRIB_BYTE 0x00000006
#define ACPI_GSB_ACCESS_ATTRIB_WORD 0x00000008
#define ACPI_GSB_ACCESS_ATTRIB_BLOCK 0x0000000A
#define ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE 0x0000000B
#define ACPI_GSB_ACCESS_ATTRIB_WORD_CALL 0x0000000C
#define ACPI_GSB_ACCESS_ATTRIB_BLOCK_CALL 0x0000000D
#define ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES 0x0000000E
#define ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS 0x0000000F

extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
					    u32 *mask, u32 req);

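/*
 * Minimal usage sketch for acpi_pci_osc_control_set() as declared above
 * (illustration only, not part of this patch): the caller passes the _OSC
 * control bits it wants in *mask together with the bits it strictly
 * requires in req, then checks the returned status.
 */
static void example_request_pcie_cap_control(acpi_handle handle)
{
	u32 ctrl = OSC_PCI_EXPRESS_CAPABILITY_CONTROL;
	acpi_status status;

	status = acpi_pci_osc_control_set(handle, &ctrl, ctrl);
	if (ACPI_FAILURE(status)) {
		/* firmware retained control of the PCI Express Capability */
	}
}
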
@@ -421,6 +432,7 @@ static inline bool acpi_driver_match_device(struct device *dev,
int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
int acpi_device_modalias(struct device *, char *, int);

struct platform_device *acpi_create_platform_device(struct acpi_device *);
#define ACPI_PTR(_ptr) (_ptr)

#else /* !CONFIG_ACPI */
@@ -576,7 +588,6 @@ static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
#if defined(CONFIG_ACPI) && defined(CONFIG_PM)
struct acpi_device *acpi_dev_pm_get_node(struct device *dev);
int acpi_dev_pm_attach(struct device *dev, bool power_on);
void acpi_dev_pm_detach(struct device *dev, bool power_off);
#else
static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
{
@@ -586,7 +597,6 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
	return -ENODEV;
}
static inline void acpi_dev_pm_detach(struct device *dev, bool power_off) {}
#endif

#ifdef CONFIG_ACPI

@@ -7,6 +7,8 @@
#ifndef _AER_H_
#define _AER_H_

#include <linux/types.h>

#define AER_NONFATAL 0
#define AER_FATAL 1
#define AER_CORRECTABLE 2

@@ -22,19 +22,6 @@ struct ata_port_info;
struct ahci_host_priv;
struct platform_device;

/*
 * Note ahci_platform_data is deprecated, it is only kept around for use
 * by the old da850 and spear13xx ahci code.
 * New drivers should instead declare their own platform_driver struct, and
 * use ahci_platform* functions in their own probe, suspend and resume methods.
 */
struct ahci_platform_data {
	int (*init)(struct device *dev, void __iomem *addr);
	void (*exit)(struct device *dev);
	int (*suspend)(struct device *dev);
	int (*resume)(struct device *dev);
};

int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);

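/*
 * Minimal sketch of the "new style" driver shape recommended in the comment
 * above (illustration only): the driver declares its own platform_driver and
 * calls the ahci_platform_* helpers from its probe path.
 * ahci_platform_get_resources() is assumed from libahci_platform and is not
 * part of the hunk above; the ATA host setup step is elided.
 */
static int example_ahci_probe(struct platform_device *pdev)
{
	struct ahci_host_priv *hpriv;
	int rc;

	hpriv = ahci_platform_get_resources(pdev);	/* assumed helper */
	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	rc = ahci_platform_enable_resources(hpriv);	/* declared above */
	if (rc)
		return rc;

	/* ... ATA host initialisation would follow here ... */
	return 0;
}

static struct platform_driver example_ahci_driver = {
	.probe	= example_ahci_probe,
	.driver	= {
		.name = "example-ahci",
	},
};
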
@@ -44,10 +44,15 @@ struct amba_driver {
	const struct amba_id *id_table;
};

/*
 * Constants for the designer field of the Peripheral ID register. When bit 7
 * is set to '1', bits [6:0] should be the JEP106 manufacturer identity code.
 */
enum amba_vendor {
	AMBA_VENDOR_ARM = 0x41,
	AMBA_VENDOR_ST = 0x80,
	AMBA_VENDOR_QCOM = 0x51,
	AMBA_VENDOR_LSI = 0xb6,
};

extern struct bus_type amba_bustype;

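/*
 * Worked example for the amba_vendor comment above (illustration only, the
 * helper name is hypothetical): bit 7 of the designer field selects the
 * encoding, and when it is set the low seven bits carry the JEP106 code.
 */
static inline unsigned int example_amba_jep106_code(unsigned int designer)
{
	if (designer & 0x80)		/* JEP106 encoding */
		return designer & 0x7f;
	return 0;			/* legacy ASCII identity, e.g. 0x41 == 'A' */
}
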
@@ -8,11 +8,6 @@ struct pata_platform_info {
	 * spacing used by ata_std_ports().
	 */
	unsigned int ioport_shift;
	/*
	 * Indicate platform specific irq types and initial
	 * IRQ flags when call request_irq()
	 */
	unsigned int irq_flags;
};

extern int __pata_platform_probe(struct device *dev,

@@ -1,6 +1,8 @@
#ifndef __LINUX_ATMEL_MCI_H
#define __LINUX_ATMEL_MCI_H

#include <linux/types.h>

#define ATMCI_MAX_NR_SLOTS 2

/**

@@ -1,43 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2007 Atmel Corporation
|
||||
*
|
||||
* Driver for the AT32AP700X PS/2 controller (PSIF).
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#ifndef __INCLUDE_ATMEL_PWM_BL_H
|
||||
#define __INCLUDE_ATMEL_PWM_BL_H
|
||||
|
||||
/**
|
||||
* struct atmel_pwm_bl_platform_data
|
||||
* @pwm_channel: which PWM channel in the PWM module to use.
|
||||
* @pwm_frequency: PWM frequency to generate, the driver will try to be as
|
||||
* close as the prescaler allows.
|
||||
* @pwm_compare_max: value to use in the PWM channel compare register.
|
||||
* @pwm_duty_max: maximum duty cycle value, must be less than or equal to
|
||||
* pwm_compare_max.
|
||||
* @pwm_duty_min: minimum duty cycle value, must be less than pwm_duty_max.
|
||||
* @pwm_active_low: set to one if the low part of the PWM signal increases the
|
||||
* brightness of the backlight.
|
||||
* @gpio_on: GPIO line to control the backlight on/off, set to -1 if not used.
|
||||
* @on_active_low: set to one if the on/off signal is on when GPIO is low.
|
||||
*
|
||||
* This struct must be added to the platform device in the board code. It is
|
||||
* used by the atmel-pwm-bl driver to setup the GPIO to control on/off and the
|
||||
* PWM device.
|
||||
*/
|
||||
struct atmel_pwm_bl_platform_data {
|
||||
unsigned int pwm_channel;
|
||||
unsigned int pwm_frequency;
|
||||
unsigned int pwm_compare_max;
|
||||
unsigned int pwm_duty_max;
|
||||
unsigned int pwm_duty_min;
|
||||
unsigned int pwm_active_low;
|
||||
int gpio_on;
|
||||
unsigned int on_active_low;
|
||||
};
|
||||
|
||||
#endif /* __INCLUDE_ATMEL_PWM_BL_H */
|
@@ -1,70 +0,0 @@
|
||||
#ifndef __LINUX_ATMEL_PWM_H
|
||||
#define __LINUX_ATMEL_PWM_H
|
||||
|
||||
/**
|
||||
* struct pwm_channel - driver handle to a PWM channel
|
||||
* @regs: base of this channel's registers
|
||||
* @index: number of this channel (0..31)
|
||||
* @mck: base clock rate, which can be prescaled and maybe subdivided
|
||||
*
|
||||
* Drivers initialize a pwm_channel structure using pwm_channel_alloc().
|
||||
* Then they configure its clock rate (derived from MCK), alignment,
|
||||
* polarity, and duty cycle by writing directly to the channel registers,
|
||||
* before enabling the channel by calling pwm_channel_enable().
|
||||
*
|
||||
* After emitting a PWM signal for the desired length of time, drivers
|
||||
* may then pwm_channel_disable() or pwm_channel_free(). Both of these
|
||||
* disable the channel, but when it's freed the IRQ is deconfigured and
|
||||
* the channel must later be re-allocated and reconfigured.
|
||||
*
|
||||
* Note that if the period or duty cycle need to be changed while the
|
||||
* PWM channel is operating, drivers must use the PWM_CUPD double buffer
|
||||
* mechanism, either polling until they change or getting implicitly
|
||||
* notified through a once-per-period interrupt handler.
|
||||
*/
|
||||
struct pwm_channel {
|
||||
void __iomem *regs;
|
||||
unsigned index;
|
||||
unsigned long mck;
|
||||
};
|
||||
|
||||
extern int pwm_channel_alloc(int index, struct pwm_channel *ch);
|
||||
extern int pwm_channel_free(struct pwm_channel *ch);
|
||||
|
||||
extern int pwm_clk_alloc(unsigned prescale, unsigned div);
|
||||
extern void pwm_clk_free(unsigned clk);
|
||||
|
||||
extern int __pwm_channel_onoff(struct pwm_channel *ch, int enabled);
|
||||
|
||||
#define pwm_channel_enable(ch) __pwm_channel_onoff((ch), 1)
|
||||
#define pwm_channel_disable(ch) __pwm_channel_onoff((ch), 0)
|
||||
|
||||
/* periodic interrupts, mostly for CUPD changes to period or cycle */
|
||||
extern int pwm_channel_handler(struct pwm_channel *ch,
|
||||
void (*handler)(struct pwm_channel *ch));
|
||||
|
||||
/* per-channel registers (banked at pwm_channel->regs) */
|
||||
#define PWM_CMR 0x00 /* mode register */
|
||||
#define PWM_CPR_CPD (1 << 10) /* set: CUPD modifies period */
|
||||
#define PWM_CPR_CPOL (1 << 9) /* set: idle high */
|
||||
#define PWM_CPR_CALG (1 << 8) /* set: center align */
|
||||
#define PWM_CPR_CPRE (0xf << 0) /* mask: rate is mck/(2^pre) */
|
||||
#define PWM_CPR_CLKA (0xb << 0) /* rate CLKA */
|
||||
#define PWM_CPR_CLKB (0xc << 0) /* rate CLKB */
|
||||
#define PWM_CDTY 0x04 /* duty cycle (max of CPRD) */
|
||||
#define PWM_CPRD 0x08 /* period (count up from zero) */
|
||||
#define PWM_CCNT 0x0c /* counter (20 bits?) */
|
||||
#define PWM_CUPD 0x10 /* update CPRD (or CDTY) next period */
|
||||
|
||||
static inline void
|
||||
pwm_channel_writel(struct pwm_channel *pwmc, unsigned offset, u32 val)
|
||||
{
|
||||
__raw_writel(val, pwmc->regs + offset);
|
||||
}
|
||||
|
||||
static inline u32 pwm_channel_readl(struct pwm_channel *pwmc, unsigned offset)
|
||||
{
|
||||
return __raw_readl(pwmc->regs + offset);
|
||||
}
|
||||
|
||||
#endif /* __LINUX_ATMEL_PWM_H */
|
@@ -44,12 +44,13 @@ struct atmel_tcb_config {
/**
 * struct atmel_tc - information about a Timer/Counter Block
 * @pdev: physical device
 * @iomem: resource associated with the I/O register
 * @regs: mapping through which the I/O registers can be accessed
 * @id: block id
 * @tcb_config: configuration data from SoC
 * @irq: irq for each of the three channels
 * @clk: internal clock source for each of the three channels
 * @node: list node, for tclib internal use
 * @allocated: if already used, for tclib internal use
 *
 * On some platforms, each TC channel has its own clocks and IRQs,
 * while on others, all TC channels share the same clock and IRQ.
@@ -61,15 +62,16 @@ struct atmel_tcb_config {
 */
struct atmel_tc {
	struct platform_device *pdev;
	struct resource *iomem;
	void __iomem *regs;
	int id;
	const struct atmel_tcb_config *tcb_config;
	int irq[3];
	struct clk *clk[3];
	struct list_head node;
	bool allocated;
};

extern struct atmel_tc *atmel_tc_alloc(unsigned block, const char *name);
extern struct atmel_tc *atmel_tc_alloc(unsigned block);
extern void atmel_tc_free(struct atmel_tc *tc);

/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */
@@ -258,5 +260,10 @@ extern const u8 atmel_tc_divisors[5];
#define ATMEL_TC_LDRAS (1 << 5) /* RA loading */
#define ATMEL_TC_LDRBS (1 << 6) /* RB loading */
#define ATMEL_TC_ETRGS (1 << 7) /* external trigger */
#define ATMEL_TC_ALL_IRQ (ATMEL_TC_COVFS | ATMEL_TC_LOVRS | \
			  ATMEL_TC_CPAS | ATMEL_TC_CPBS | \
			  ATMEL_TC_CPCS | ATMEL_TC_LDRAS | \
			  ATMEL_TC_LDRBS | ATMEL_TC_ETRGS) \
			  /* all IRQs */

#endif

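/*
 * Minimal usage sketch for the tclib interface above (illustration only,
 * written against the newer atmel_tc_alloc(block) prototype): a clocksource
 * or PWM driver claims a whole block, programs the channels through
 * tc->regs, and gives the block back with atmel_tc_free().
 */
static struct atmel_tc *example_claim_tcb(void)
{
	struct atmel_tc *tc;

	tc = atmel_tc_alloc(0);		/* Timer/Counter block 0 */
	if (!tc)
		return NULL;		/* missing or already allocated */

	/* channel registers are banked at tc->regs; IRQs in tc->irq[0..2] */
	return tc;
}
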
@@ -3,42 +3,6 @@
|
||||
#define _LINUX_ATOMIC_H
|
||||
#include <asm/atomic.h>
|
||||
|
||||
/*
|
||||
* Provide __deprecated wrappers for the new interface, avoid flag day changes.
|
||||
* We need the ugly external functions to break header recursion hell.
|
||||
*/
|
||||
#ifndef smp_mb__before_atomic_inc
|
||||
static inline void __deprecated smp_mb__before_atomic_inc(void)
|
||||
{
|
||||
extern void __smp_mb__before_atomic(void);
|
||||
__smp_mb__before_atomic();
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef smp_mb__after_atomic_inc
|
||||
static inline void __deprecated smp_mb__after_atomic_inc(void)
|
||||
{
|
||||
extern void __smp_mb__after_atomic(void);
|
||||
__smp_mb__after_atomic();
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef smp_mb__before_atomic_dec
|
||||
static inline void __deprecated smp_mb__before_atomic_dec(void)
|
||||
{
|
||||
extern void __smp_mb__before_atomic(void);
|
||||
__smp_mb__before_atomic();
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef smp_mb__after_atomic_dec
|
||||
static inline void __deprecated smp_mb__after_atomic_dec(void)
|
||||
{
|
||||
extern void __smp_mb__after_atomic(void);
|
||||
__smp_mb__after_atomic();
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* atomic_add_unless - add unless the number is already a given value
|
||||
* @v: pointer of type atomic_t
|
||||
|
@@ -66,12 +66,16 @@ struct audit_krule {
|
||||
|
||||
struct audit_field {
|
||||
u32 type;
|
||||
u32 val;
|
||||
kuid_t uid;
|
||||
kgid_t gid;
|
||||
union {
|
||||
u32 val;
|
||||
kuid_t uid;
|
||||
kgid_t gid;
|
||||
struct {
|
||||
char *lsm_str;
|
||||
void *lsm_rule;
|
||||
};
|
||||
};
|
||||
u32 op;
|
||||
char *lsm_str;
|
||||
void *lsm_rule;
|
||||
};
|
||||
|
||||
extern int is_audit_feature_set(int which);
|
||||
@@ -86,7 +90,7 @@ extern unsigned compat_dir_class[];
|
||||
extern unsigned compat_chattr_class[];
|
||||
extern unsigned compat_signal_class[];
|
||||
|
||||
extern int __weak audit_classify_compat_syscall(int abi, unsigned syscall);
|
||||
extern int audit_classify_compat_syscall(int abi, unsigned syscall);
|
||||
|
||||
/* audit_names->type values */
|
||||
#define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */
|
||||
@@ -109,12 +113,13 @@ extern void audit_log_session_info(struct audit_buffer *ab);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_AUDITSYSCALL
|
||||
#include <asm/syscall.h> /* for syscall_get_arch() */
|
||||
|
||||
/* These are defined in auditsc.c */
|
||||
/* Public API */
|
||||
extern int audit_alloc(struct task_struct *task);
|
||||
extern void __audit_free(struct task_struct *task);
|
||||
extern void __audit_syscall_entry(int arch,
|
||||
int major, unsigned long a0, unsigned long a1,
|
||||
extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1,
|
||||
unsigned long a2, unsigned long a3);
|
||||
extern void __audit_syscall_exit(int ret_success, long ret_value);
|
||||
extern struct filename *__audit_reusename(const __user char *uptr);
|
||||
@@ -141,12 +146,12 @@ static inline void audit_free(struct task_struct *task)
|
||||
if (unlikely(task->audit_context))
|
||||
__audit_free(task);
|
||||
}
|
||||
static inline void audit_syscall_entry(int arch, int major, unsigned long a0,
|
||||
static inline void audit_syscall_entry(int major, unsigned long a0,
|
||||
unsigned long a1, unsigned long a2,
|
||||
unsigned long a3)
|
||||
{
|
||||
if (unlikely(current->audit_context))
|
||||
__audit_syscall_entry(arch, major, a0, a1, a2, a3);
|
||||
__audit_syscall_entry(major, a0, a1, a2, a3);
|
||||
}
|
||||
static inline void audit_syscall_exit(void *pt_regs)
|
||||
{
|
||||
@@ -322,7 +327,7 @@ static inline int audit_alloc(struct task_struct *task)
|
||||
}
|
||||
static inline void audit_free(struct task_struct *task)
|
||||
{ }
|
||||
static inline void audit_syscall_entry(int arch, int major, unsigned long a0,
|
||||
static inline void audit_syscall_entry(int major, unsigned long a0,
|
||||
unsigned long a1, unsigned long a2,
|
||||
unsigned long a3)
|
||||
{ }
|
||||
|
@@ -28,12 +28,10 @@ struct dentry;
|
||||
* Bits in backing_dev_info.state
|
||||
*/
|
||||
enum bdi_state {
|
||||
BDI_wb_alloc, /* Default embedded wb allocated */
|
||||
BDI_async_congested, /* The async (write) queue is getting full */
|
||||
BDI_sync_congested, /* The sync queue is getting full */
|
||||
BDI_registered, /* bdi_register() was done */
|
||||
BDI_writeback_running, /* Writeback is in progress */
|
||||
BDI_unused, /* Available bits start here */
|
||||
};
|
||||
|
||||
typedef int (congested_fn)(void *, int);
|
||||
@@ -50,7 +48,6 @@ enum bdi_stat_item {
|
||||
|
||||
struct bdi_writeback {
|
||||
struct backing_dev_info *bdi; /* our parent bdi */
|
||||
unsigned int nr;
|
||||
|
||||
unsigned long last_old_flush; /* last old data flush */
|
||||
|
||||
@@ -124,7 +121,6 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi);
|
||||
void bdi_writeback_workfn(struct work_struct *work);
|
||||
int bdi_has_dirty_io(struct backing_dev_info *bdi);
|
||||
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
|
||||
void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);
|
||||
|
||||
extern spinlock_t bdi_lock;
|
||||
extern struct list_head bdi_list;
|
||||
|
@@ -27,10 +27,13 @@
|
||||
* counter raised only while it is under our special handling;
|
||||
*
|
||||
* iii. after the lockless scan step have selected a potential balloon page for
|
||||
* isolation, re-test the page->mapping flags and the page ref counter
|
||||
* isolation, re-test the PageBalloon mark and the PagePrivate flag
|
||||
* under the proper page lock, to ensure isolating a valid balloon page
|
||||
* (not yet isolated, nor under release procedure)
|
||||
*
|
||||
* iv. isolation or dequeueing procedure must clear PagePrivate flag under
|
||||
* page lock together with removing page from balloon device page list.
|
||||
*
|
||||
* The functions provided by this interface are placed to help on coping with
|
||||
* the aforementioned balloon page corner case, as well as to ensure the simple
|
||||
* set of exposed rules are satisfied while we are dealing with balloon pages
|
||||
@@ -54,43 +57,22 @@
|
||||
* balloon driver as a page book-keeper for its registered balloon devices.
|
||||
*/
|
||||
struct balloon_dev_info {
|
||||
void *balloon_device; /* balloon device descriptor */
|
||||
struct address_space *mapping; /* balloon special page->mapping */
|
||||
unsigned long isolated_pages; /* # of isolated pages for migration */
|
||||
spinlock_t pages_lock; /* Protection to pages list */
|
||||
struct list_head pages; /* Pages enqueued & handled to Host */
|
||||
int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
|
||||
struct page *page, enum migrate_mode mode);
|
||||
};
|
||||
|
||||
extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
|
||||
extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
|
||||
extern struct balloon_dev_info *balloon_devinfo_alloc(
|
||||
void *balloon_dev_descriptor);
|
||||
|
||||
static inline void balloon_devinfo_free(struct balloon_dev_info *b_dev_info)
|
||||
static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
|
||||
{
|
||||
kfree(b_dev_info);
|
||||
}
|
||||
|
||||
/*
|
||||
* balloon_page_free - release a balloon page back to the page free lists
|
||||
* @page: ballooned page to be set free
|
||||
*
|
||||
* This function must be used to properly set free an isolated/dequeued balloon
|
||||
* page at the end of a sucessful page migration, or at the balloon driver's
|
||||
* page release procedure.
|
||||
*/
|
||||
static inline void balloon_page_free(struct page *page)
|
||||
{
|
||||
/*
|
||||
* Balloon pages always get an extra refcount before being isolated
|
||||
* and before being dequeued to help on sorting out fortuite colisions
|
||||
* between a thread attempting to isolate and another thread attempting
|
||||
* to release the very same balloon page.
|
||||
*
|
||||
* Before we handle the page back to Buddy, lets drop its extra refcnt.
|
||||
*/
|
||||
put_page(page);
|
||||
__free_page(page);
|
||||
balloon->isolated_pages = 0;
|
||||
spin_lock_init(&balloon->pages_lock);
|
||||
INIT_LIST_HEAD(&balloon->pages);
|
||||
balloon->migratepage = NULL;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BALLOON_COMPACTION
|
||||
@@ -98,107 +80,58 @@ extern bool balloon_page_isolate(struct page *page);
|
||||
extern void balloon_page_putback(struct page *page);
|
||||
extern int balloon_page_migrate(struct page *newpage,
|
||||
struct page *page, enum migrate_mode mode);
|
||||
extern struct address_space
|
||||
*balloon_mapping_alloc(struct balloon_dev_info *b_dev_info,
|
||||
const struct address_space_operations *a_ops);
|
||||
|
||||
static inline void balloon_mapping_free(struct address_space *balloon_mapping)
|
||||
{
|
||||
kfree(balloon_mapping);
|
||||
}
|
||||
|
||||
/*
|
||||
* page_flags_cleared - helper to perform balloon @page ->flags tests.
|
||||
*
|
||||
* As balloon pages are obtained from buddy and we do not play with page->flags
|
||||
* at driver level (exception made when we get the page lock for compaction),
|
||||
* we can safely identify a ballooned page by checking if the
|
||||
* PAGE_FLAGS_CHECK_AT_PREP page->flags are all cleared. This approach also
|
||||
* helps us skip ballooned pages that are locked for compaction or release, thus
|
||||
* mitigating their racy check at balloon_page_movable()
|
||||
*/
|
||||
static inline bool page_flags_cleared(struct page *page)
|
||||
{
|
||||
return !(page->flags & PAGE_FLAGS_CHECK_AT_PREP);
|
||||
}
|
||||
|
||||
/*
|
||||
* __is_movable_balloon_page - helper to perform @page mapping->flags tests
|
||||
* __is_movable_balloon_page - helper to perform @page PageBalloon tests
|
||||
*/
|
||||
static inline bool __is_movable_balloon_page(struct page *page)
|
||||
{
|
||||
struct address_space *mapping = page->mapping;
|
||||
return mapping_balloon(mapping);
|
||||
return PageBalloon(page);
|
||||
}
|
||||
|
||||
/*
|
||||
* balloon_page_movable - test page->mapping->flags to identify balloon pages
|
||||
* that can be moved by compaction/migration.
|
||||
*
|
||||
* This function is used at core compaction's page isolation scheme, therefore
|
||||
* most pages exposed to it are not enlisted as balloon pages and so, to avoid
|
||||
* undesired side effects like racing against __free_pages(), we cannot afford
|
||||
* holding the page locked while testing page->mapping->flags here.
|
||||
* balloon_page_movable - test PageBalloon to identify balloon pages
|
||||
* and PagePrivate to check that the page is not
|
||||
* isolated and can be moved by compaction/migration.
|
||||
*
|
||||
* As we might return false positives in the case of a balloon page being just
|
||||
* released under us, the page->mapping->flags need to be re-tested later,
|
||||
* under the proper page lock, at the functions that will be coping with the
|
||||
* balloon page case.
|
||||
* released under us, this need to be re-tested later, under the page lock.
|
||||
*/
|
||||
static inline bool balloon_page_movable(struct page *page)
|
||||
{
|
||||
/*
|
||||
* Before dereferencing and testing mapping->flags, let's make sure
|
||||
* this is not a page that uses ->mapping in a different way
|
||||
*/
|
||||
if (page_flags_cleared(page) && !page_mapped(page) &&
|
||||
page_count(page) == 1)
|
||||
return __is_movable_balloon_page(page);
|
||||
|
||||
return false;
|
||||
return PageBalloon(page) && PagePrivate(page);
|
||||
}
|
||||
|
||||
/*
|
||||
* isolated_balloon_page - identify an isolated balloon page on private
|
||||
* compaction/migration page lists.
|
||||
*
|
||||
* After a compaction thread isolates a balloon page for migration, it raises
|
||||
* the page refcount to prevent concurrent compaction threads from re-isolating
|
||||
* the same page. For that reason putback_movable_pages(), or other routines
|
||||
* that need to identify isolated balloon pages on private pagelists, cannot
|
||||
* rely on balloon_page_movable() to accomplish the task.
|
||||
*/
|
||||
static inline bool isolated_balloon_page(struct page *page)
|
||||
{
|
||||
/* Already isolated balloon pages, by default, have a raised refcount */
|
||||
if (page_flags_cleared(page) && !page_mapped(page) &&
|
||||
page_count(page) >= 2)
|
||||
return __is_movable_balloon_page(page);
|
||||
|
||||
return false;
|
||||
return PageBalloon(page);
|
||||
}
|
||||
|
||||
/*
|
||||
* balloon_page_insert - insert a page into the balloon's page list and make
|
||||
* the page->mapping assignment accordingly.
|
||||
* the page->private assignment accordingly.
|
||||
* @balloon : pointer to balloon device
|
||||
* @page : page to be assigned as a 'balloon page'
|
||||
* @mapping : allocated special 'balloon_mapping'
|
||||
* @head : balloon's device page list head
|
||||
*
|
||||
* Caller must ensure the page is locked and the spin_lock protecting balloon
|
||||
* pages list is held before inserting a page into the balloon device.
|
||||
*/
|
||||
static inline void balloon_page_insert(struct page *page,
|
||||
struct address_space *mapping,
|
||||
struct list_head *head)
|
||||
static inline void balloon_page_insert(struct balloon_dev_info *balloon,
|
||||
struct page *page)
|
||||
{
|
||||
page->mapping = mapping;
|
||||
list_add(&page->lru, head);
|
||||
__SetPageBalloon(page);
|
||||
SetPagePrivate(page);
|
||||
set_page_private(page, (unsigned long)balloon);
|
||||
list_add(&page->lru, &balloon->pages);
|
||||
}
|
||||
|
||||
/*
|
||||
* balloon_page_delete - delete a page from balloon's page list and clear
|
||||
* the page->mapping assignement accordingly.
|
||||
* the page->private assignement accordingly.
|
||||
* @page : page to be released from balloon's page list
|
||||
*
|
||||
* Caller must ensure the page is locked and the spin_lock protecting balloon
|
||||
@@ -206,8 +139,12 @@ static inline void balloon_page_insert(struct page *page,
|
||||
*/
|
||||
static inline void balloon_page_delete(struct page *page)
|
||||
{
|
||||
page->mapping = NULL;
|
||||
list_del(&page->lru);
|
||||
__ClearPageBalloon(page);
|
||||
set_page_private(page, 0);
|
||||
if (PagePrivate(page)) {
|
||||
ClearPagePrivate(page);
|
||||
list_del(&page->lru);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -216,11 +153,7 @@ static inline void balloon_page_delete(struct page *page)
|
||||
*/
|
||||
static inline struct balloon_dev_info *balloon_page_device(struct page *page)
|
||||
{
|
||||
struct address_space *mapping = page->mapping;
|
||||
if (likely(mapping))
|
||||
return mapping->private_data;
|
||||
|
||||
return NULL;
|
||||
return (struct balloon_dev_info *)page_private(page);
|
||||
}
|
||||
|
||||
static inline gfp_t balloon_mapping_gfp_mask(void)
|
||||
@@ -228,36 +161,26 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
|
||||
return GFP_HIGHUSER_MOVABLE;
|
||||
}
|
||||
|
||||
static inline bool balloon_compaction_check(void)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
#else /* !CONFIG_BALLOON_COMPACTION */
|
||||
|
||||
static inline void *balloon_mapping_alloc(void *balloon_device,
|
||||
const struct address_space_operations *a_ops)
|
||||
static inline void balloon_page_insert(struct balloon_dev_info *balloon,
|
||||
struct page *page)
|
||||
{
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
}
|
||||
|
||||
static inline void balloon_mapping_free(struct address_space *balloon_mapping)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
static inline void balloon_page_insert(struct page *page,
|
||||
struct address_space *mapping,
|
||||
struct list_head *head)
|
||||
{
|
||||
list_add(&page->lru, head);
|
||||
__SetPageBalloon(page);
|
||||
list_add(&page->lru, &balloon->pages);
|
||||
}
|
||||
|
||||
static inline void balloon_page_delete(struct page *page)
|
||||
{
|
||||
__ClearPageBalloon(page);
|
||||
list_del(&page->lru);
|
||||
}
|
||||
|
||||
static inline bool __is_movable_balloon_page(struct page *page)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool balloon_page_movable(struct page *page)
|
||||
{
|
||||
return false;
|
||||
@@ -289,9 +212,5 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
|
||||
return GFP_HIGHUSER;
|
||||
}
|
||||
|
||||
static inline bool balloon_compaction_check(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif /* CONFIG_BALLOON_COMPACTION */
|
||||
#endif /* _LINUX_BALLOON_COMPACTION_H */
|
||||
|
@@ -267,7 +267,7 @@ struct bcma_device {
|
||||
u8 core_unit;
|
||||
|
||||
u32 addr;
|
||||
u32 addr1;
|
||||
u32 addr_s[8];
|
||||
u32 wrap;
|
||||
|
||||
void __iomem *io_addr;
|
||||
@@ -323,6 +323,8 @@ struct bcma_bus {
|
||||
struct pci_dev *host_pci;
|
||||
/* Pointer to the SDIO device (only for BCMA_HOSTTYPE_SDIO) */
|
||||
struct sdio_func *host_sdio;
|
||||
/* Pointer to platform device (only for BCMA_HOSTTYPE_SOC) */
|
||||
struct platform_device *host_pdev;
|
||||
};
|
||||
|
||||
struct bcma_chipinfo chipinfo;
|
||||
@@ -332,10 +334,10 @@ struct bcma_bus {
|
||||
struct bcma_device *mapped_core;
|
||||
struct list_head cores;
|
||||
u8 nr_cores;
|
||||
u8 init_done:1;
|
||||
u8 num;
|
||||
|
||||
struct bcma_drv_cc drv_cc;
|
||||
struct bcma_drv_cc_b drv_cc_b;
|
||||
struct bcma_drv_pci drv_pci[2];
|
||||
struct bcma_drv_pcie2 drv_pcie2;
|
||||
struct bcma_drv_mips drv_mips;
|
||||
|
@@ -644,6 +644,12 @@ struct bcma_drv_cc {
|
||||
#endif
|
||||
};
|
||||
|
||||
struct bcma_drv_cc_b {
|
||||
struct bcma_device *core;
|
||||
u8 setup_done:1;
|
||||
void __iomem *mii;
|
||||
};
|
||||
|
||||
/* Register access */
|
||||
#define bcma_cc_read32(cc, offset) \
|
||||
bcma_read32((cc)->core, offset)
|
||||
@@ -699,4 +705,6 @@ extern void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid);
|
||||
|
||||
extern u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc);
|
||||
|
||||
void bcma_chipco_b_mii_write(struct bcma_drv_cc_b *ccb, u32 offset, u32 value);
|
||||
|
||||
#endif /* LINUX_BCMA_DRIVER_CC_H_ */
|
||||
|
@@ -39,6 +39,11 @@
|
||||
#define BCMA_RESET_CTL_RESET 0x0001
|
||||
#define BCMA_RESET_ST 0x0804
|
||||
|
||||
#define BCMA_NS_ROM_IOST_BOOT_DEV_MASK 0x0003
|
||||
#define BCMA_NS_ROM_IOST_BOOT_DEV_NOR 0x0000
|
||||
#define BCMA_NS_ROM_IOST_BOOT_DEV_NAND 0x0001
|
||||
#define BCMA_NS_ROM_IOST_BOOT_DEV_ROM 0x0002
|
||||
|
||||
/* BCMA PCI config space registers. */
|
||||
#define BCMA_PCI_PMCSR 0x44
|
||||
#define BCMA_PCI_PE 0x100
|
||||
|
@@ -10,6 +10,7 @@ struct bcma_soc {
|
||||
};
|
||||
|
||||
int __init bcma_host_soc_register(struct bcma_soc *soc);
|
||||
int __init bcma_host_soc_init(struct bcma_soc *soc);
|
||||
|
||||
int bcma_bus_register(struct bcma_bus *bus);
|
||||
|
||||
|
@@ -292,7 +292,24 @@ static inline unsigned bio_segments(struct bio *bio)
|
||||
*/
|
||||
#define bio_get(bio) atomic_inc(&(bio)->bi_cnt)
|
||||
|
||||
enum bip_flags {
|
||||
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
|
||||
BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
|
||||
BIP_CTRL_NOCHECK = 1 << 2, /* disable HBA integrity checking */
|
||||
BIP_DISK_NOCHECK = 1 << 3, /* disable disk integrity checking */
|
||||
BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */
|
||||
};
|
||||
|
||||
#if defined(CONFIG_BLK_DEV_INTEGRITY)
|
||||
|
||||
static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
|
||||
{
|
||||
if (bio->bi_rw & REQ_INTEGRITY)
|
||||
return bio->bi_integrity;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* bio integrity payload
|
||||
*/
|
||||
@@ -301,20 +318,40 @@ struct bio_integrity_payload {
|
||||
|
||||
struct bvec_iter bip_iter;
|
||||
|
||||
/* kill - should just use bip_vec */
|
||||
void *bip_buf; /* generated integrity data */
|
||||
|
||||
bio_end_io_t *bip_end_io; /* saved I/O completion fn */
|
||||
|
||||
unsigned short bip_slab; /* slab the bip came from */
|
||||
unsigned short bip_vcnt; /* # of integrity bio_vecs */
|
||||
unsigned bip_owns_buf:1; /* should free bip_buf */
|
||||
unsigned short bip_max_vcnt; /* integrity bio_vec slots */
|
||||
unsigned short bip_flags; /* control flags */
|
||||
|
||||
struct work_struct bip_work; /* I/O completion */
|
||||
|
||||
struct bio_vec *bip_vec;
|
||||
struct bio_vec bip_inline_vecs[0];/* embedded bvec array */
|
||||
};
|
||||
|
||||
static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
|
||||
{
|
||||
struct bio_integrity_payload *bip = bio_integrity(bio);
|
||||
|
||||
if (bip)
|
||||
return bip->bip_flags & flag;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
|
||||
{
|
||||
return bip->bip_iter.bi_sector;
|
||||
}
|
||||
|
||||
static inline void bip_set_seed(struct bio_integrity_payload *bip,
|
||||
sector_t seed)
|
||||
{
|
||||
bip->bip_iter.bi_sector = seed;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_BLK_DEV_INTEGRITY */
|
||||
|
||||
extern void bio_trim(struct bio *bio, int offset, int size);
|
||||
@@ -341,6 +378,7 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
|
||||
}
|
||||
|
||||
extern struct bio_set *bioset_create(unsigned int, unsigned int);
|
||||
extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int);
|
||||
extern void bioset_free(struct bio_set *);
|
||||
extern mempool_t *biovec_create_pool(int pool_entries);
|
||||
|
||||
@@ -352,7 +390,6 @@ extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
|
||||
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
|
||||
|
||||
extern struct bio_set *fs_bio_set;
|
||||
unsigned int bio_integrity_tag_size(struct bio *bio);
|
||||
|
||||
static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
|
||||
{
|
||||
@@ -660,14 +697,10 @@ struct biovec_slab {
|
||||
for_each_bio(_bio) \
|
||||
bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
|
||||
|
||||
#define bio_integrity(bio) (bio->bi_integrity != NULL)
|
||||
|
||||
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
|
||||
extern void bio_integrity_free(struct bio *);
|
||||
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
|
||||
extern int bio_integrity_enabled(struct bio *bio);
|
||||
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
|
||||
extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
|
||||
extern bool bio_integrity_enabled(struct bio *bio);
|
||||
extern int bio_integrity_prep(struct bio *);
|
||||
extern void bio_integrity_endio(struct bio *, int);
|
||||
extern void bio_integrity_advance(struct bio *, unsigned int);
|
||||
@@ -679,14 +712,14 @@ extern void bio_integrity_init(void);
|
||||
|
||||
#else /* CONFIG_BLK_DEV_INTEGRITY */
|
||||
|
||||
static inline int bio_integrity(struct bio *bio)
|
||||
static inline void *bio_integrity(struct bio *bio)
|
||||
{
|
||||
return 0;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline int bio_integrity_enabled(struct bio *bio)
|
||||
static inline bool bio_integrity_enabled(struct bio *bio)
|
||||
{
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
|
||||
@@ -732,6 +765,11 @@ static inline void bio_integrity_init(void)
|
||||
return;
|
||||
}
|
||||
|
||||
static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_BLK_DEV_INTEGRITY */
|
||||
|
||||
#endif /* CONFIG_BLOCK */
|
||||
|
@@ -88,32 +88,32 @@
|
||||
* lib/bitmap.c provides these functions:
|
||||
*/
|
||||
|
||||
extern int __bitmap_empty(const unsigned long *bitmap, int bits);
|
||||
extern int __bitmap_full(const unsigned long *bitmap, int bits);
|
||||
extern int __bitmap_empty(const unsigned long *bitmap, unsigned int nbits);
|
||||
extern int __bitmap_full(const unsigned long *bitmap, unsigned int nbits);
|
||||
extern int __bitmap_equal(const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
|
||||
int bits);
|
||||
unsigned int nbits);
|
||||
extern void __bitmap_shift_right(unsigned long *dst,
|
||||
const unsigned long *src, int shift, int bits);
|
||||
extern void __bitmap_shift_left(unsigned long *dst,
|
||||
const unsigned long *src, int shift, int bits);
|
||||
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern int __bitmap_intersects(const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern int __bitmap_subset(const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
extern int __bitmap_weight(const unsigned long *bitmap, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
|
||||
|
||||
extern void bitmap_set(unsigned long *map, int i, int len);
|
||||
extern void bitmap_clear(unsigned long *map, int start, int nr);
|
||||
extern void bitmap_set(unsigned long *map, unsigned int start, int len);
|
||||
extern void bitmap_clear(unsigned long *map, unsigned int start, int len);
|
||||
extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
|
||||
unsigned long size,
|
||||
unsigned long start,
|
||||
@@ -140,9 +140,9 @@ extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
|
||||
const unsigned long *relmap, int bits);
|
||||
extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
|
||||
int sz, int bits);
|
||||
extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
|
||||
extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
|
||||
extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
|
||||
extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
|
||||
extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
|
||||
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
|
||||
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
|
||||
extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits);
|
||||
|
||||
@@ -188,15 +188,15 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
|
||||
}
|
||||
|
||||
static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return (*dst = *src1 & *src2) != 0;
|
||||
return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
|
||||
return __bitmap_and(dst, src1, src2, nbits);
|
||||
}
|
||||
|
||||
static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
*dst = *src1 | *src2;
|
||||
@@ -205,7 +205,7 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
|
||||
}
|
||||
|
||||
static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
*dst = *src1 ^ *src2;
|
||||
@@ -214,24 +214,24 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
|
||||
}
|
||||
|
||||
static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return (*dst = *src1 & ~(*src2)) != 0;
|
||||
return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
|
||||
return __bitmap_andnot(dst, src1, src2, nbits);
|
||||
}
|
||||
|
||||
static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
|
||||
int nbits)
|
||||
unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
*dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits);
|
||||
*dst = ~(*src);
|
||||
else
|
||||
__bitmap_complement(dst, src, nbits);
|
||||
}
|
||||
|
||||
static inline int bitmap_equal(const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
|
||||
@@ -240,7 +240,7 @@ static inline int bitmap_equal(const unsigned long *src1,
|
||||
}
|
||||
|
||||
static inline int bitmap_intersects(const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
|
||||
@@ -249,7 +249,7 @@ static inline int bitmap_intersects(const unsigned long *src1,
|
||||
}
|
||||
|
||||
static inline int bitmap_subset(const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
|
||||
@@ -257,7 +257,7 @@ static inline int bitmap_subset(const unsigned long *src1,
|
||||
return __bitmap_subset(src1, src2, nbits);
|
||||
}
|
||||
|
||||
static inline int bitmap_empty(const unsigned long *src, int nbits)
|
||||
static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
|
||||
@@ -265,7 +265,7 @@ static inline int bitmap_empty(const unsigned long *src, int nbits)
|
||||
return __bitmap_empty(src, nbits);
|
||||
}
|
||||
|
||||
static inline int bitmap_full(const unsigned long *src, int nbits)
|
||||
static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
|
||||
@@ -273,7 +273,7 @@ static inline int bitmap_full(const unsigned long *src, int nbits)
|
||||
return __bitmap_full(src, nbits);
|
||||
}
|
||||
|
||||
static inline int bitmap_weight(const unsigned long *src, int nbits)
|
||||
static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
|
||||
@@ -284,7 +284,7 @@ static inline void bitmap_shift_right(unsigned long *dst,
|
||||
const unsigned long *src, int n, int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
*dst = *src >> n;
|
||||
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> n;
|
||||
else
|
||||
__bitmap_shift_right(dst, src, n, nbits);
|
||||
}
|
||||
|
@@ -32,26 +32,6 @@ extern unsigned long __sw_hweight64(__u64 w);
|
||||
*/
|
||||
#include <asm/bitops.h>
|
||||
|
||||
/*
|
||||
* Provide __deprecated wrappers for the new interface, avoid flag day changes.
|
||||
* We need the ugly external functions to break header recursion hell.
|
||||
*/
|
||||
#ifndef smp_mb__before_clear_bit
|
||||
static inline void __deprecated smp_mb__before_clear_bit(void)
|
||||
{
|
||||
extern void __smp_mb__before_atomic(void);
|
||||
__smp_mb__before_atomic();
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef smp_mb__after_clear_bit
|
||||
static inline void __deprecated smp_mb__after_clear_bit(void)
|
||||
{
|
||||
extern void __smp_mb__after_atomic(void);
|
||||
__smp_mb__after_atomic();
|
||||
}
|
||||
#endif
|
||||
|
||||
#define for_each_set_bit(bit, addr, size) \
|
||||
for ((bit) = find_first_bit((addr), (size)); \
|
||||
(bit) < (size); \
|
||||
|
@@ -4,6 +4,7 @@
|
||||
#include <linux/blkdev.h>
|
||||
|
||||
struct blk_mq_tags;
|
||||
struct blk_flush_queue;
|
||||
|
||||
struct blk_mq_cpu_notifier {
|
||||
struct list_head list;
|
||||
@@ -34,6 +35,7 @@ struct blk_mq_hw_ctx {
|
||||
|
||||
struct request_queue *queue;
|
||||
unsigned int queue_num;
|
||||
struct blk_flush_queue *fq;
|
||||
|
||||
void *driver_data;
|
||||
|
||||
@@ -77,8 +79,9 @@ struct blk_mq_tag_set {
|
||||
struct list_head tag_list;
|
||||
};
|
||||
|
||||
typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
|
||||
typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *, bool);
|
||||
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
|
||||
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
|
||||
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
|
||||
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
|
||||
typedef int (init_request_fn)(void *, struct request *, unsigned int,
|
||||
@@ -86,6 +89,9 @@ typedef int (init_request_fn)(void *, struct request *, unsigned int,
|
||||
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
|
||||
unsigned int);
|
||||
|
||||
typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
|
||||
bool);
|
||||
|
||||
struct blk_mq_ops {
|
||||
/*
|
||||
* Queue request
|
||||
@@ -100,7 +106,7 @@ struct blk_mq_ops {
|
||||
/*
|
||||
* Called on request timeout
|
||||
*/
|
||||
rq_timed_out_fn *timeout;
|
||||
timeout_fn *timeout;
|
||||
|
||||
softirq_done_fn *complete;
|
||||
|
||||
@@ -115,6 +121,10 @@ struct blk_mq_ops {
|
||||
/*
|
||||
* Called for every command allocated by the block layer to allow
|
||||
* the driver to set up driver specific data.
|
||||
*
|
||||
* Tag greater than or equal to queue_depth is for setting up
|
||||
* flush request.
|
||||
*
|
||||
* Ditto for exit/teardown.
|
||||
*/
|
||||
init_request_fn *init_request;
|
||||
@@ -127,10 +137,9 @@ enum {
|
||||
BLK_MQ_RQ_QUEUE_ERROR = 2, /* end IO with error */
|
||||
|
||||
BLK_MQ_F_SHOULD_MERGE = 1 << 0,
|
||||
BLK_MQ_F_SHOULD_SORT = 1 << 1,
|
||||
BLK_MQ_F_TAG_SHARED = 1 << 2,
|
||||
BLK_MQ_F_SG_MERGE = 1 << 3,
|
||||
BLK_MQ_F_SYSFS_UP = 1 << 4,
|
||||
BLK_MQ_F_TAG_SHARED = 1 << 1,
|
||||
BLK_MQ_F_SG_MERGE = 1 << 2,
|
||||
BLK_MQ_F_SYSFS_UP = 1 << 3,
|
||||
|
||||
BLK_MQ_S_STOPPED = 0,
|
||||
BLK_MQ_S_TAG_ACTIVE = 1,
|
||||
@@ -141,6 +150,7 @@ enum {
|
||||
};
|
||||
|
||||
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
|
||||
void blk_mq_finish_init(struct request_queue *q);
|
||||
int blk_mq_register_disk(struct gendisk *);
|
||||
void blk_mq_unregister_disk(struct gendisk *);
|
||||
|
||||
@@ -160,8 +170,9 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
|
||||
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
|
||||
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
|
||||
|
||||
void blk_mq_end_io(struct request *rq, int error);
|
||||
void __blk_mq_end_io(struct request *rq, int error);
|
||||
void blk_mq_start_request(struct request *rq);
|
||||
void blk_mq_end_request(struct request *rq, int error);
|
||||
void __blk_mq_end_request(struct request *rq, int error);
|
||||
|
||||
void blk_mq_requeue_request(struct request *rq);
|
||||
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
|
||||
@@ -174,7 +185,8 @@ void blk_mq_stop_hw_queues(struct request_queue *q);
|
||||
void blk_mq_start_hw_queues(struct request_queue *q);
|
||||
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
|
||||
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
|
||||
void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data);
|
||||
void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
|
||||
void *priv);
|
||||
|
||||
/*
|
||||
* Driver command data is immediately after the request. So subtract request
|
||||
|
@@ -78,9 +78,11 @@ struct bio {
|
||||
struct io_context *bi_ioc;
|
||||
struct cgroup_subsys_state *bi_css;
|
||||
#endif
|
||||
union {
|
||||
#if defined(CONFIG_BLK_DEV_INTEGRITY)
|
||||
struct bio_integrity_payload *bi_integrity; /* data integrity */
|
||||
struct bio_integrity_payload *bi_integrity; /* data integrity */
|
||||
#endif
|
||||
};
|
||||
|
||||
unsigned short bi_vcnt; /* how many bio_vec's */
|
||||
|
||||
@@ -118,10 +120,8 @@ struct bio {
|
||||
#define BIO_USER_MAPPED 6 /* contains user pages */
|
||||
#define BIO_EOPNOTSUPP 7 /* not supported */
|
||||
#define BIO_NULL_MAPPED 8 /* contains invalid user pages */
|
||||
#define BIO_FS_INTEGRITY 9 /* fs owns integrity data, not block layer */
|
||||
#define BIO_QUIET 10 /* Make BIO Quiet */
|
||||
#define BIO_MAPPED_INTEGRITY 11/* integrity metadata has been remapped */
|
||||
#define BIO_SNAP_STABLE 12 /* bio data must be snapshotted during write */
|
||||
#define BIO_QUIET 9 /* Make BIO Quiet */
|
||||
#define BIO_SNAP_STABLE 10 /* bio data must be snapshotted during write */
|
||||
|
||||
/*
|
||||
* Flags starting here get preserved by bio_reset() - this includes
|
||||
@@ -162,6 +162,7 @@ enum rq_flag_bits {
|
||||
__REQ_WRITE_SAME, /* write same block many times */
|
||||
|
||||
__REQ_NOIDLE, /* don't anticipate more IO after this one */
|
||||
__REQ_INTEGRITY, /* I/O includes block integrity payload */
|
||||
__REQ_FUA, /* forced unit access */
|
||||
__REQ_FLUSH, /* request for cache flush */
|
||||
|
||||
@@ -186,9 +187,7 @@ enum rq_flag_bits {
|
||||
__REQ_FLUSH_SEQ, /* request for flush sequence */
|
||||
__REQ_IO_STAT, /* account I/O stat */
|
||||
__REQ_MIXED_MERGE, /* merge of different types, fail separately */
|
||||
__REQ_KERNEL, /* direct IO to kernel pages */
|
||||
__REQ_PM, /* runtime pm request */
|
||||
__REQ_END, /* last of chain of requests */
|
||||
__REQ_HASHED, /* on IO scheduler merge hash */
|
||||
__REQ_MQ_INFLIGHT, /* track inflight for MQ */
|
||||
__REQ_NR_BITS, /* stops here */
|
||||
@@ -204,13 +203,14 @@ enum rq_flag_bits {
|
||||
#define REQ_DISCARD (1ULL << __REQ_DISCARD)
|
||||
#define REQ_WRITE_SAME (1ULL << __REQ_WRITE_SAME)
|
||||
#define REQ_NOIDLE (1ULL << __REQ_NOIDLE)
|
||||
#define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY)
|
||||
|
||||
#define REQ_FAILFAST_MASK \
|
||||
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
|
||||
#define REQ_COMMON_MASK \
|
||||
(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
|
||||
REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
|
||||
REQ_SECURE)
|
||||
REQ_SECURE | REQ_INTEGRITY)
|
||||
#define REQ_CLONE_MASK REQ_COMMON_MASK
|
||||
|
||||
#define BIO_NO_ADVANCE_ITER_MASK (REQ_DISCARD|REQ_WRITE_SAME)
|
||||
@@ -240,9 +240,7 @@ enum rq_flag_bits {
|
||||
#define REQ_IO_STAT (1ULL << __REQ_IO_STAT)
|
||||
#define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE)
|
||||
#define REQ_SECURE (1ULL << __REQ_SECURE)
|
||||
#define REQ_KERNEL (1ULL << __REQ_KERNEL)
|
||||
#define REQ_PM (1ULL << __REQ_PM)
|
||||
#define REQ_END (1ULL << __REQ_END)
|
||||
#define REQ_HASHED (1ULL << __REQ_HASHED)
|
||||
#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
|
||||
|
||||
|
@@ -21,6 +21,7 @@
|
||||
#include <linux/bsg.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/percpu-refcount.h>
|
||||
|
||||
#include <asm/scatterlist.h>
|
||||
|
||||
@@ -35,6 +36,7 @@ struct request;
|
||||
struct sg_io_hdr;
|
||||
struct bsg_job;
|
||||
struct blkcg_gq;
|
||||
struct blk_flush_queue;
|
||||
|
||||
#define BLKDEV_MIN_RQ 4
|
||||
#define BLKDEV_MAX_RQ 128 /* Default maximum */
|
||||
@@ -454,14 +456,7 @@ struct request_queue {
|
||||
*/
|
||||
unsigned int flush_flags;
|
||||
unsigned int flush_not_queueable:1;
|
||||
unsigned int flush_queue_delayed:1;
|
||||
unsigned int flush_pending_idx:1;
|
||||
unsigned int flush_running_idx:1;
|
||||
unsigned long flush_pending_since;
|
||||
struct list_head flush_queue[2];
|
||||
struct list_head flush_data_in_flight;
|
||||
struct request *flush_rq;
|
||||
spinlock_t mq_flush_lock;
|
||||
struct blk_flush_queue *fq;
|
||||
|
||||
struct list_head requeue_list;
|
||||
spinlock_t requeue_lock;
|
||||
@@ -470,6 +465,7 @@ struct request_queue {
|
||||
struct mutex sysfs_lock;
|
||||
|
||||
int bypass_depth;
|
||||
int mq_freeze_depth;
|
||||
|
||||
#if defined(CONFIG_BLK_DEV_BSG)
|
||||
bsg_job_fn *bsg_job_fn;
|
||||
@@ -483,7 +479,7 @@ struct request_queue {
|
||||
#endif
|
||||
struct rcu_head rcu_head;
|
||||
wait_queue_head_t mq_freeze_wq;
|
||||
struct percpu_counter mq_usage_counter;
|
||||
struct percpu_ref mq_usage_counter;
|
||||
struct list_head all_q_node;
|
||||
|
||||
struct blk_mq_tag_set *tag_set;
|
||||
@@ -863,7 +859,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
|
||||
|
||||
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
|
||||
{
|
||||
return bdev->bd_disk->queue;
|
||||
return bdev->bd_disk->queue; /* this is never NULL */
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1140,8 +1136,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
|
||||
/*
|
||||
* tag stuff
|
||||
*/
|
||||
#define blk_rq_tagged(rq) \
|
||||
((rq)->mq_ctx || ((rq)->cmd_flags & REQ_QUEUED))
|
||||
#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED)
|
||||
extern int blk_queue_start_tag(struct request_queue *, struct request *);
|
||||
extern struct request *blk_queue_find_tag(struct request_queue *, int);
|
||||
extern void blk_queue_end_tag(struct request_queue *, struct request *);
|
||||
@@ -1283,10 +1278,9 @@ static inline int queue_alignment_offset(struct request_queue *q)
|
||||
static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
|
||||
{
|
||||
unsigned int granularity = max(lim->physical_block_size, lim->io_min);
|
||||
unsigned int alignment = (sector << 9) & (granularity - 1);
|
||||
unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
|
||||
|
||||
return (granularity + lim->alignment_offset - alignment)
|
||||
& (granularity - 1);
|
||||
return (granularity + lim->alignment_offset - alignment) % granularity;
|
||||
}
|
||||
|
||||
static inline int bdev_alignment_offset(struct block_device *bdev)
|
||||
@@ -1462,32 +1456,31 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
|
||||
|
||||
#if defined(CONFIG_BLK_DEV_INTEGRITY)
|
||||
|
||||
#define INTEGRITY_FLAG_READ 2 /* verify data integrity on read */
|
||||
#define INTEGRITY_FLAG_WRITE 4 /* generate data integrity on write */
|
||||
enum blk_integrity_flags {
|
||||
BLK_INTEGRITY_VERIFY = 1 << 0,
|
||||
BLK_INTEGRITY_GENERATE = 1 << 1,
|
||||
BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
|
||||
BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
|
||||
};
|
||||
|
||||
struct blk_integrity_exchg {
|
||||
struct blk_integrity_iter {
|
||||
void *prot_buf;
|
||||
void *data_buf;
|
||||
sector_t sector;
|
||||
sector_t seed;
|
||||
unsigned int data_size;
|
||||
unsigned short sector_size;
|
||||
unsigned short interval;
|
||||
const char *disk_name;
|
||||
};
|
||||
|
||||
typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
|
||||
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
|
||||
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
|
||||
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);
|
||||
typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
|
||||
|
||||
struct blk_integrity {
|
||||
integrity_gen_fn *generate_fn;
|
||||
integrity_vrfy_fn *verify_fn;
|
||||
integrity_set_tag_fn *set_tag_fn;
|
||||
integrity_get_tag_fn *get_tag_fn;
|
||||
integrity_processing_fn *generate_fn;
|
||||
integrity_processing_fn *verify_fn;
|
||||
|
||||
unsigned short flags;
|
||||
unsigned short tuple_size;
|
||||
unsigned short sector_size;
|
||||
unsigned short interval;
|
||||
unsigned short tag_size;
|
||||
|
||||
const char *name;
|
||||
@@ -1502,10 +1495,10 @@ extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
|
||||
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
|
||||
struct scatterlist *);
|
||||
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
|
||||
extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
|
||||
struct request *);
|
||||
extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
|
||||
struct bio *);
|
||||
extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
|
||||
struct request *);
|
||||
extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
|
||||
struct bio *);
|
||||
|
||||
static inline
|
||||
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
|
||||
@@ -1518,12 +1511,9 @@ static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
|
||||
return disk->integrity;
|
||||
}
|
||||
|
||||
static inline int blk_integrity_rq(struct request *rq)
|
||||
static inline bool blk_integrity_rq(struct request *rq)
|
||||
{
|
||||
if (rq->bio == NULL)
|
||||
return 0;
|
||||
|
||||
return bio_integrity(rq->bio);
|
||||
return rq->cmd_flags & REQ_INTEGRITY;
|
||||
}
|
||||
|
||||
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
|
||||
@@ -1562,7 +1552,7 @@ static inline int blk_rq_map_integrity_sg(struct request_queue *q,
|
||||
}
|
||||
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
|
||||
{
|
||||
return 0;
|
||||
return NULL;
|
||||
}
|
||||
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
|
||||
{
|
||||
@@ -1588,17 +1578,17 @@ static inline unsigned short queue_max_integrity_segments(struct request_queue *
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline int blk_integrity_merge_rq(struct request_queue *rq,
|
||||
struct request *r1,
|
||||
struct request *r2)
|
||||
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
|
||||
struct request *r1,
|
||||
struct request *r2)
|
||||
{
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
static inline int blk_integrity_merge_bio(struct request_queue *rq,
|
||||
struct request *r,
|
||||
struct bio *b)
|
||||
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
|
||||
struct request *r,
|
||||
struct bio *b)
|
||||
{
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
static inline bool blk_integrity_is_initialized(struct gendisk *g)
|
||||
{
|
||||
|
@@ -46,6 +46,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat,
|
||||
extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
|
||||
|
||||
extern unsigned long free_all_bootmem(void);
|
||||
extern void reset_node_managed_pages(pg_data_t *pgdat);
|
||||
extern void reset_all_zones_managed_pages(void);
|
||||
|
||||
extern void free_bootmem_node(pg_data_t *pgdat,
|
||||
|
136	include/linux/bpf.h	Normal file
@@ -0,0 +1,136 @@
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/file.h>

struct bpf_map;

/* map is generic key/value storage optionally accesible by eBPF programs */
struct bpf_map_ops {
/* funcs callable from userspace (via syscall) */
struct bpf_map *(*map_alloc)(union bpf_attr *attr);
void (*map_free)(struct bpf_map *);
int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);

/* funcs callable from userspace and from eBPF programs */
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
int (*map_update_elem)(struct bpf_map *map, void *key, void *value);
int (*map_delete_elem)(struct bpf_map *map, void *key);
};

struct bpf_map {
atomic_t refcnt;
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
struct bpf_map_ops *ops;
struct work_struct work;
};

struct bpf_map_type_list {
struct list_head list_node;
struct bpf_map_ops *ops;
enum bpf_map_type type;
};

void bpf_register_map_type(struct bpf_map_type_list *tl);
void bpf_map_put(struct bpf_map *map);
struct bpf_map *bpf_map_get(struct fd f);

/* function argument constraints */
enum bpf_arg_type {
ARG_ANYTHING = 0, /* any argument is ok */

/* the following constraints used to prototype
* bpf_map_lookup/update/delete_elem() functions
*/
ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */
ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */

/* the following constraints used to prototype bpf_memcmp() and other
* functions that access data on eBPF program stack
*/
ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
};

/* type of values returned from helper functions */
enum bpf_return_type {
RET_INTEGER, /* function returns integer */
RET_VOID, /* function doesn't return anything */
RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
* to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
* instructions after verifying
*/
struct bpf_func_proto {
u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool gpl_only;
enum bpf_return_type ret_type;
enum bpf_arg_type arg1_type;
enum bpf_arg_type arg2_type;
enum bpf_arg_type arg3_type;
enum bpf_arg_type arg4_type;
enum bpf_arg_type arg5_type;
};

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
* the first argument to eBPF programs.
* For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
*/
struct bpf_context;

enum bpf_access_type {
BPF_READ = 1,
BPF_WRITE = 2
};

struct bpf_verifier_ops {
/* return eBPF function prototype for verification */
const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);

/* return true if 'size' wide access at offset 'off' within bpf_context
* with 'type' (read or write) is allowed
*/
bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
};

struct bpf_prog_type_list {
struct list_head list_node;
struct bpf_verifier_ops *ops;
enum bpf_prog_type type;
};

void bpf_register_prog_type(struct bpf_prog_type_list *tl);

struct bpf_prog;

struct bpf_prog_aux {
atomic_t refcnt;
bool is_gpl_compatible;
enum bpf_prog_type prog_type;
struct bpf_verifier_ops *ops;
struct bpf_map **used_maps;
u32 used_map_cnt;
struct bpf_prog *prog;
struct work_struct work;
};

void bpf_prog_put(struct bpf_prog *prog);
struct bpf_prog *bpf_prog_get(u32 ufd);
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog *fp, union bpf_attr *attr);

#endif /* _LINUX_BPF_H */
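
A rough sketch of how a map backend would plug into the ops/registration API declared above; the BPF_MAP_TYPE_UNSPEC constant is assumed to come from uapi/linux/bpf.h, the dummy_* names are placeholders, and a real map type would also fill in the lookup/update/delete and get_next_key callbacks:

#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>

/* Allocate an (empty) map object from the BPF_MAP_CREATE attributes. */
static struct bpf_map *dummy_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map = kzalloc(sizeof(*map), GFP_KERNEL);

	if (!map)
		return ERR_PTR(-ENOMEM);
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	return map;
}

static void dummy_map_free(struct bpf_map *map)
{
	kfree(map);
}

static struct bpf_map_ops dummy_map_ops = {
	.map_alloc = dummy_map_alloc,
	.map_free  = dummy_map_free,
};

static struct bpf_map_type_list dummy_map_type = {
	.ops  = &dummy_map_ops,
	.type = BPF_MAP_TYPE_UNSPEC,	/* placeholder type id */
};

static int __init dummy_map_register(void)
{
	bpf_register_map_type(&dummy_map_type);
	return 0;
}
late_initcall(dummy_map_register);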
@@ -13,18 +13,21 @@
|
||||
#define PHY_ID_BCM5461 0x002060c0
|
||||
#define PHY_ID_BCM57780 0x03625d90
|
||||
|
||||
#define PHY_ID_BCM7250 0xae025280
|
||||
#define PHY_ID_BCM7364 0xae025260
|
||||
#define PHY_ID_BCM7366 0x600d8490
|
||||
#define PHY_ID_BCM7425 0x03625e60
|
||||
#define PHY_ID_BCM7429 0x600d8730
|
||||
#define PHY_ID_BCM7439 0x600d8480
|
||||
#define PHY_ID_BCM7445 0x600d8510
|
||||
#define PHY_ID_BCM7XXX_28 0x600d8400
|
||||
|
||||
#define PHY_BCM_OUI_MASK 0xfffffc00
|
||||
#define PHY_BCM_OUI_1 0x00206000
|
||||
#define PHY_BCM_OUI_2 0x0143bc00
|
||||
#define PHY_BCM_OUI_3 0x03625c00
|
||||
#define PHY_BCM_OUI_4 0x600d0000
|
||||
#define PHY_BCM_OUI_4 0x600d8400
|
||||
#define PHY_BCM_OUI_5 0x03625e00
|
||||
|
||||
#define PHY_BCM_OUI_6 0xae025000
|
||||
|
||||
#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001
|
||||
#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002
|
||||
@@ -39,7 +42,8 @@
|
||||
#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000
|
||||
#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000
|
||||
/* Broadcom BCM7xxx specific workarounds */
|
||||
#define PHY_BRCM_100MBPS_WAR 0x00010000
|
||||
#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff)
|
||||
#define PHY_BRCM_7XXX_PATCH(x) ((x) & 0xff)
|
||||
#define PHY_BCM_FLAGS_VALID 0x80000000
|
||||
|
||||
/* Broadcom BCM54XX register definitions, common to most Broadcom PHYs */
|
||||
@@ -93,4 +97,130 @@
|
||||
|
||||
#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
|
||||
|
||||
/*
|
||||
* Broadcom LED source encodings. These are used in BCM5461, BCM5481,
|
||||
* BCM5482, and possibly some others.
|
||||
*/
|
||||
#define BCM_LED_SRC_LINKSPD1 0x0
|
||||
#define BCM_LED_SRC_LINKSPD2 0x1
|
||||
#define BCM_LED_SRC_XMITLED 0x2
|
||||
#define BCM_LED_SRC_ACTIVITYLED 0x3
|
||||
#define BCM_LED_SRC_FDXLED 0x4
|
||||
#define BCM_LED_SRC_SLAVE 0x5
|
||||
#define BCM_LED_SRC_INTR 0x6
|
||||
#define BCM_LED_SRC_QUALITY 0x7
|
||||
#define BCM_LED_SRC_RCVLED 0x8
|
||||
#define BCM_LED_SRC_MULTICOLOR1 0xa
|
||||
#define BCM_LED_SRC_OPENSHORT 0xb
|
||||
#define BCM_LED_SRC_OFF 0xe /* Tied high */
|
||||
#define BCM_LED_SRC_ON 0xf /* Tied low */
|
||||
|
||||
|
||||
/*
|
||||
* BCM5482: Shadow registers
|
||||
* Shadow values go into bits [14:10] of register 0x1c to select a shadow
|
||||
* register to access.
|
||||
*/
|
||||
/* 00101: Spare Control Register 3 */
|
||||
#define BCM54XX_SHD_SCR3 0x05
|
||||
#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001
|
||||
#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002
|
||||
#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004
|
||||
|
||||
/* 01010: Auto Power-Down */
|
||||
#define BCM54XX_SHD_APD 0x0a
|
||||
#define BCM54XX_SHD_APD_EN 0x0020
|
||||
|
||||
#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
|
||||
/* LED3 / ~LINKSPD[2] selector */
|
||||
#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4)
|
||||
/* LED1 / ~LINKSPD[1] selector */
|
||||
#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0)
|
||||
#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */
|
||||
#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */
|
||||
#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */
|
||||
#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */
|
||||
#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */
|
||||
#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */
|
||||
|
||||
|
||||
/*
|
||||
* EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17)
|
||||
*/
|
||||
#define MII_BCM54XX_EXP_AADJ1CH0 0x001f
|
||||
#define MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN 0x0200
|
||||
#define MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF 0x0100
|
||||
#define MII_BCM54XX_EXP_AADJ1CH3 0x601f
|
||||
#define MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ 0x0002
|
||||
#define MII_BCM54XX_EXP_EXP08 0x0F08
|
||||
#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001
|
||||
#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200
|
||||
#define MII_BCM54XX_EXP_EXP75 0x0f75
|
||||
#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c
|
||||
#define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001
|
||||
#define MII_BCM54XX_EXP_EXP96 0x0f96
|
||||
#define MII_BCM54XX_EXP_EXP96_MYST 0x0010
|
||||
#define MII_BCM54XX_EXP_EXP97 0x0f97
|
||||
#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c
|
||||
|
||||
/*
|
||||
* BCM5482: Secondary SerDes registers
|
||||
*/
|
||||
#define BCM5482_SSD_1000BX_CTL 0x00 /* 1000BASE-X Control */
|
||||
#define BCM5482_SSD_1000BX_CTL_PWRDOWN 0x0800 /* Power-down SSD */
|
||||
#define BCM5482_SSD_SGMII_SLAVE 0x15 /* SGMII Slave Register */
|
||||
#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */
|
||||
#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */
|
||||
|
||||
|
||||
/*****************************************************************************/
|
||||
/* Fast Ethernet Transceiver definitions. */
|
||||
/*****************************************************************************/
|
||||
|
||||
#define MII_BRCM_FET_INTREG 0x1a /* Interrupt register */
|
||||
#define MII_BRCM_FET_IR_MASK 0x0100 /* Mask all interrupts */
|
||||
#define MII_BRCM_FET_IR_LINK_EN 0x0200 /* Link status change enable */
|
||||
#define MII_BRCM_FET_IR_SPEED_EN 0x0400 /* Link speed change enable */
|
||||
#define MII_BRCM_FET_IR_DUPLEX_EN 0x0800 /* Duplex mode change enable */
|
||||
#define MII_BRCM_FET_IR_ENABLE 0x4000 /* Interrupt enable */
|
||||
|
||||
#define MII_BRCM_FET_BRCMTEST 0x1f /* Brcm test register */
|
||||
#define MII_BRCM_FET_BT_SRE 0x0080 /* Shadow register enable */
|
||||
|
||||
|
||||
/*** Shadow register definitions ***/
|
||||
|
||||
#define MII_BRCM_FET_SHDW_MISCCTRL 0x10 /* Shadow misc ctrl */
|
||||
#define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */
|
||||
|
||||
#define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */
|
||||
#define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003
|
||||
#define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001
|
||||
|
||||
#define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */
|
||||
#define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */
|
||||
|
||||
/*
* Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T
* 0x1c shadow registers.
*/
static inline int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow)
{
phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD));
}

static inline int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow,
u16 val)
{
return phy_write(phydev, MII_BCM54XX_SHD,
MII_BCM54XX_SHD_WRITE |
MII_BCM54XX_SHD_VAL(shadow) |
MII_BCM54XX_SHD_DATA(val));
}
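
For illustration, the shadow helpers above can be combined with the BCM54XX_SHD_APD bits defined earlier in this header; the function name is hypothetical and the read-modify-write below assumes no other bits in that shadow need special handling:

static int example_bcm54xx_enable_apd(struct phy_device *phydev)
{
	int val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD);

	if (val < 0)
		return val;

	/* Set the Auto Power-Down enable bit and write the shadow back. */
	return bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD,
				    val | BCM54XX_SHD_APD_EN);
}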

#define BRCM_CL45VEN_EEE_CONTROL 0x803d
#define LPI_FEATURE_EN 0x8000
#define LPI_FEATURE_EN_DIG1000X 0x4000

#endif /* _LINUX_BRCMPHY_H */

@@ -175,12 +175,13 @@ void __wait_on_buffer(struct buffer_head *);
|
||||
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
|
||||
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
|
||||
unsigned size);
|
||||
struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
|
||||
unsigned size);
|
||||
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
|
||||
unsigned size, gfp_t gfp);
|
||||
void __brelse(struct buffer_head *);
|
||||
void __bforget(struct buffer_head *);
|
||||
void __breadahead(struct block_device *, sector_t block, unsigned int size);
|
||||
struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size);
|
||||
struct buffer_head *__bread_gfp(struct block_device *,
|
||||
sector_t block, unsigned size, gfp_t gfp);
|
||||
void invalidate_bh_lrus(void);
|
||||
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
|
||||
void free_buffer_head(struct buffer_head * bh);
|
||||
@@ -295,7 +296,13 @@ static inline void bforget(struct buffer_head *bh)
static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
return __bread(sb->s_bdev, block, sb->s_blocksize);
return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
@@ -307,7 +314,7 @@ sb_breadahead(struct super_block *sb, sector_t block)
static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
return __getblk(sb->s_bdev, block, sb->s_blocksize);
return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
@@ -344,6 +351,36 @@ static inline void lock_buffer(struct buffer_head *bh)
__lock_buffer(bh);
}

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
sector_t block,
unsigned size)
{
return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
sector_t block,
unsigned size)
{
return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}

/**
* __bread() - reads a specified block and returns the bh
* @bdev: the block_device to read from
* @block: number of block
* @size: size (in bytes) to read
*
* Reads a specified block, and returns buffer head that contains it.
* The page cache is allocated from movable area so that it can be migrated.
* It returns NULL if the block was unreadable.
*/
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}
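
A minimal usage sketch of the split introduced here, assuming a hypothetical filesystem helper: ordinary metadata reads keep using sb_bread() (movable page-cache pages), while callers whose buffer pages must not be migrated switch to sb_bread_unmovable() or getblk_unmovable():

static int example_read_metadata_block(struct super_block *sb, sector_t blk)
{
	struct buffer_head *bh = sb_bread(sb, blk);	/* __GFP_MOVABLE cache page */

	if (!bh)
		return -EIO;
	/* ... parse bh->b_data ... */
	brelse(bh);
	return 0;
}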

extern int __set_page_dirty_buffers(struct page *page);

#else /* CONFIG_BLOCK */

@@ -2,7 +2,7 @@
|
||||
#define _LINUX_BYTEORDER_GENERIC_H
|
||||
|
||||
/*
|
||||
* linux/byteorder_generic.h
|
||||
* linux/byteorder/generic.h
|
||||
* Generic Byte-reordering support
|
||||
*
|
||||
* The "... p" macros, like le64_to_cpup, can be used with pointers
|
||||
|
@@ -26,6 +26,13 @@ struct ccp_cmd;
|
||||
#if defined(CONFIG_CRYPTO_DEV_CCP_DD) || \
|
||||
defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE)
|
||||
|
||||
/**
|
||||
* ccp_present - check if a CCP device is present
|
||||
*
|
||||
* Returns zero if a CCP device is present, -ENODEV otherwise.
|
||||
*/
|
||||
int ccp_present(void);
|
||||
|
||||
/**
|
||||
* ccp_enqueue_cmd - queue an operation for processing by the CCP
|
||||
*
|
||||
@@ -53,6 +60,11 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd);
|
||||
|
||||
#else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */
|
||||
|
||||
static inline int ccp_present(void)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd)
|
||||
{
|
||||
return -ENODEV;
|
||||
|
@@ -211,7 +211,6 @@ extern struct page **ceph_get_direct_page_vector(const void __user *data,
|
||||
bool write_page);
|
||||
extern void ceph_put_page_vector(struct page **pages, int num_pages,
|
||||
bool dirty);
|
||||
extern void ceph_release_page_vector(struct page **pages, int num_pages);
|
||||
extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
|
||||
extern int ceph_copy_user_to_page_vector(struct page **pages,
|
||||
const void __user *data,
|
||||
|
@@ -285,19 +285,9 @@ extern void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio,
|
||||
|
||||
extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
|
||||
bool can_fail);
|
||||
extern void ceph_msg_kfree(struct ceph_msg *m);
|
||||
|
||||
|
||||
static inline struct ceph_msg *ceph_msg_get(struct ceph_msg *msg)
|
||||
{
|
||||
kref_get(&msg->kref);
|
||||
return msg;
|
||||
}
|
||||
extern void ceph_msg_last_put(struct kref *kref);
|
||||
static inline void ceph_msg_put(struct ceph_msg *msg)
|
||||
{
|
||||
kref_put(&msg->kref, ceph_msg_last_put);
|
||||
}
|
||||
extern struct ceph_msg *ceph_msg_get(struct ceph_msg *msg);
|
||||
extern void ceph_msg_put(struct ceph_msg *msg);
|
||||
|
||||
extern void ceph_msg_dump(struct ceph_msg *msg);
|
||||
|
||||
|
@@ -117,7 +117,7 @@ struct ceph_osd_request {
|
||||
struct list_head r_req_lru_item;
|
||||
struct list_head r_osd_item;
|
||||
struct list_head r_linger_item;
|
||||
struct list_head r_linger_osd;
|
||||
struct list_head r_linger_osd_item;
|
||||
struct ceph_osd *r_osd;
|
||||
struct ceph_pg r_pgid;
|
||||
int r_pg_osds[CEPH_PG_MAX_SIZE];
|
||||
@@ -325,22 +325,14 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
|
||||
|
||||
extern void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
|
||||
struct ceph_osd_request *req);
|
||||
extern void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
|
||||
struct ceph_osd_request *req);
|
||||
|
||||
static inline void ceph_osdc_get_request(struct ceph_osd_request *req)
|
||||
{
|
||||
kref_get(&req->r_kref);
|
||||
}
|
||||
extern void ceph_osdc_release_request(struct kref *kref);
|
||||
static inline void ceph_osdc_put_request(struct ceph_osd_request *req)
|
||||
{
|
||||
kref_put(&req->r_kref, ceph_osdc_release_request);
|
||||
}
|
||||
extern void ceph_osdc_get_request(struct ceph_osd_request *req);
|
||||
extern void ceph_osdc_put_request(struct ceph_osd_request *req);
|
||||
|
||||
extern int ceph_osdc_start_request(struct ceph_osd_client *osdc,
|
||||
struct ceph_osd_request *req,
|
||||
bool nofail);
|
||||
extern void ceph_osdc_cancel_request(struct ceph_osd_request *req);
|
||||
extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
|
||||
struct ceph_osd_request *req);
|
||||
extern void ceph_osdc_sync(struct ceph_osd_client *osdc);
|
||||
|
@@ -2,6 +2,7 @@
|
||||
#define __FS_CEPH_PAGELIST_H
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/atomic.h>
|
||||
|
||||
struct ceph_pagelist {
|
||||
struct list_head head;
|
||||
@@ -10,6 +11,7 @@ struct ceph_pagelist {
|
||||
size_t room;
|
||||
struct list_head free_list;
|
||||
size_t num_pages_free;
|
||||
atomic_t refcnt;
|
||||
};
|
||||
|
||||
struct ceph_pagelist_cursor {
|
||||
@@ -26,9 +28,10 @@ static inline void ceph_pagelist_init(struct ceph_pagelist *pl)
|
||||
pl->room = 0;
|
||||
INIT_LIST_HEAD(&pl->free_list);
|
||||
pl->num_pages_free = 0;
|
||||
atomic_set(&pl->refcnt, 1);
|
||||
}
|
||||
|
||||
extern int ceph_pagelist_release(struct ceph_pagelist *pl);
|
||||
extern void ceph_pagelist_release(struct ceph_pagelist *pl);
|
||||
|
||||
extern int ceph_pagelist_append(struct ceph_pagelist *pl, const void *d, size_t l);
|
||||
|
||||
|
@@ -172,6 +172,7 @@ extern const char *ceph_osd_state_name(int s);
|
||||
#define CEPH_OSD_OP_MODE_WR 0x2000
|
||||
#define CEPH_OSD_OP_MODE_RMW 0x3000
|
||||
#define CEPH_OSD_OP_MODE_SUB 0x4000
|
||||
#define CEPH_OSD_OP_MODE_CACHE 0x8000
|
||||
|
||||
#define CEPH_OSD_OP_TYPE 0x0f00
|
||||
#define CEPH_OSD_OP_TYPE_LOCK 0x0100
|
||||
@@ -181,103 +182,135 @@ extern const char *ceph_osd_state_name(int s);
|
||||
#define CEPH_OSD_OP_TYPE_PG 0x0500
|
||||
#define CEPH_OSD_OP_TYPE_MULTI 0x0600 /* multiobject */
|
||||
|
||||
#define __CEPH_OSD_OP1(mode, nr) \
|
||||
(CEPH_OSD_OP_MODE_##mode | (nr))
|
||||
|
||||
#define __CEPH_OSD_OP(mode, type, nr) \
|
||||
(CEPH_OSD_OP_MODE_##mode | CEPH_OSD_OP_TYPE_##type | (nr))
|
||||
|
||||
#define __CEPH_FORALL_OSD_OPS(f) \
|
||||
/** data **/ \
|
||||
/* read */ \
|
||||
f(READ, __CEPH_OSD_OP(RD, DATA, 1), "read") \
|
||||
f(STAT, __CEPH_OSD_OP(RD, DATA, 2), "stat") \
|
||||
f(MAPEXT, __CEPH_OSD_OP(RD, DATA, 3), "mapext") \
|
||||
\
|
||||
/* fancy read */ \
|
||||
f(MASKTRUNC, __CEPH_OSD_OP(RD, DATA, 4), "masktrunc") \
|
||||
f(SPARSE_READ, __CEPH_OSD_OP(RD, DATA, 5), "sparse-read") \
|
||||
\
|
||||
f(NOTIFY, __CEPH_OSD_OP(RD, DATA, 6), "notify") \
|
||||
f(NOTIFY_ACK, __CEPH_OSD_OP(RD, DATA, 7), "notify-ack") \
|
||||
\
|
||||
/* versioning */ \
|
||||
f(ASSERT_VER, __CEPH_OSD_OP(RD, DATA, 8), "assert-version") \
|
||||
\
|
||||
f(LIST_WATCHERS, __CEPH_OSD_OP(RD, DATA, 9), "list-watchers") \
|
||||
\
|
||||
f(LIST_SNAPS, __CEPH_OSD_OP(RD, DATA, 10), "list-snaps") \
|
||||
\
|
||||
/* sync */ \
|
||||
f(SYNC_READ, __CEPH_OSD_OP(RD, DATA, 11), "sync_read") \
|
||||
\
|
||||
/* write */ \
|
||||
f(WRITE, __CEPH_OSD_OP(WR, DATA, 1), "write") \
|
||||
f(WRITEFULL, __CEPH_OSD_OP(WR, DATA, 2), "writefull") \
|
||||
f(TRUNCATE, __CEPH_OSD_OP(WR, DATA, 3), "truncate") \
|
||||
f(ZERO, __CEPH_OSD_OP(WR, DATA, 4), "zero") \
|
||||
f(DELETE, __CEPH_OSD_OP(WR, DATA, 5), "delete") \
|
||||
\
|
||||
/* fancy write */ \
|
||||
f(APPEND, __CEPH_OSD_OP(WR, DATA, 6), "append") \
|
||||
f(STARTSYNC, __CEPH_OSD_OP(WR, DATA, 7), "startsync") \
|
||||
f(SETTRUNC, __CEPH_OSD_OP(WR, DATA, 8), "settrunc") \
|
||||
f(TRIMTRUNC, __CEPH_OSD_OP(WR, DATA, 9), "trimtrunc") \
|
||||
\
|
||||
f(TMAPUP, __CEPH_OSD_OP(RMW, DATA, 10), "tmapup") \
|
||||
f(TMAPPUT, __CEPH_OSD_OP(WR, DATA, 11), "tmapput") \
|
||||
f(TMAPGET, __CEPH_OSD_OP(RD, DATA, 12), "tmapget") \
|
||||
\
|
||||
f(CREATE, __CEPH_OSD_OP(WR, DATA, 13), "create") \
|
||||
f(ROLLBACK, __CEPH_OSD_OP(WR, DATA, 14), "rollback") \
|
||||
\
|
||||
f(WATCH, __CEPH_OSD_OP(WR, DATA, 15), "watch") \
|
||||
\
|
||||
/* omap */ \
|
||||
f(OMAPGETKEYS, __CEPH_OSD_OP(RD, DATA, 17), "omap-get-keys") \
|
||||
f(OMAPGETVALS, __CEPH_OSD_OP(RD, DATA, 18), "omap-get-vals") \
|
||||
f(OMAPGETHEADER, __CEPH_OSD_OP(RD, DATA, 19), "omap-get-header") \
|
||||
f(OMAPGETVALSBYKEYS, __CEPH_OSD_OP(RD, DATA, 20), "omap-get-vals-by-keys") \
|
||||
f(OMAPSETVALS, __CEPH_OSD_OP(WR, DATA, 21), "omap-set-vals") \
|
||||
f(OMAPSETHEADER, __CEPH_OSD_OP(WR, DATA, 22), "omap-set-header") \
|
||||
f(OMAPCLEAR, __CEPH_OSD_OP(WR, DATA, 23), "omap-clear") \
|
||||
f(OMAPRMKEYS, __CEPH_OSD_OP(WR, DATA, 24), "omap-rm-keys") \
|
||||
f(OMAP_CMP, __CEPH_OSD_OP(RD, DATA, 25), "omap-cmp") \
|
||||
\
|
||||
/* tiering */ \
|
||||
f(COPY_FROM, __CEPH_OSD_OP(WR, DATA, 26), "copy-from") \
|
||||
f(COPY_GET_CLASSIC, __CEPH_OSD_OP(RD, DATA, 27), "copy-get-classic") \
|
||||
f(UNDIRTY, __CEPH_OSD_OP(WR, DATA, 28), "undirty") \
|
||||
f(ISDIRTY, __CEPH_OSD_OP(RD, DATA, 29), "isdirty") \
|
||||
f(COPY_GET, __CEPH_OSD_OP(RD, DATA, 30), "copy-get") \
|
||||
f(CACHE_FLUSH, __CEPH_OSD_OP(CACHE, DATA, 31), "cache-flush") \
|
||||
f(CACHE_EVICT, __CEPH_OSD_OP(CACHE, DATA, 32), "cache-evict") \
|
||||
f(CACHE_TRY_FLUSH, __CEPH_OSD_OP(CACHE, DATA, 33), "cache-try-flush") \
|
||||
\
|
||||
/* convert tmap to omap */ \
|
||||
f(TMAP2OMAP, __CEPH_OSD_OP(RMW, DATA, 34), "tmap2omap") \
|
||||
\
|
||||
/* hints */ \
|
||||
f(SETALLOCHINT, __CEPH_OSD_OP(WR, DATA, 35), "set-alloc-hint") \
|
||||
\
|
||||
/** multi **/ \
|
||||
f(CLONERANGE, __CEPH_OSD_OP(WR, MULTI, 1), "clonerange") \
|
||||
f(ASSERT_SRC_VERSION, __CEPH_OSD_OP(RD, MULTI, 2), "assert-src-version") \
|
||||
f(SRC_CMPXATTR, __CEPH_OSD_OP(RD, MULTI, 3), "src-cmpxattr") \
|
||||
\
|
||||
/** attrs **/ \
|
||||
/* read */ \
|
||||
f(GETXATTR, __CEPH_OSD_OP(RD, ATTR, 1), "getxattr") \
|
||||
f(GETXATTRS, __CEPH_OSD_OP(RD, ATTR, 2), "getxattrs") \
|
||||
f(CMPXATTR, __CEPH_OSD_OP(RD, ATTR, 3), "cmpxattr") \
|
||||
\
|
||||
/* write */ \
|
||||
f(SETXATTR, __CEPH_OSD_OP(WR, ATTR, 1), "setxattr") \
|
||||
f(SETXATTRS, __CEPH_OSD_OP(WR, ATTR, 2), "setxattrs") \
|
||||
f(RESETXATTRS, __CEPH_OSD_OP(WR, ATTR, 3), "resetxattrs") \
|
||||
f(RMXATTR, __CEPH_OSD_OP(WR, ATTR, 4), "rmxattr") \
|
||||
\
|
||||
/** subop **/ \
|
||||
f(PULL, __CEPH_OSD_OP1(SUB, 1), "pull") \
|
||||
f(PUSH, __CEPH_OSD_OP1(SUB, 2), "push") \
|
||||
f(BALANCEREADS, __CEPH_OSD_OP1(SUB, 3), "balance-reads") \
|
||||
f(UNBALANCEREADS, __CEPH_OSD_OP1(SUB, 4), "unbalance-reads") \
|
||||
f(SCRUB, __CEPH_OSD_OP1(SUB, 5), "scrub") \
|
||||
f(SCRUB_RESERVE, __CEPH_OSD_OP1(SUB, 6), "scrub-reserve") \
|
||||
f(SCRUB_UNRESERVE, __CEPH_OSD_OP1(SUB, 7), "scrub-unreserve") \
|
||||
f(SCRUB_STOP, __CEPH_OSD_OP1(SUB, 8), "scrub-stop") \
|
||||
f(SCRUB_MAP, __CEPH_OSD_OP1(SUB, 9), "scrub-map") \
|
||||
\
|
||||
/** lock **/ \
|
||||
f(WRLOCK, __CEPH_OSD_OP(WR, LOCK, 1), "wrlock") \
|
||||
f(WRUNLOCK, __CEPH_OSD_OP(WR, LOCK, 2), "wrunlock") \
|
||||
f(RDLOCK, __CEPH_OSD_OP(WR, LOCK, 3), "rdlock") \
|
||||
f(RDUNLOCK, __CEPH_OSD_OP(WR, LOCK, 4), "rdunlock") \
|
||||
f(UPLOCK, __CEPH_OSD_OP(WR, LOCK, 5), "uplock") \
|
||||
f(DNLOCK, __CEPH_OSD_OP(WR, LOCK, 6), "dnlock") \
|
||||
\
|
||||
/** exec **/ \
|
||||
/* note: the RD bit here is wrong; see special-case below in helper */ \
|
||||
f(CALL, __CEPH_OSD_OP(RD, EXEC, 1), "call") \
|
||||
\
|
||||
/** pg **/ \
|
||||
f(PGLS, __CEPH_OSD_OP(RD, PG, 1), "pgls") \
|
||||
f(PGLS_FILTER, __CEPH_OSD_OP(RD, PG, 2), "pgls-filter") \
|
||||
f(PG_HITSET_LS, __CEPH_OSD_OP(RD, PG, 3), "pg-hitset-ls") \
|
||||
f(PG_HITSET_GET, __CEPH_OSD_OP(RD, PG, 4), "pg-hitset-get")
|
||||
|
||||
enum {
|
||||
/** data **/
|
||||
/* read */
|
||||
CEPH_OSD_OP_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 1,
|
||||
CEPH_OSD_OP_STAT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 2,
|
||||
CEPH_OSD_OP_MAPEXT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 3,
|
||||
|
||||
/* fancy read */
|
||||
CEPH_OSD_OP_MASKTRUNC = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 4,
|
||||
CEPH_OSD_OP_SPARSE_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 5,
|
||||
|
||||
CEPH_OSD_OP_NOTIFY = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 6,
|
||||
CEPH_OSD_OP_NOTIFY_ACK = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 7,
|
||||
|
||||
/* versioning */
|
||||
CEPH_OSD_OP_ASSERT_VER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 8,
|
||||
|
||||
/* write */
|
||||
CEPH_OSD_OP_WRITE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 1,
|
||||
CEPH_OSD_OP_WRITEFULL = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 2,
|
||||
CEPH_OSD_OP_TRUNCATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 3,
|
||||
CEPH_OSD_OP_ZERO = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 4,
|
||||
CEPH_OSD_OP_DELETE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 5,
|
||||
|
||||
/* fancy write */
|
||||
CEPH_OSD_OP_APPEND = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 6,
|
||||
CEPH_OSD_OP_STARTSYNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 7,
|
||||
CEPH_OSD_OP_SETTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 8,
|
||||
CEPH_OSD_OP_TRIMTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 9,
|
||||
|
||||
CEPH_OSD_OP_TMAPUP = CEPH_OSD_OP_MODE_RMW | CEPH_OSD_OP_TYPE_DATA | 10,
|
||||
CEPH_OSD_OP_TMAPPUT = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 11,
|
||||
CEPH_OSD_OP_TMAPGET = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 12,
|
||||
|
||||
CEPH_OSD_OP_CREATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 13,
|
||||
CEPH_OSD_OP_ROLLBACK= CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 14,
|
||||
|
||||
CEPH_OSD_OP_WATCH = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 15,
|
||||
|
||||
/* omap */
|
||||
CEPH_OSD_OP_OMAPGETKEYS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 17,
|
||||
CEPH_OSD_OP_OMAPGETVALS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 18,
|
||||
CEPH_OSD_OP_OMAPGETHEADER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 19,
|
||||
CEPH_OSD_OP_OMAPGETVALSBYKEYS =
|
||||
CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 20,
|
||||
CEPH_OSD_OP_OMAPSETVALS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 21,
|
||||
CEPH_OSD_OP_OMAPSETHEADER = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 22,
|
||||
CEPH_OSD_OP_OMAPCLEAR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 23,
|
||||
CEPH_OSD_OP_OMAPRMKEYS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 24,
|
||||
CEPH_OSD_OP_OMAP_CMP = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 25,
|
||||
|
||||
/* hints */
|
||||
CEPH_OSD_OP_SETALLOCHINT = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 35,
|
||||
|
||||
/** multi **/
|
||||
CEPH_OSD_OP_CLONERANGE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_MULTI | 1,
|
||||
CEPH_OSD_OP_ASSERT_SRC_VERSION = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_MULTI | 2,
|
||||
CEPH_OSD_OP_SRC_CMPXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_MULTI | 3,
|
||||
|
||||
/** attrs **/
|
||||
/* read */
|
||||
CEPH_OSD_OP_GETXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 1,
|
||||
CEPH_OSD_OP_GETXATTRS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 2,
|
||||
CEPH_OSD_OP_CMPXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 3,
|
||||
|
||||
/* write */
|
||||
CEPH_OSD_OP_SETXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 1,
|
||||
CEPH_OSD_OP_SETXATTRS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 2,
|
||||
CEPH_OSD_OP_RESETXATTRS = CEPH_OSD_OP_MODE_WR|CEPH_OSD_OP_TYPE_ATTR | 3,
|
||||
CEPH_OSD_OP_RMXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 4,
|
||||
|
||||
/** subop **/
|
||||
CEPH_OSD_OP_PULL = CEPH_OSD_OP_MODE_SUB | 1,
|
||||
CEPH_OSD_OP_PUSH = CEPH_OSD_OP_MODE_SUB | 2,
|
||||
CEPH_OSD_OP_BALANCEREADS = CEPH_OSD_OP_MODE_SUB | 3,
|
||||
CEPH_OSD_OP_UNBALANCEREADS = CEPH_OSD_OP_MODE_SUB | 4,
|
||||
CEPH_OSD_OP_SCRUB = CEPH_OSD_OP_MODE_SUB | 5,
|
||||
CEPH_OSD_OP_SCRUB_RESERVE = CEPH_OSD_OP_MODE_SUB | 6,
|
||||
CEPH_OSD_OP_SCRUB_UNRESERVE = CEPH_OSD_OP_MODE_SUB | 7,
|
||||
CEPH_OSD_OP_SCRUB_STOP = CEPH_OSD_OP_MODE_SUB | 8,
|
||||
CEPH_OSD_OP_SCRUB_MAP = CEPH_OSD_OP_MODE_SUB | 9,
|
||||
|
||||
/** lock **/
|
||||
CEPH_OSD_OP_WRLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 1,
|
||||
CEPH_OSD_OP_WRUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 2,
|
||||
CEPH_OSD_OP_RDLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 3,
|
||||
CEPH_OSD_OP_RDUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 4,
|
||||
CEPH_OSD_OP_UPLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 5,
|
||||
CEPH_OSD_OP_DNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 6,
|
||||
|
||||
/** exec **/
|
||||
/* note: the RD bit here is wrong; see special-case below in helper */
|
||||
CEPH_OSD_OP_CALL = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_EXEC | 1,
|
||||
|
||||
/** pg **/
CEPH_OSD_OP_PGLS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 1,
CEPH_OSD_OP_PGLS_FILTER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 2,
#define GENERATE_ENUM_ENTRY(op, opcode, str) CEPH_OSD_OP_##op = (opcode),
__CEPH_FORALL_OSD_OPS(GENERATE_ENUM_ENTRY)
#undef GENERATE_ENUM_ENTRY
};
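
The same X-macro list can generate other tables from its f(op, opcode, str) triples; a sketch of a string lookup built that way (the in-tree string helper is generated in this style, but the names below are illustrative):

static const char *example_ceph_osd_op_name(int op)
{
#define GENERATE_CASE_ENTRY(op_name, opcode, str)	\
	case CEPH_OSD_OP_##op_name: return (str);

	switch (op) {
	__CEPH_FORALL_OSD_OPS(GENERATE_CASE_ENTRY)
	default:
		return "???";
	}
#undef GENERATE_CASE_ENTRY
}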

static inline int ceph_osd_op_type_lock(int op)
@@ -27,7 +27,6 @@
|
||||
|
||||
struct cgroup_root;
|
||||
struct cgroup_subsys;
|
||||
struct inode;
|
||||
struct cgroup;
|
||||
|
||||
extern int cgroup_init_early(void);
|
||||
@@ -38,7 +37,8 @@ extern void cgroup_exit(struct task_struct *p);
|
||||
extern int cgroupstats_build(struct cgroupstats *stats,
|
||||
struct dentry *dentry);
|
||||
|
||||
extern int proc_cgroup_show(struct seq_file *, void *);
|
||||
extern int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
|
||||
struct pid *pid, struct task_struct *tsk);
|
||||
|
||||
/* define the enumeration of all cgroup subsystems */
|
||||
#define SUBSYS(_x) _x ## _cgrp_id,
|
||||
@@ -161,11 +161,6 @@ static inline void css_put(struct cgroup_subsys_state *css)
|
||||
|
||||
/* bits in struct cgroup flags field */
|
||||
enum {
|
||||
/*
|
||||
* Control Group has previously had a child cgroup or a task,
|
||||
* but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
|
||||
*/
|
||||
CGRP_RELEASABLE,
|
||||
/* Control Group requires release notifications to userspace */
|
||||
CGRP_NOTIFY_ON_RELEASE,
|
||||
/*
|
||||
@@ -234,13 +229,6 @@ struct cgroup {
|
||||
*/
|
||||
struct list_head e_csets[CGROUP_SUBSYS_COUNT];
|
||||
|
||||
/*
|
||||
* Linked list running through all cgroups that can
|
||||
* potentially be reaped by the release agent. Protected by
|
||||
* release_list_lock
|
||||
*/
|
||||
struct list_head release_list;
|
||||
|
||||
/*
|
||||
* list of pidlists, up to two for each namespace (one for procs, one
|
||||
* for tasks); created on demand.
|
||||
@@ -250,6 +238,9 @@ struct cgroup {
|
||||
|
||||
/* used to wait for offlining of csses */
|
||||
wait_queue_head_t offline_waitq;
|
||||
|
||||
/* used to schedule release agent */
|
||||
struct work_struct release_agent_work;
|
||||
};
|
||||
|
||||
#define MAX_CGROUP_ROOT_NAMELEN 64
|
||||
@@ -536,13 +527,10 @@ static inline bool cgroup_has_tasks(struct cgroup *cgrp)
|
||||
return !list_empty(&cgrp->cset_links);
|
||||
}
|
||||
|
||||
/* returns ino associated with a cgroup, 0 indicates unmounted root */
|
||||
/* returns ino associated with a cgroup */
|
||||
static inline ino_t cgroup_ino(struct cgroup *cgrp)
|
||||
{
|
||||
if (cgrp->kn)
|
||||
return cgrp->kn->ino;
|
||||
else
|
||||
return 0;
|
||||
return cgrp->kn->ino;
|
||||
}
|
||||
|
||||
/* cft/css accessors for cftype->write() operation */
|
||||
|
@@ -46,8 +46,10 @@ struct clk {
|
||||
unsigned int enable_count;
|
||||
unsigned int prepare_count;
|
||||
unsigned long accuracy;
|
||||
int phase;
|
||||
struct hlist_head children;
|
||||
struct hlist_node child_node;
|
||||
struct hlist_node debug_node;
|
||||
unsigned int notifier_count;
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
struct dentry *dentry;
|
||||
|
@@ -13,6 +13,7 @@

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>

#ifdef CONFIG_COMMON_CLK

@@ -129,6 +130,14 @@ struct dentry;
* set then clock accuracy will be initialized to parent accuracy
* or 0 (perfect clock) if clock has no parent.
*
* @get_phase: Queries the hardware to get the current phase of a clock.
* Returned values are 0-359 degrees on success, negative
* error codes on failure.
*
* @set_phase: Shift the phase this clock signal in degrees specified
* by the second argument. Valid values for degrees are
* 0-359. Return 0 on success, otherwise -EERROR.
*
* @init: Perform platform-specific initialization magic.
* This is not not used by any of the basic clock types.
* Please consider other ways of solving initialization problems
@@ -177,6 +186,8 @@ struct clk_ops {
unsigned long parent_rate, u8 index);
unsigned long (*recalc_accuracy)(struct clk_hw *hw,
unsigned long parent_accuracy);
int (*get_phase)(struct clk_hw *hw);
int (*set_phase)(struct clk_hw *hw, int degrees);
void (*init)(struct clk_hw *hw);
int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
};
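
A minimal provider-side sketch of the new phase hooks, for a hypothetical clock whose hardware stores the phase in 90-degree steps; only the two clk_ops callbacks are taken from this header, the register layout and foo_* names are invented:

#include <linux/clk-provider.h>
#include <linux/io.h>

struct foo_clk {
	struct clk_hw hw;
	void __iomem *reg;
};

#define to_foo_clk(_hw) container_of(_hw, struct foo_clk, hw)

static int foo_clk_get_phase(struct clk_hw *hw)
{
	/* Two register bits encode 0/90/180/270 degrees. */
	return (readl(to_foo_clk(hw)->reg) & 0x3) * 90;
}

static int foo_clk_set_phase(struct clk_hw *hw, int degrees)
{
	if (degrees % 90)
		return -EINVAL;
	writel(degrees / 90, to_foo_clk(hw)->reg);
	return 0;
}

static const struct clk_ops foo_clk_ops = {
	.get_phase	= foo_clk_get_phase,
	.set_phase	= foo_clk_set_phase,
};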
@@ -488,6 +499,28 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
|
||||
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
|
||||
unsigned long flags);
|
||||
|
||||
/***
|
||||
* struct clk_gpio_gate - gpio gated clock
|
||||
*
|
||||
* @hw: handle between common and hardware-specific interfaces
|
||||
* @gpiod: gpio descriptor
|
||||
*
|
||||
* Clock with a gpio control for enabling and disabling the parent clock.
|
||||
* Implements .enable, .disable and .is_enabled
|
||||
*/
|
||||
|
||||
struct clk_gpio {
|
||||
struct clk_hw hw;
|
||||
struct gpio_desc *gpiod;
|
||||
};
|
||||
|
||||
extern const struct clk_ops clk_gpio_gate_ops;
|
||||
struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
|
||||
const char *parent_name, struct gpio_desc *gpio,
|
||||
unsigned long flags);
|
||||
|
||||
void of_gpio_clk_gate_setup(struct device_node *node);
|
||||
|
||||
/**
|
||||
* clk_register - allocate a new clock, register it and return an opaque cookie
|
||||
* @dev: device that is registering this clock
|
||||
|
@@ -106,6 +106,25 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
*/
long clk_get_accuracy(struct clk *clk);

/**
* clk_set_phase - adjust the phase shift of a clock signal
* @clk: clock signal source
* @degrees: number of degrees the signal is shifted
*
* Shifts the phase of a clock signal by the specified degrees. Returns 0 on
* success, -EERROR otherwise.
*/
int clk_set_phase(struct clk *clk, int degrees);

/**
* clk_get_phase - return the phase shift of a clock signal
* @clk: clock signal source
*
* Returns the phase shift of a clock node in degrees, otherwise returns
* -EERROR.
*/
int clk_get_phase(struct clk *clk);

#else

static inline long clk_get_accuracy(struct clk *clk)
@@ -113,6 +132,16 @@ static inline long clk_get_accuracy(struct clk *clk)
return -ENOTSUPP;
}

static inline long clk_set_phase(struct clk *clk, int phase)
{
return -ENOTSUPP;
}

static inline long clk_get_phase(struct clk *clk)
{
return -ENOTSUPP;
}

#endif
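
Consumer-side sketch of the new phase API, with a hypothetical "sample" clock name and minimal error handling:

static int example_shift_sample_clock(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, "sample");
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_set_phase(clk, 90);
	if (ret)
		return ret;

	dev_info(dev, "phase now %d degrees\n", clk_get_phase(clk));
	return 0;
}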

/**
@@ -125,6 +125,7 @@ extern void __iomem *at91_pmc_base;
|
||||
#define AT91_PMC_PLLADIV2 (1 << 12) /* PLLA divisor by 2 [some SAM9 only] */
|
||||
#define AT91_PMC_PLLADIV2_OFF (0 << 12)
|
||||
#define AT91_PMC_PLLADIV2_ON (1 << 12)
|
||||
#define AT91_PMC_H32MXDIV BIT(24)
|
||||
|
||||
#define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */
|
||||
#define AT91_PMC_USBS (0x1 << 0) /* USB OHCI Input clock selection */
|
||||
|
@@ -292,6 +292,7 @@ void omap2xxx_clkt_vps_init(void);
|
||||
void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index);
|
||||
void ti_dt_clocks_register(struct ti_dt_clk *oclks);
|
||||
void ti_dt_clk_init_provider(struct device_node *np, int index);
|
||||
void ti_dt_clk_init_retry_clks(void);
|
||||
void ti_dt_clockdomains_setup(void);
|
||||
int ti_clk_retry_init(struct device_node *node, struct clk_hw *hw,
|
||||
ti_of_clk_init_cb_t func);
|
||||
|
@@ -287,7 +287,7 @@ extern struct clocksource* clocksource_get_next(void);
|
||||
extern void clocksource_change_rating(struct clocksource *cs, int rating);
|
||||
extern void clocksource_suspend(void);
|
||||
extern void clocksource_resume(void);
|
||||
extern struct clocksource * __init __weak clocksource_default_clock(void);
|
||||
extern struct clocksource * __init clocksource_default_clock(void);
|
||||
extern void clocksource_mark_unstable(struct clocksource *cs);
|
||||
|
||||
extern u64
|
||||
|
30	include/linux/cma.h	Normal file
@@ -0,0 +1,30 @@
#ifndef __CMA_H__
#define __CMA_H__

/*
* There is always at least global CMA area and a few optional
* areas configured in kernel .config.
*/
#ifdef CONFIG_CMA_AREAS
#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)

#else
#define MAX_CMA_AREAS (0)

#endif

struct cma;

extern phys_addr_t cma_get_base(struct cma *cma);
extern unsigned long cma_get_size(struct cma *cma);

extern int __init cma_declare_contiguous(phys_addr_t base,
phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
bool fixed, struct cma **res_cma);
extern int cma_init_reserved_mem(phys_addr_t base,
phys_addr_t size, int order_per_bit,
struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
extern bool cma_release(struct cma *cma, struct page *pages, int count);
#endif
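
A rough usage sketch of the exported CMA interface; the reservation normally happens from early, architecture-level setup code, and the example_* names, SZ_16M size and alignment/order values are illustrative:

#include <linux/cma.h>
#include <linux/sizes.h>

static struct cma *example_cma;

/* Reserve 16 MiB anywhere, natural alignment, order-0 granularity. */
static int __init example_cma_reserve(void)
{
	return cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false, &example_cma);
}

/* Hand out / give back a 256-page buffer, aligned to 2^4 pages. */
static struct page *example_cma_get_buffer(void)
{
	return cma_alloc(example_cma, 256, 4);
}

static void example_cma_put_buffer(struct page *pages)
{
	cma_release(example_cma, pages, 256);
}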
@@ -41,6 +41,35 @@ extern const struct net_device_ops com20020_netdev_ops;
|
||||
#define BUS_ALIGN 1
|
||||
#endif
|
||||
|
||||
#define PLX_PCI_MAX_CARDS 2
|
||||
|
||||
struct com20020_pci_channel_map {
|
||||
u32 bar;
|
||||
u32 offset;
|
||||
u32 size; /* 0x00 - auto, e.g. length of entire bar */
|
||||
};
|
||||
|
||||
struct com20020_pci_card_info {
|
||||
const char *name;
|
||||
int devcount;
|
||||
|
||||
struct com20020_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CARDS];
|
||||
|
||||
unsigned int flags;
|
||||
};
|
||||
|
||||
struct com20020_priv {
|
||||
struct com20020_pci_card_info *ci;
|
||||
struct list_head list_dev;
|
||||
};
|
||||
|
||||
struct com20020_dev {
|
||||
struct list_head list;
|
||||
struct net_device *dev;
|
||||
|
||||
struct com20020_priv *pci_priv;
|
||||
int index;
|
||||
};
|
||||
|
||||
#define _INTMASK (ioaddr+BUS_ALIGN*0) /* writable */
|
||||
#define _STATUS (ioaddr+BUS_ALIGN*0) /* readable */
|
||||
|
@@ -2,14 +2,24 @@
#define _LINUX_COMPACTION_H

/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was deferred due to past failures */
#define COMPACT_DEFERRED 0
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED 0
#define COMPACT_SKIPPED 1
/* compaction should continue to another pageblock */
#define COMPACT_CONTINUE 1
#define COMPACT_CONTINUE 2
/* direct compaction partially compacted a zone and there are suitable pages */
#define COMPACT_PARTIAL 2
#define COMPACT_PARTIAL 3
/* The full zone was compacted */
#define COMPACT_COMPLETE 3
#define COMPACT_COMPLETE 4

/* Used to signal whether compaction detected need_sched() or lock contention */
/* No contention detected */
#define COMPACT_CONTENDED_NONE 0
/* Either need_sched() was true or fatal signal pending */
#define COMPACT_CONTENDED_SCHED 1
/* Zone lock or lru_lock was contended in async compaction */
#define COMPACT_CONTENDED_LOCK 2

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
@@ -22,7 +32,8 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *mask,
enum migrate_mode mode, bool *contended);
enum migrate_mode mode, int *contended,
struct zone **candidate_zone);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order);
@@ -91,7 +102,8 @@ static inline bool compaction_restarting(struct zone *zone, int order)
#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *nodemask,
enum migrate_mode mode, bool *contended)
enum migrate_mode mode, int *contended,
struct zone **candidate_zone)
{
return COMPACT_CONTINUE;
}

@@ -71,7 +71,6 @@
|
||||
* http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
|
||||
*
|
||||
* Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
|
||||
* Fixed in GCC 4.8.2 and later versions.
|
||||
*
|
||||
* (asm goto is automatically volatile - the naming reflects this.)
|
||||
*/
|
||||
|
65	include/linux/compiler-gcc5.h	Normal file
@@ -0,0 +1,65 @@
#ifndef __LINUX_COMPILER_H
#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
#endif

#define __used __attribute__((__used__))
#define __must_check __attribute__((warn_unused_result))
#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)

/* Mark functions as cold. gcc will assume any path leading to a call
to them will be unlikely. This means a lot of manual unlikely()s
are unnecessary now for any paths leading to the usual suspects
like BUG(), printk(), panic() etc. [but let's keep them for now for
older compilers]

Early snapshots of gcc 4.3 don't support this and we can't detect this
in the preprocessor, but we can live with this because they're unreleased.
Maketime probing would be overkill here.

gcc also has a __attribute__((__hot__)) to move hot functions into
a special section, but I don't see any sense in this right now in
the kernel context */
#define __cold __attribute__((__cold__))

#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)

#ifndef __CHECKER__
# define __compiletime_warning(message) __attribute__((warning(message)))
# define __compiletime_error(message) __attribute__((error(message)))
#endif /* __CHECKER__ */

/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
* control elsewhere.
*
* Early snapshots of gcc 4.5 don't support this and we can't detect
* this in the preprocessor, but we can live with this because they're
* unreleased. Really, we need to have autoconf for the kernel.
*/
#define unreachable() __builtin_unreachable()

/* Mark a function definition as prohibited from being cloned. */
#define __noclone __attribute__((__noclone__))

/*
* Tell the optimizer that something else uses this function or variable.
*/
#define __visible __attribute__((externally_visible))

/*
* GCC 'asm goto' miscompiles certain code sequences:
*
* http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
*
* Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
*
* (asm goto is automatically volatile - the naming reflects this.)
*/
#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)

#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
#define __HAVE_BUILTIN_BSWAP16__
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
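
Generic usage of the annotations above (function names are placeholders): __cold keeps the error path out of the hot text, __must_check makes callers consume the result:

#include <linux/printk.h>

static void __cold example_report_failure(int err)
{
	printk("example: failed with %d\n", err);
}

static int __must_check example_check(int v)
{
	if (v < 0)
		example_report_failure(v);
	return v;
}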
@@ -213,6 +213,7 @@ extern struct bus_type cpu_subsys;
|
||||
extern void cpu_hotplug_begin(void);
|
||||
extern void cpu_hotplug_done(void);
|
||||
extern void get_online_cpus(void);
|
||||
extern bool try_get_online_cpus(void);
|
||||
extern void put_online_cpus(void);
|
||||
extern void cpu_hotplug_disable(void);
|
||||
extern void cpu_hotplug_enable(void);
|
||||
@@ -230,6 +231,7 @@ int cpu_down(unsigned int cpu);
|
||||
static inline void cpu_hotplug_begin(void) {}
|
||||
static inline void cpu_hotplug_done(void) {}
|
||||
#define get_online_cpus() do { } while (0)
|
||||
#define try_get_online_cpus() true
|
||||
#define put_online_cpus() do { } while (0)
|
||||
#define cpu_hotplug_disable() do { } while (0)
|
||||
#define cpu_hotplug_enable() do { } while (0)
|
||||
|
22	include/linux/cpufreq-dt.h	Normal file
@@ -0,0 +1,22 @@
/*
* Copyright (C) 2014 Marvell
* Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/

#ifndef __CPUFREQ_DT_H__
#define __CPUFREQ_DT_H__

struct cpufreq_dt_platform_data {
/*
* True when each CPU has its own clock to control its
* frequency, false when all CPUs are controlled by a single
* clock.
*/
bool independent_clocks;
};

#endif /* __CPUFREQ_DT_H__ */
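
Sketch of how platform code would hand this structure to the generic DT cpufreq driver, assuming the driver binds to a platform device named "cpufreq-dt"; the example_* names are illustrative:

#include <linux/cpufreq-dt.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static struct cpufreq_dt_platform_data example_cpufreq_pdata = {
	.independent_clocks = true,
};

static int __init example_register_cpufreq(void)
{
	return PTR_ERR_OR_ZERO(platform_device_register_data(NULL,
			"cpufreq-dt", -1, &example_cpufreq_pdata,
			sizeof(example_cpufreq_pdata)));
}
device_initcall(example_register_cpufreq);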
@@ -112,6 +112,9 @@ struct cpufreq_policy {
|
||||
spinlock_t transition_lock;
|
||||
wait_queue_head_t transition_wait;
|
||||
struct task_struct *transition_task; /* Task which is doing the transition */
|
||||
|
||||
/* For cpufreq driver's internal use */
|
||||
void *driver_data;
|
||||
};
|
||||
|
||||
/* Only for ACPI */
|
||||
@@ -216,6 +219,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
|
||||
struct cpufreq_driver {
|
||||
char name[CPUFREQ_NAME_LEN];
|
||||
u8 flags;
|
||||
void *driver_data;
|
||||
|
||||
/* needed by all drivers */
|
||||
int (*init) (struct cpufreq_policy *policy);
|
||||
@@ -309,6 +313,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data);
|
||||
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
|
||||
|
||||
const char *cpufreq_get_current_driver(void);
|
||||
void *cpufreq_get_driver_data(void);
|
||||
|
||||
static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
|
||||
unsigned int min, unsigned int max)
|
||||
|
@@ -666,10 +666,19 @@ static inline size_t cpumask_size(void)
*
* This code makes NR_CPUS length memcopy and brings to a memory corruption.
* cpumask_copy() provide safe copy functionality.
*
* Note that there is another evil here: If you define a cpumask_var_t
* as a percpu variable then the way to obtain the address of the cpumask
* structure differently influences what this_cpu_* operation needs to be
* used. Please use this_cpu_cpumask_var_t in those cases. The direct use
* of this_cpu_ptr() or this_cpu_read() will lead to failures when the
* other type of cpumask_var_t implementation is configured.
*/
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;

#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)

bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
@@ -681,6 +690,8 @@ void free_bootmem_cpumask_var(cpumask_var_t mask);
#else
typedef struct cpumask cpumask_var_t[1];

#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)

static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return true;
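
Sketch of the rule described in the comment above: a cpumask_var_t declared per-cpu is dereferenced through this_cpu_cpumask_var_ptr() so the same code works for both the on-stack and CONFIG_CPUMASK_OFFSTACK layouts (in the off-stack case the per-cpu masks still have to be allocated at boot); names are illustrative:

#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(cpumask_var_t, example_local_mask);

static void example_mark_self(void)
{
	struct cpumask *mask = this_cpu_cpumask_var_ptr(example_local_mask);

	cpumask_set_cpu(smp_processor_id(), mask);
}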
@@ -86,19 +86,20 @@ extern void __cpuset_memory_pressure_bump(void);
|
||||
|
||||
extern void cpuset_task_status_allowed(struct seq_file *m,
|
||||
struct task_struct *task);
|
||||
extern int proc_cpuset_show(struct seq_file *, void *);
|
||||
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
|
||||
struct pid *pid, struct task_struct *tsk);
|
||||
|
||||
extern int cpuset_mem_spread_node(void);
|
||||
extern int cpuset_slab_spread_node(void);
|
||||
|
||||
static inline int cpuset_do_page_mem_spread(void)
|
||||
{
|
||||
return current->flags & PF_SPREAD_PAGE;
|
||||
return task_spread_page(current);
|
||||
}
|
||||
|
||||
static inline int cpuset_do_slab_mem_spread(void)
|
||||
{
|
||||
return current->flags & PF_SPREAD_SLAB;
|
||||
return task_spread_slab(current);
|
||||
}
|
||||
|
||||
extern int current_cpuset_is_being_rebound(void);
|
||||
|
@@ -14,14 +14,13 @@
|
||||
extern unsigned long long elfcorehdr_addr;
|
||||
extern unsigned long long elfcorehdr_size;
|
||||
|
||||
extern int __weak elfcorehdr_alloc(unsigned long long *addr,
|
||||
unsigned long long *size);
|
||||
extern void __weak elfcorehdr_free(unsigned long long addr);
|
||||
extern ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos);
|
||||
extern ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
|
||||
extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
|
||||
unsigned long from, unsigned long pfn,
|
||||
unsigned long size, pgprot_t prot);
|
||||
extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
|
||||
extern void elfcorehdr_free(unsigned long long addr);
|
||||
extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
|
||||
extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
|
||||
extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
|
||||
unsigned long from, unsigned long pfn,
|
||||
unsigned long size, pgprot_t prot);
|
||||
|
||||
extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
|
||||
unsigned long, int);
|
||||
|
@@ -6,7 +6,8 @@
#define CRC_T10DIF_DIGEST_SIZE 2
#define CRC_T10DIF_BLOCK_SIZE 1

__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len);
__u16 crc_t10dif(unsigned char const *, size_t);
extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
size_t len);
extern __u16 crc_t10dif(unsigned char const *, size_t);

#endif
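
Trivial usage sketch for the now-extern helpers: crc_t10dif() covers a whole buffer, while crc_t10dif_generic() accepts a running CRC for piecewise updates; the example_* wrappers are placeholders:

#include <linux/crc-t10dif.h>

static __u16 example_prot_crc(const unsigned char *buf, size_t len)
{
	return crc_t10dif(buf, len);
}

static __u16 example_prot_crc_update(__u16 crc, const unsigned char *buf,
				     size_t len)
{
	return crc_t10dif_generic(crc, buf, len);
}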
@@ -258,6 +258,15 @@ static inline void put_cred(const struct cred *_cred)
#define current_cred() \
	rcu_dereference_protected(current->cred, 1)

/**
 * current_real_cred - Access the current task's objective credentials
 *
 * Access the objective credentials of the current task.  RCU-safe,
 * since nobody else can modify it.
 */
#define current_real_cred() \
	rcu_dereference_protected(current->real_cred, 1)

/**
 * __task_cred - Access a task's objective credentials
 * @task: The task to query
@@ -1,125 +0,0 @@
|
||||
#ifndef _CYCX_X25_H
|
||||
#define _CYCX_X25_H
|
||||
/*
|
||||
* cycx_x25.h Cyclom X.25 firmware API definitions.
|
||||
*
|
||||
* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
|
||||
*
|
||||
* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo
|
||||
*
|
||||
* Based on sdla_x25.h by Gene Kozin <74604.152@compuserve.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
* ============================================================================
|
||||
* 2000/04/02 acme dprintk and cycx_debug
|
||||
* 1999/01/03 acme judicious use of data types
|
||||
* 1999/01/02 acme #define X25_ACK_N3 0x4411
|
||||
* 1998/12/28 acme cleanup: lot'o'things removed
|
||||
* commands listed,
|
||||
* TX25Cmd & TX25Config structs
|
||||
* typedef'ed
|
||||
*/
|
||||
#ifndef PACKED
|
||||
#define PACKED __attribute__((packed))
|
||||
#endif
|
||||
|
||||
/* X.25 shared memory layout. */
|
||||
#define X25_MBOX_OFFS 0x300 /* general mailbox block */
|
||||
#define X25_RXMBOX_OFFS 0x340 /* receive mailbox */
|
||||
|
||||
/* Debug */
|
||||
#define dprintk(level, format, a...) if (cycx_debug >= level) printk(format, ##a)
|
||||
|
||||
extern unsigned int cycx_debug;
|
||||
|
||||
/* Data Structures */
|
||||
/* X.25 Command Block. */
|
||||
struct cycx_x25_cmd {
|
||||
u16 command;
|
||||
u16 link; /* values: 0 or 1 */
|
||||
u16 len; /* values: 0 thru 0x205 (517) */
|
||||
u32 buf;
|
||||
} PACKED;
|
||||
|
||||
/* Defines for the 'command' field. */
|
||||
#define X25_CONNECT_REQUEST 0x4401
|
||||
#define X25_CONNECT_RESPONSE 0x4402
|
||||
#define X25_DISCONNECT_REQUEST 0x4403
|
||||
#define X25_DISCONNECT_RESPONSE 0x4404
|
||||
#define X25_DATA_REQUEST 0x4405
|
||||
#define X25_ACK_TO_VC 0x4406
|
||||
#define X25_INTERRUPT_RESPONSE 0x4407
|
||||
#define X25_CONFIG 0x4408
|
||||
#define X25_CONNECT_INDICATION 0x4409
|
||||
#define X25_CONNECT_CONFIRM 0x440A
|
||||
#define X25_DISCONNECT_INDICATION 0x440B
|
||||
#define X25_DISCONNECT_CONFIRM 0x440C
|
||||
#define X25_DATA_INDICATION 0x440E
|
||||
#define X25_INTERRUPT_INDICATION 0x440F
|
||||
#define X25_ACK_FROM_VC 0x4410
|
||||
#define X25_ACK_N3 0x4411
|
||||
#define X25_CONNECT_COLLISION 0x4413
|
||||
#define X25_N3WIN 0x4414
|
||||
#define X25_LINE_ON 0x4415
|
||||
#define X25_LINE_OFF 0x4416
|
||||
#define X25_RESET_REQUEST 0x4417
|
||||
#define X25_LOG 0x4500
|
||||
#define X25_STATISTIC 0x4600
|
||||
#define X25_TRACE 0x4700
|
||||
#define X25_N2TRACEXC 0x4702
|
||||
#define X25_N3TRACEXC 0x4703
|
||||
|
||||
/**
|
||||
* struct cycx_x25_config - cyclom2x x25 firmware configuration
|
||||
* @link - link number
|
||||
* @speed - line speed
|
||||
* @clock - internal/external
|
||||
* @n2 - # of level 2 retransm.(values: 1 thru FF)
|
||||
* @n2win - level 2 window (values: 1 thru 7)
|
||||
* @n3win - level 3 window (values: 1 thru 7)
|
||||
* @nvc - # of logical channels (values: 1 thru 64)
|
||||
* @pktlen - level 3 packet length - log base 2 of size
|
||||
* @locaddr - my address
|
||||
* @remaddr - remote address
|
||||
* @t1 - time, in seconds
|
||||
* @t2 - time, in seconds
|
||||
* @t21 - time, in seconds
|
||||
* @npvc - # of permanent virt. circuits (1 thru nvc)
|
||||
* @t23 - time, in seconds
|
||||
* @flags - see dosx25.doc, in portuguese, for details
|
||||
*/
|
||||
struct cycx_x25_config {
|
||||
u8 link;
|
||||
u8 speed;
|
||||
u8 clock;
|
||||
u8 n2;
|
||||
u8 n2win;
|
||||
u8 n3win;
|
||||
u8 nvc;
|
||||
u8 pktlen;
|
||||
u8 locaddr;
|
||||
u8 remaddr;
|
||||
u16 t1;
|
||||
u16 t2;
|
||||
u8 t21;
|
||||
u8 npvc;
|
||||
u8 t23;
|
||||
u8 flags;
|
||||
} PACKED;
|
||||
|
||||
struct cycx_x25_stats {
|
||||
u16 rx_crc_errors;
|
||||
u16 rx_over_errors;
|
||||
u16 n2_tx_frames;
|
||||
u16 n2_rx_frames;
|
||||
u16 tx_timeouts;
|
||||
u16 rx_timeouts;
|
||||
u16 n3_tx_packets;
|
||||
u16 n3_rx_packets;
|
||||
u16 tx_aborts;
|
||||
u16 rx_aborts;
|
||||
} PACKED;
|
||||
#endif /* _CYCX_X25_H */
|
@@ -11,7 +11,6 @@
#include <linux/rcupdate.h>
#include <linux/lockref.h>

struct nameidata;
struct path;
struct vfsmount;

@@ -55,6 +54,7 @@ struct qstr {
#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
#define hashlen_hash(hashlen) ((u32) (hashlen))
#define hashlen_len(hashlen)  ((u32)((hashlen) >> 32))
#define hashlen_create(hash,len) (((u64)(len)<<32)|(u32)(hash))

struct dentry_stat_t {
	long nr_dentry;
@@ -225,11 +225,6 @@ struct dentry_operations {

extern seqlock_t rename_lock;

static inline int dname_external(const struct dentry *dentry)
{
	return dentry->d_name.name != dentry->d_iname;
}

/*
 * These are the low-level FS interfaces to the dcache..
 */
@@ -249,10 +244,11 @@ extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
extern struct dentry *d_find_any_alias(struct inode *inode);
extern struct dentry * d_obtain_alias(struct inode *);
extern struct dentry * d_obtain_root(struct inode *);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
extern void shrink_dcache_for_umount(struct super_block *);
extern int d_invalidate(struct dentry *);
extern void d_invalidate(struct dentry *);

/* only used at mount-time */
extern struct dentry * d_make_root(struct inode *);
@@ -267,7 +263,6 @@ extern void d_prune_aliases(struct inode *);

/* test whether we have any submounts in a subdir tree */
extern int have_submounts(struct dentry *);
extern int check_submounts_and_drop(struct dentry *);

/*
 * This adds the entry to the hash queues.
@@ -1,10 +1,10 @@
|
||||
#ifndef DECOMPRESS_BUNZIP2_H
|
||||
#define DECOMPRESS_BUNZIP2_H
|
||||
|
||||
int bunzip2(unsigned char *inbuf, int len,
|
||||
int(*fill)(void*, unsigned int),
|
||||
int(*flush)(void*, unsigned int),
|
||||
int bunzip2(unsigned char *inbuf, long len,
|
||||
long (*fill)(void*, unsigned long),
|
||||
long (*flush)(void*, unsigned long),
|
||||
unsigned char *output,
|
||||
int *pos,
|
||||
long *pos,
|
||||
void(*error)(char *x));
|
||||
#endif
|
||||
|
@@ -1,11 +1,11 @@
|
||||
#ifndef DECOMPRESS_GENERIC_H
|
||||
#define DECOMPRESS_GENERIC_H
|
||||
|
||||
typedef int (*decompress_fn) (unsigned char *inbuf, int len,
|
||||
int(*fill)(void*, unsigned int),
|
||||
int(*flush)(void*, unsigned int),
|
||||
typedef int (*decompress_fn) (unsigned char *inbuf, long len,
|
||||
long (*fill)(void*, unsigned long),
|
||||
long (*flush)(void*, unsigned long),
|
||||
unsigned char *outbuf,
|
||||
int *posp,
|
||||
long *posp,
|
||||
void(*error)(char *x));
|
||||
|
||||
/* inbuf - input buffer
|
||||
@@ -33,7 +33,7 @@ typedef int (*decompress_fn) (unsigned char *inbuf, int len,
|
||||
|
||||
|
||||
/* Utility routine to detect the decompression method */
|
||||
decompress_fn decompress_method(const unsigned char *inbuf, int len,
|
||||
decompress_fn decompress_method(const unsigned char *inbuf, long len,
|
||||
const char **name);
|
||||
|
||||
#endif
|
||||
|
@@ -1,10 +1,10 @@
|
||||
#ifndef LINUX_DECOMPRESS_INFLATE_H
|
||||
#define LINUX_DECOMPRESS_INFLATE_H
|
||||
|
||||
int gunzip(unsigned char *inbuf, int len,
|
||||
int(*fill)(void*, unsigned int),
|
||||
int(*flush)(void*, unsigned int),
|
||||
int gunzip(unsigned char *inbuf, long len,
|
||||
long (*fill)(void*, unsigned long),
|
||||
long (*flush)(void*, unsigned long),
|
||||
unsigned char *output,
|
||||
int *pos,
|
||||
long *pos,
|
||||
void(*error_fn)(char *x));
|
||||
#endif
|
||||
|
@@ -1,10 +1,10 @@
|
||||
#ifndef DECOMPRESS_UNLZ4_H
|
||||
#define DECOMPRESS_UNLZ4_H
|
||||
|
||||
int unlz4(unsigned char *inbuf, int len,
|
||||
int(*fill)(void*, unsigned int),
|
||||
int(*flush)(void*, unsigned int),
|
||||
int unlz4(unsigned char *inbuf, long len,
|
||||
long (*fill)(void*, unsigned long),
|
||||
long (*flush)(void*, unsigned long),
|
||||
unsigned char *output,
|
||||
int *pos,
|
||||
long *pos,
|
||||
void(*error)(char *x));
|
||||
#endif
|
||||
|
@@ -1,11 +1,11 @@
|
||||
#ifndef DECOMPRESS_UNLZMA_H
|
||||
#define DECOMPRESS_UNLZMA_H
|
||||
|
||||
int unlzma(unsigned char *, int,
|
||||
int(*fill)(void*, unsigned int),
|
||||
int(*flush)(void*, unsigned int),
|
||||
int unlzma(unsigned char *, long,
|
||||
long (*fill)(void*, unsigned long),
|
||||
long (*flush)(void*, unsigned long),
|
||||
unsigned char *output,
|
||||
int *posp,
|
||||
long *posp,
|
||||
void(*error)(char *x)
|
||||
);
|
||||
|
||||
|
@@ -1,10 +1,10 @@
|
||||
#ifndef DECOMPRESS_UNLZO_H
|
||||
#define DECOMPRESS_UNLZO_H
|
||||
|
||||
int unlzo(unsigned char *inbuf, int len,
|
||||
int(*fill)(void*, unsigned int),
|
||||
int(*flush)(void*, unsigned int),
|
||||
int unlzo(unsigned char *inbuf, long len,
|
||||
long (*fill)(void*, unsigned long),
|
||||
long (*flush)(void*, unsigned long),
|
||||
unsigned char *output,
|
||||
int *pos,
|
||||
long *pos,
|
||||
void(*error)(char *x));
|
||||
#endif
|
||||
|
@@ -10,10 +10,10 @@
|
||||
#ifndef DECOMPRESS_UNXZ_H
|
||||
#define DECOMPRESS_UNXZ_H
|
||||
|
||||
int unxz(unsigned char *in, int in_size,
|
||||
int (*fill)(void *dest, unsigned int size),
|
||||
int (*flush)(void *src, unsigned int size),
|
||||
unsigned char *out, int *in_used,
|
||||
int unxz(unsigned char *in, long in_size,
|
||||
long (*fill)(void *dest, unsigned long size),
|
||||
long (*flush)(void *src, unsigned long size),
|
||||
unsigned char *out, long *in_used,
|
||||
void (*error)(char *x));
|
||||
|
||||
#endif
|
||||
|
include/linux/devcoredump.h (new file, 35 lines)
@@ -0,0 +1,35 @@
#ifndef __DEVCOREDUMP_H
#define __DEVCOREDUMP_H

#include <linux/device.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_DEV_COREDUMP
void dev_coredumpv(struct device *dev, const void *data, size_t datalen,
		   gfp_t gfp);

void dev_coredumpm(struct device *dev, struct module *owner,
		   const void *data, size_t datalen, gfp_t gfp,
		   ssize_t (*read)(char *buffer, loff_t offset, size_t count,
				   const void *data, size_t datalen),
		   void (*free)(const void *data));
#else
static inline void dev_coredumpv(struct device *dev, const void *data,
				 size_t datalen, gfp_t gfp)
{
	vfree(data);
}

static inline void
dev_coredumpm(struct device *dev, struct module *owner,
	      const void *data, size_t datalen, gfp_t gfp,
	      ssize_t (*read)(char *buffer, loff_t offset, size_t count,
			      const void *data, size_t datalen),
	      void (*free)(const void *data))
{
	free(data);
}
#endif /* CONFIG_DEV_COREDUMP */

#endif /* __DEVCOREDUMP_H */
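The devcoredump interface above is declaration-only in this header. As a rough, hypothetical sketch (the function and buffer names are invented for illustration), a driver that has captured a firmware crash dump could hand it over as below; the buffer has to be vmalloc memory, since the core (and the stub above) releases it with vfree():

#include <linux/devcoredump.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Illustrative only: pass ownership of a captured crash buffer to the
 * devcoredump core, which exposes it to user space and frees it later.
 */
static void example_report_fw_crash(struct device *dev,
				    const void *fw_state, size_t len)
{
	void *dump = vmalloc(len);

	if (!dump)
		return;

	memcpy(dump, fw_state, len);
	dev_coredumpv(dev, dump, len, GFP_KERNEL);	/* 'dump' now belongs to the core */
}

dev_coredumpm() is the more general entry point for drivers that prefer to stream the dump through their own read/free callbacks instead of handing over a single vmalloc buffer.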
@@ -181,13 +181,14 @@ extern int bus_unregister_notifier(struct bus_type *bus,
 * with the device lock held in the core, so be careful.
 */
#define BUS_NOTIFY_ADD_DEVICE		0x00000001 /* device added */
#define BUS_NOTIFY_DEL_DEVICE		0x00000002 /* device removed */
#define BUS_NOTIFY_BIND_DRIVER		0x00000003 /* driver about to be
#define BUS_NOTIFY_DEL_DEVICE		0x00000002 /* device to be removed */
#define BUS_NOTIFY_REMOVED_DEVICE	0x00000003 /* device removed */
#define BUS_NOTIFY_BIND_DRIVER		0x00000004 /* driver about to be
						      bound */
#define BUS_NOTIFY_BOUND_DRIVER		0x00000004 /* driver bound to device */
#define BUS_NOTIFY_UNBIND_DRIVER	0x00000005 /* driver about to be
#define BUS_NOTIFY_BOUND_DRIVER		0x00000005 /* driver bound to device */
#define BUS_NOTIFY_UNBIND_DRIVER	0x00000006 /* driver about to be
						      unbound */
#define BUS_NOTIFY_UNBOUND_DRIVER	0x00000006 /* driver is unbound
#define BUS_NOTIFY_UNBOUND_DRIVER	0x00000007 /* driver is unbound
						      from the device */

extern struct kset *bus_get_kset(struct bus_type *bus);
@@ -607,8 +608,8 @@ extern int devres_release_group(struct device *dev, void *id);
extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
			     va_list ap);
extern char *devm_kasprintf(struct device *dev, gfp_t gfp,
			    const char *fmt, ...);
extern __printf(3, 4)
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
{
	return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
@@ -53,18 +53,13 @@

#ifdef __KERNEL__

#include <linux/device.h>

struct cma;
struct page;
struct device;

#ifdef CONFIG_DMA_CMA

/*
 * There is always at least global CMA area and a few optional device
 * private areas configured in kernel .config.
 */
#define MAX_CMA_AREAS	(1 + CONFIG_CMA_AREAS)

extern struct cma *dma_contiguous_default_area;

static inline struct cma *dev_get_cma_area(struct device *dev)
@@ -123,8 +118,6 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,

#else

#define MAX_CMA_AREAS	(0)

static inline struct cma *dev_get_cma_area(struct device *dev)
{
	return NULL;
@@ -263,6 +263,32 @@ struct dma_attrs;
#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
	dma_unmap_sg(dev, sgl, nents, dir)

#else
static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
					   dma_addr_t *dma_addr, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
}

static inline void dma_free_writecombine(struct device *dev, size_t size,
					 void *cpu_addr, dma_addr_t dma_addr)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
}

static inline int dma_mmap_writecombine(struct device *dev,
					struct vm_area_struct *vma,
					void *cpu_addr, dma_addr_t dma_addr,
					size_t size)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}
#endif /* CONFIG_HAVE_DMA_ATTRS */

#ifdef CONFIG_NEED_DMA_MAP_STATE
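When CONFIG_HAVE_DMA_ATTRS is set, the writecombine helpers above are thin wrappers that pass DMA_ATTR_WRITE_COMBINE to dma_alloc_attrs()/dma_free_attrs()/dma_mmap_attrs(). A minimal sketch of a caller, with a hypothetical device pointer and buffer size, might look like:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative only: allocate and release a write-combined DMA buffer,
 * e.g. for a descriptor ring the CPU writes and the device reads.
 */
static int example_alloc_wc_buffer(struct device *dev, size_t size)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_writecombine(dev, size, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand dma_handle to the device, fill cpu_addr ... */

	dma_free_writecombine(dev, size, cpu_addr, dma_handle);
	return 0;
}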
include/linux/dma/dw.h (new file, 64 lines)
@@ -0,0 +1,64 @@
|
||||
/*
|
||||
* Driver for the Synopsys DesignWare DMA Controller
|
||||
*
|
||||
* Copyright (C) 2007 Atmel Corporation
|
||||
* Copyright (C) 2010-2011 ST Microelectronics
|
||||
* Copyright (C) 2014 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#ifndef _DMA_DW_H
|
||||
#define _DMA_DW_H
|
||||
|
||||
#include <linux/clk.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/dmaengine.h>
|
||||
|
||||
#include <linux/platform_data/dma-dw.h>
|
||||
|
||||
struct dw_dma;
|
||||
|
||||
/**
|
||||
* struct dw_dma_chip - representation of DesignWare DMA controller hardware
|
||||
* @dev: struct device of the DMA controller
|
||||
* @irq: irq line
|
||||
* @regs: memory mapped I/O space
|
||||
* @clk: hclk clock
|
||||
* @dw: struct dw_dma that is filed by dw_dma_probe()
|
||||
*/
|
||||
struct dw_dma_chip {
|
||||
struct device *dev;
|
||||
int irq;
|
||||
void __iomem *regs;
|
||||
struct clk *clk;
|
||||
struct dw_dma *dw;
|
||||
};
|
||||
|
||||
/* Export to the platform drivers */
|
||||
int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata);
|
||||
int dw_dma_remove(struct dw_dma_chip *chip);
|
||||
|
||||
/* DMA API extensions */
|
||||
struct dw_desc;
|
||||
|
||||
struct dw_cyclic_desc {
|
||||
struct dw_desc **desc;
|
||||
unsigned long periods;
|
||||
void (*period_callback)(void *param);
|
||||
void *period_callback_param;
|
||||
};
|
||||
|
||||
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
|
||||
dma_addr_t buf_addr, size_t buf_len, size_t period_len,
|
||||
enum dma_transfer_direction direction);
|
||||
void dw_dma_cyclic_free(struct dma_chan *chan);
|
||||
int dw_dma_cyclic_start(struct dma_chan *chan);
|
||||
void dw_dma_cyclic_stop(struct dma_chan *chan);
|
||||
|
||||
dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
|
||||
|
||||
dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
|
||||
|
||||
#endif /* _DMA_DW_H */
|
@@ -37,7 +37,6 @@
|
||||
*/
|
||||
typedef s32 dma_cookie_t;
|
||||
#define DMA_MIN_COOKIE 1
|
||||
#define DMA_MAX_COOKIE INT_MAX
|
||||
|
||||
static inline int dma_submit_error(dma_cookie_t cookie)
|
||||
{
|
||||
@@ -200,15 +199,12 @@ enum dma_ctrl_flags {
|
||||
* configuration data in statically from the platform). An additional
|
||||
* argument of struct dma_slave_config must be passed in with this
|
||||
* command.
|
||||
* @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller
|
||||
* into external start mode.
|
||||
*/
|
||||
enum dma_ctrl_cmd {
|
||||
DMA_TERMINATE_ALL,
|
||||
DMA_PAUSE,
|
||||
DMA_RESUME,
|
||||
DMA_SLAVE_CONFIG,
|
||||
FSLDMA_EXTERNAL_START,
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -308,7 +304,9 @@ enum dma_slave_buswidth {
|
||||
* struct dma_slave_config - dma slave channel runtime config
|
||||
* @direction: whether the data shall go in or out on this slave
|
||||
* channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
|
||||
* legal values.
|
||||
* legal values. DEPRECATED, drivers should use the direction argument
|
||||
* to the device_prep_slave_sg and device_prep_dma_cyclic functions or
|
||||
* the dir field in the dma_interleaved_template structure.
|
||||
* @src_addr: this is the physical address where DMA slave data
|
||||
* should be read (RX), if the source is memory this argument is
|
||||
* ignored.
|
||||
@@ -671,7 +669,7 @@ struct dma_device {
|
||||
struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
|
||||
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
|
||||
size_t period_len, enum dma_transfer_direction direction,
|
||||
unsigned long flags, void *context);
|
||||
unsigned long flags);
|
||||
struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
|
||||
struct dma_chan *chan, struct dma_interleaved_template *xt,
|
||||
unsigned long flags);
|
||||
@@ -746,7 +744,7 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
|
||||
unsigned long flags)
|
||||
{
|
||||
return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
|
||||
period_len, dir, flags, NULL);
|
||||
period_len, dir, flags);
|
||||
}
|
||||
|
||||
static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
|
||||
@@ -756,6 +754,16 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
|
||||
return chan->device->device_prep_interleaved_dma(chan, xt, flags);
|
||||
}
|
||||
|
||||
static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
|
||||
struct dma_chan *chan,
|
||||
struct scatterlist *dst_sg, unsigned int dst_nents,
|
||||
struct scatterlist *src_sg, unsigned int src_nents,
|
||||
unsigned long flags)
|
||||
{
|
||||
return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
|
||||
src_sg, src_nents, flags);
|
||||
}
|
||||
|
||||
static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
|
||||
{
|
||||
if (!chan || !caps)
|
||||
@@ -901,18 +909,6 @@ static inline void dmaengine_put(void)
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_NET_DMA
|
||||
#define net_dmaengine_get() dmaengine_get()
|
||||
#define net_dmaengine_put() dmaengine_put()
|
||||
#else
|
||||
static inline void net_dmaengine_get(void)
|
||||
{
|
||||
}
|
||||
static inline void net_dmaengine_put(void)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ASYNC_TX_DMA
|
||||
#define async_dmaengine_get() dmaengine_get()
|
||||
#define async_dmaengine_put() dmaengine_put()
|
||||
@@ -934,16 +930,8 @@ async_dma_find_channel(enum dma_transaction_type type)
|
||||
return NULL;
|
||||
}
|
||||
#endif /* CONFIG_ASYNC_TX_DMA */
|
||||
|
||||
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
|
||||
void *dest, void *src, size_t len);
|
||||
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
|
||||
struct page *page, unsigned int offset, void *kdata, size_t len);
|
||||
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
|
||||
struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
|
||||
unsigned int src_off, size_t len);
|
||||
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
|
||||
struct dma_chan *chan);
|
||||
struct dma_chan *chan);
|
||||
|
||||
static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
|
||||
{
|
||||
|
@@ -56,13 +56,19 @@ struct dmar_drhd_unit {
|
||||
struct intel_iommu *iommu;
|
||||
};
|
||||
|
||||
struct dmar_pci_path {
|
||||
u8 bus;
|
||||
u8 device;
|
||||
u8 function;
|
||||
};
|
||||
|
||||
struct dmar_pci_notify_info {
|
||||
struct pci_dev *dev;
|
||||
unsigned long event;
|
||||
int bus;
|
||||
u16 seg;
|
||||
u16 level;
|
||||
struct acpi_dmar_pci_path path[];
|
||||
struct dmar_pci_path path[];
|
||||
} __attribute__((packed));
|
||||
|
||||
extern struct rw_semaphore dmar_global_lock;
|
||||
|
@@ -52,7 +52,7 @@
|
||||
#endif
|
||||
|
||||
extern const char *drbd_buildtag(void);
|
||||
#define REL_VERSION "8.4.3"
|
||||
#define REL_VERSION "8.4.5"
|
||||
#define API_VERSION 1
|
||||
#define PRO_VERSION_MIN 86
|
||||
#define PRO_VERSION_MAX 101
|
||||
@@ -245,7 +245,7 @@ enum drbd_disk_state {
|
||||
D_DISKLESS,
|
||||
D_ATTACHING, /* In the process of reading the meta-data */
|
||||
D_FAILED, /* Becomes D_DISKLESS as soon as we told it the peer */
|
||||
/* when >= D_FAILED it is legal to access mdev->bc */
|
||||
/* when >= D_FAILED it is legal to access mdev->ldev */
|
||||
D_NEGOTIATING, /* Late attaching state, we need to talk to the peer */
|
||||
D_INCONSISTENT,
|
||||
D_OUTDATED,
|
||||
|
@@ -171,6 +171,10 @@ GENL_struct(DRBD_NLA_NET_CONF, 5, net_conf,
|
||||
__flg_field(28, DRBD_GENLA_F_MANDATORY | DRBD_F_INVARIANT, tentative)
|
||||
__flg_field_def(29, DRBD_GENLA_F_MANDATORY, use_rle, DRBD_USE_RLE_DEF)
|
||||
/* 9: __u32_field_def(30, DRBD_GENLA_F_MANDATORY, fencing_policy, DRBD_FENCING_DEF) */
|
||||
/* 9: __str_field_def(31, DRBD_GENLA_F_MANDATORY, name, SHARED_SECRET_MAX) */
|
||||
/* 9: __u32_field(32, DRBD_F_REQUIRED | DRBD_F_INVARIANT, peer_node_id) */
|
||||
__flg_field_def(33, 0 /* OPTIONAL */, csums_after_crash_only, DRBD_CSUMS_AFTER_CRASH_ONLY_DEF)
|
||||
__u32_field_def(34, 0 /* OPTIONAL */, sock_check_timeo, DRBD_SOCKET_CHECK_TIMEO_DEF)
|
||||
)
|
||||
|
||||
GENL_struct(DRBD_NLA_SET_ROLE_PARMS, 6, set_role_parms,
|
||||
|
@@ -214,6 +214,7 @@
|
||||
#define DRBD_ALLOW_TWO_PRIMARIES_DEF 0
|
||||
#define DRBD_ALWAYS_ASBP_DEF 0
|
||||
#define DRBD_USE_RLE_DEF 1
|
||||
#define DRBD_CSUMS_AFTER_CRASH_ONLY_DEF 0
|
||||
|
||||
#define DRBD_AL_STRIPES_MIN 1
|
||||
#define DRBD_AL_STRIPES_MAX 1024
|
||||
@@ -224,4 +225,9 @@
|
||||
#define DRBD_AL_STRIPE_SIZE_MAX 16777216
|
||||
#define DRBD_AL_STRIPE_SIZE_DEF 32
|
||||
#define DRBD_AL_STRIPE_SIZE_SCALE 'k' /* kilobytes */
|
||||
|
||||
#define DRBD_SOCKET_CHECK_TIMEO_MIN 0
|
||||
#define DRBD_SOCKET_CHECK_TIMEO_MAX DRBD_PING_TIMEO_MAX
|
||||
#define DRBD_SOCKET_CHECK_TIMEO_DEF 0
|
||||
#define DRBD_SOCKET_CHECK_TIMEO_SCALE '1'
|
||||
#endif
|
||||
|
@@ -1,111 +0,0 @@
|
||||
/*
|
||||
* Driver for the Synopsys DesignWare DMA Controller
|
||||
*
|
||||
* Copyright (C) 2007 Atmel Corporation
|
||||
* Copyright (C) 2010-2011 ST Microelectronics
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#ifndef DW_DMAC_H
|
||||
#define DW_DMAC_H
|
||||
|
||||
#include <linux/dmaengine.h>
|
||||
|
||||
/**
|
||||
* struct dw_dma_slave - Controller-specific information about a slave
|
||||
*
|
||||
* @dma_dev: required DMA master device. Depricated.
|
||||
* @bus_id: name of this device channel, not just a device name since
|
||||
* devices may have more than one channel e.g. "foo_tx"
|
||||
* @cfg_hi: Platform-specific initializer for the CFG_HI register
|
||||
* @cfg_lo: Platform-specific initializer for the CFG_LO register
|
||||
* @src_master: src master for transfers on allocated channel.
|
||||
* @dst_master: dest master for transfers on allocated channel.
|
||||
*/
|
||||
struct dw_dma_slave {
|
||||
struct device *dma_dev;
|
||||
u32 cfg_hi;
|
||||
u32 cfg_lo;
|
||||
u8 src_master;
|
||||
u8 dst_master;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dw_dma_platform_data - Controller configuration parameters
|
||||
* @nr_channels: Number of channels supported by hardware (max 8)
|
||||
* @is_private: The device channels should be marked as private and not for
|
||||
* by the general purpose DMA channel allocator.
|
||||
* @chan_allocation_order: Allocate channels starting from 0 or 7
|
||||
* @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
|
||||
* @block_size: Maximum block size supported by the controller
|
||||
* @nr_masters: Number of AHB masters supported by the controller
|
||||
* @data_width: Maximum data width supported by hardware per AHB master
|
||||
* (0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
|
||||
*/
|
||||
struct dw_dma_platform_data {
|
||||
unsigned int nr_channels;
|
||||
bool is_private;
|
||||
#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
|
||||
#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
|
||||
unsigned char chan_allocation_order;
|
||||
#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */
|
||||
#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */
|
||||
unsigned char chan_priority;
|
||||
unsigned short block_size;
|
||||
unsigned char nr_masters;
|
||||
unsigned char data_width[4];
|
||||
};
|
||||
|
||||
/* bursts size */
|
||||
enum dw_dma_msize {
|
||||
DW_DMA_MSIZE_1,
|
||||
DW_DMA_MSIZE_4,
|
||||
DW_DMA_MSIZE_8,
|
||||
DW_DMA_MSIZE_16,
|
||||
DW_DMA_MSIZE_32,
|
||||
DW_DMA_MSIZE_64,
|
||||
DW_DMA_MSIZE_128,
|
||||
DW_DMA_MSIZE_256,
|
||||
};
|
||||
|
||||
/* Platform-configurable bits in CFG_HI */
|
||||
#define DWC_CFGH_FCMODE (1 << 0)
|
||||
#define DWC_CFGH_FIFO_MODE (1 << 1)
|
||||
#define DWC_CFGH_PROTCTL(x) ((x) << 2)
|
||||
#define DWC_CFGH_SRC_PER(x) ((x) << 7)
|
||||
#define DWC_CFGH_DST_PER(x) ((x) << 11)
|
||||
|
||||
/* Platform-configurable bits in CFG_LO */
|
||||
#define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */
|
||||
#define DWC_CFGL_LOCK_CH_BLOCK (1 << 12)
|
||||
#define DWC_CFGL_LOCK_CH_XACT (2 << 12)
|
||||
#define DWC_CFGL_LOCK_BUS_XFER (0 << 14) /* scope of LOCK_BUS */
|
||||
#define DWC_CFGL_LOCK_BUS_BLOCK (1 << 14)
|
||||
#define DWC_CFGL_LOCK_BUS_XACT (2 << 14)
|
||||
#define DWC_CFGL_LOCK_CH (1 << 15) /* channel lockout */
|
||||
#define DWC_CFGL_LOCK_BUS (1 << 16) /* busmaster lockout */
|
||||
#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
|
||||
#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */
|
||||
|
||||
/* DMA API extensions */
|
||||
struct dw_cyclic_desc {
|
||||
struct dw_desc **desc;
|
||||
unsigned long periods;
|
||||
void (*period_callback)(void *param);
|
||||
void *period_callback_param;
|
||||
};
|
||||
|
||||
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
|
||||
dma_addr_t buf_addr, size_t buf_len, size_t period_len,
|
||||
enum dma_transfer_direction direction);
|
||||
void dw_dma_cyclic_free(struct dma_chan *chan);
|
||||
int dw_dma_cyclic_start(struct dma_chan *chan);
|
||||
void dw_dma_cyclic_stop(struct dma_chan *chan);
|
||||
|
||||
dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
|
||||
|
||||
dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
|
||||
|
||||
#endif /* DW_DMAC_H */
|
@@ -42,7 +42,7 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
|
||||
#if defined(CONFIG_DYNAMIC_DEBUG)
|
||||
extern int ddebug_remove_module(const char *mod_name);
|
||||
extern __printf(2, 3)
|
||||
int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...);
|
||||
void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...);
|
||||
|
||||
extern int ddebug_dyndbg_module_param_cb(char *param, char *val,
|
||||
const char *modname);
|
||||
@@ -50,15 +50,15 @@ extern int ddebug_dyndbg_module_param_cb(char *param, char *val,
|
||||
struct device;
|
||||
|
||||
extern __printf(3, 4)
|
||||
int __dynamic_dev_dbg(struct _ddebug *descriptor, const struct device *dev,
|
||||
const char *fmt, ...);
|
||||
void __dynamic_dev_dbg(struct _ddebug *descriptor, const struct device *dev,
|
||||
const char *fmt, ...);
|
||||
|
||||
struct net_device;
|
||||
|
||||
extern __printf(3, 4)
|
||||
int __dynamic_netdev_dbg(struct _ddebug *descriptor,
|
||||
const struct net_device *dev,
|
||||
const char *fmt, ...);
|
||||
void __dynamic_netdev_dbg(struct _ddebug *descriptor,
|
||||
const struct net_device *dev,
|
||||
const char *fmt, ...);
|
||||
|
||||
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
|
||||
static struct _ddebug __aligned(8) \
|
||||
|
@@ -73,14 +73,22 @@ static inline void dql_queued(struct dql *dql, unsigned int count)
{
	BUG_ON(count > DQL_MAX_OBJECT);

	dql->num_queued += count;
	dql->last_obj_cnt = count;

	/* We want to force a write first, so that the cpu does not attempt
	 * to get the cache line containing last_obj_cnt, num_queued, adj_limit
	 * in Shared state, but directly does a Request For Ownership.
	 * It is only a hint; we use barrier() only.
	 */
	barrier();

	dql->num_queued += count;
}

/* Returns how many objects can be queued, < 0 indicates over limit. */
static inline int dql_avail(const struct dql *dql)
{
	return dql->adj_limit - dql->num_queued;
	return ACCESS_ONCE(dql->adj_limit) - ACCESS_ONCE(dql->num_queued);
}

/* Record number of completed objects and recalculate the limit. */
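The dql_queued()/dql_avail() pair above is the accounting core behind byte queue limits. A hypothetical transmit path (names invented for the sketch; real network drivers normally go through the netdev_tx_sent_queue()/netdev_tx_completed_queue() wrappers rather than raw dql calls) would use it roughly as follows:

#include <linux/dynamic_queue_limits.h>

/* Illustrative only: charge bytes when packets are queued to the device
 * and credit them back on completion so the limit can adapt.
 */
static bool example_tx(struct dql *dql, unsigned int bytes)
{
	dql_queued(dql, bytes);

	/* Negative avail means we are over the current limit and the
	 * caller should stop submitting until completions catch up.
	 */
	return dql_avail(dql) >= 0;
}

static void example_tx_complete(struct dql *dql, unsigned int bytes)
{
	dql_completed(dql, bytes);	/* recomputes the adaptive limit */
}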
@@ -194,6 +194,9 @@ static inline char *mc_event_error_type(const unsigned int err_type)
|
||||
* @MEM_DDR3: DDR3 RAM
|
||||
* @MEM_RDDR3: Registered DDR3 RAM
|
||||
* This is a variant of the DDR3 memories.
|
||||
* @MEM_DDR4: DDR4 RAM
|
||||
* @MEM_RDDR4: Registered DDR4 RAM
|
||||
* This is a variant of the DDR4 memories.
|
||||
*/
|
||||
enum mem_type {
|
||||
MEM_EMPTY = 0,
|
||||
@@ -213,6 +216,8 @@ enum mem_type {
|
||||
MEM_XDR,
|
||||
MEM_DDR3,
|
||||
MEM_RDDR3,
|
||||
MEM_DDR4,
|
||||
MEM_RDDR4,
|
||||
};
|
||||
|
||||
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
|
||||
|
@@ -92,6 +92,7 @@ typedef struct {
|
||||
#define EFI_MEMORY_WC ((u64)0x0000000000000002ULL) /* write-coalescing */
|
||||
#define EFI_MEMORY_WT ((u64)0x0000000000000004ULL) /* write-through */
|
||||
#define EFI_MEMORY_WB ((u64)0x0000000000000008ULL) /* write-back */
|
||||
#define EFI_MEMORY_UCE ((u64)0x0000000000000010ULL) /* uncached, exported */
|
||||
#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */
|
||||
#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */
|
||||
#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */
|
||||
@@ -502,6 +503,10 @@ typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char
|
||||
typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor,
|
||||
u32 attr, unsigned long data_size,
|
||||
void *data);
|
||||
typedef efi_status_t
|
||||
efi_set_variable_nonblocking_t(efi_char16_t *name, efi_guid_t *vendor,
|
||||
u32 attr, unsigned long data_size, void *data);
|
||||
|
||||
typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count);
|
||||
typedef void efi_reset_system_t (int reset_type, efi_status_t status,
|
||||
unsigned long data_size, efi_char16_t *data);
|
||||
@@ -821,6 +826,7 @@ extern struct efi {
|
||||
efi_get_variable_t *get_variable;
|
||||
efi_get_next_variable_t *get_next_variable;
|
||||
efi_set_variable_t *set_variable;
|
||||
efi_set_variable_nonblocking_t *set_variable_nonblocking;
|
||||
efi_query_variable_info_t *query_variable_info;
|
||||
efi_update_capsule_t *update_capsule;
|
||||
efi_query_capsule_caps_t *query_capsule_caps;
|
||||
@@ -886,6 +892,13 @@ extern bool efi_poweroff_required(void);
|
||||
(md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \
|
||||
(md) = (void *)(md) + (m)->desc_size)
|
||||
|
||||
/*
|
||||
* Format an EFI memory descriptor's type and attributes to a user-provided
|
||||
* character buffer, as per snprintf(), and return the buffer.
|
||||
*/
|
||||
char * __init efi_md_typeattr_format(char *buf, size_t size,
|
||||
const efi_memory_desc_t *md);
|
||||
|
||||
/**
|
||||
* efi_range_is_wc - check the WC bit on an address range
|
||||
* @start: starting kvirt address
|
||||
@@ -1034,6 +1047,7 @@ struct efivar_operations {
|
||||
efi_get_variable_t *get_variable;
|
||||
efi_get_next_variable_t *get_next_variable;
|
||||
efi_set_variable_t *set_variable;
|
||||
efi_set_variable_nonblocking_t *set_variable_nonblocking;
|
||||
efi_query_variable_store_t *query_variable_store;
|
||||
};
|
||||
|
||||
@@ -1156,6 +1170,9 @@ int efivars_sysfs_init(void);
|
||||
#ifdef CONFIG_EFI_RUNTIME_MAP
|
||||
int efi_runtime_map_init(struct kobject *);
|
||||
void efi_runtime_map_setup(void *, int, u32);
|
||||
int efi_get_runtime_map_size(void);
|
||||
int efi_get_runtime_map_desc_size(void);
|
||||
int efi_runtime_map_copy(void *buf, size_t bufsz);
|
||||
#else
|
||||
static inline int efi_runtime_map_init(struct kobject *kobj)
|
||||
{
|
||||
@@ -1164,6 +1181,22 @@ static inline int efi_runtime_map_init(struct kobject *kobj)
|
||||
|
||||
static inline void
|
||||
efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) {}
|
||||
|
||||
static inline int efi_get_runtime_map_size(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int efi_get_runtime_map_desc_size(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int efi_runtime_map_copy(void *buf, size_t bufsz)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/* prototypes shared between arch specific and generic stub code */
|
||||
@@ -1208,4 +1241,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
|
||||
unsigned long *load_addr,
|
||||
unsigned long *load_size);
|
||||
|
||||
efi_status_t efi_parse_options(char *cmdline);
|
||||
|
||||
bool efi_runtime_disabled(void);
|
||||
#endif /* _LINUX_EFI_H */
|
||||
|
@@ -29,6 +29,7 @@
|
||||
#include <asm/bitsperlong.h>
|
||||
|
||||
#ifdef __KERNEL__
|
||||
u32 eth_get_headlen(void *data, unsigned int max_len);
|
||||
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
|
||||
extern const struct header_ops eth_header_ops;
|
||||
|
||||
|
@@ -257,6 +257,10 @@ struct ethtool_ops {
|
||||
struct ethtool_eeprom *, u8 *);
|
||||
int (*get_eee)(struct net_device *, struct ethtool_eee *);
|
||||
int (*set_eee)(struct net_device *, struct ethtool_eee *);
|
||||
int (*get_tunable)(struct net_device *,
|
||||
const struct ethtool_tunable *, void *);
|
||||
int (*set_tunable)(struct net_device *,
|
||||
const struct ethtool_tunable *, const void *);
|
||||
|
||||
|
||||
};
|
||||
|
@@ -34,8 +34,10 @@
|
||||
* @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW).
|
||||
* @state_on: print_state is overriden with state_on if attached.
|
||||
* If NULL, default method of extcon class is used.
|
||||
* @state_off: print_state is overriden with state_on if detached.
|
||||
* @state_off: print_state is overriden with state_off if detached.
|
||||
* If NUll, default method of extcon class is used.
|
||||
* @check_on_resume: Boolean describing whether to check the state of gpio
|
||||
* while resuming from sleep.
|
||||
*
|
||||
* Note that in order for state_on or state_off to be valid, both state_on
|
||||
* and state_off should be not NULL. If at least one of them is NULL,
|
||||
|
@@ -1,287 +0,0 @@
|
||||
/*
|
||||
* sm5502.h
|
||||
*
|
||||
* Copyright (c) 2014 Samsung Electronics Co., Ltd
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_EXTCON_SM5502_H
|
||||
#define __LINUX_EXTCON_SM5502_H
|
||||
|
||||
enum sm5502_types {
|
||||
TYPE_SM5502,
|
||||
};
|
||||
|
||||
/* SM5502 registers */
|
||||
enum sm5502_reg {
|
||||
SM5502_REG_DEVICE_ID = 0x01,
|
||||
SM5502_REG_CONTROL,
|
||||
SM5502_REG_INT1,
|
||||
SM5502_REG_INT2,
|
||||
SM5502_REG_INTMASK1,
|
||||
SM5502_REG_INTMASK2,
|
||||
SM5502_REG_ADC,
|
||||
SM5502_REG_TIMING_SET1,
|
||||
SM5502_REG_TIMING_SET2,
|
||||
SM5502_REG_DEV_TYPE1,
|
||||
SM5502_REG_DEV_TYPE2,
|
||||
SM5502_REG_BUTTON1,
|
||||
SM5502_REG_BUTTON2,
|
||||
SM5502_REG_CAR_KIT_STATUS,
|
||||
SM5502_REG_RSVD1,
|
||||
SM5502_REG_RSVD2,
|
||||
SM5502_REG_RSVD3,
|
||||
SM5502_REG_RSVD4,
|
||||
SM5502_REG_MANUAL_SW1,
|
||||
SM5502_REG_MANUAL_SW2,
|
||||
SM5502_REG_DEV_TYPE3,
|
||||
SM5502_REG_RSVD5,
|
||||
SM5502_REG_RSVD6,
|
||||
SM5502_REG_RSVD7,
|
||||
SM5502_REG_RSVD8,
|
||||
SM5502_REG_RSVD9,
|
||||
SM5502_REG_RESET,
|
||||
SM5502_REG_RSVD10,
|
||||
SM5502_REG_RESERVED_ID1,
|
||||
SM5502_REG_RSVD11,
|
||||
SM5502_REG_RSVD12,
|
||||
SM5502_REG_RESERVED_ID2,
|
||||
SM5502_REG_RSVD13,
|
||||
SM5502_REG_OCP,
|
||||
SM5502_REG_RSVD14,
|
||||
SM5502_REG_RSVD15,
|
||||
SM5502_REG_RSVD16,
|
||||
SM5502_REG_RSVD17,
|
||||
SM5502_REG_RSVD18,
|
||||
SM5502_REG_RSVD19,
|
||||
SM5502_REG_RSVD20,
|
||||
SM5502_REG_RSVD21,
|
||||
SM5502_REG_RSVD22,
|
||||
SM5502_REG_RSVD23,
|
||||
SM5502_REG_RSVD24,
|
||||
SM5502_REG_RSVD25,
|
||||
SM5502_REG_RSVD26,
|
||||
SM5502_REG_RSVD27,
|
||||
SM5502_REG_RSVD28,
|
||||
SM5502_REG_RSVD29,
|
||||
SM5502_REG_RSVD30,
|
||||
SM5502_REG_RSVD31,
|
||||
SM5502_REG_RSVD32,
|
||||
SM5502_REG_RSVD33,
|
||||
SM5502_REG_RSVD34,
|
||||
SM5502_REG_RSVD35,
|
||||
SM5502_REG_RSVD36,
|
||||
SM5502_REG_RESERVED_ID3,
|
||||
|
||||
SM5502_REG_END,
|
||||
};
|
||||
|
||||
/* Define SM5502 MASK/SHIFT constant */
|
||||
#define SM5502_REG_DEVICE_ID_VENDOR_SHIFT 0
|
||||
#define SM5502_REG_DEVICE_ID_VERSION_SHIFT 3
|
||||
#define SM5502_REG_DEVICE_ID_VENDOR_MASK (0x3 << SM5502_REG_DEVICE_ID_VENDOR_SHIFT)
|
||||
#define SM5502_REG_DEVICE_ID_VERSION_MASK (0x1f << SM5502_REG_DEVICE_ID_VERSION_SHIFT)
|
||||
|
||||
#define SM5502_REG_CONTROL_MASK_INT_SHIFT 0
|
||||
#define SM5502_REG_CONTROL_WAIT_SHIFT 1
|
||||
#define SM5502_REG_CONTROL_MANUAL_SW_SHIFT 2
|
||||
#define SM5502_REG_CONTROL_RAW_DATA_SHIFT 3
|
||||
#define SM5502_REG_CONTROL_SW_OPEN_SHIFT 4
|
||||
#define SM5502_REG_CONTROL_MASK_INT_MASK (0x1 << SM5502_REG_CONTROL_MASK_INT_SHIFT)
|
||||
#define SM5502_REG_CONTROL_WAIT_MASK (0x1 << SM5502_REG_CONTROL_WAIT_SHIFT)
|
||||
#define SM5502_REG_CONTROL_MANUAL_SW_MASK (0x1 << SM5502_REG_CONTROL_MANUAL_SW_SHIFT)
|
||||
#define SM5502_REG_CONTROL_RAW_DATA_MASK (0x1 << SM5502_REG_CONTROL_RAW_DATA_SHIFT)
|
||||
#define SM5502_REG_CONTROL_SW_OPEN_MASK (0x1 << SM5502_REG_CONTROL_SW_OPEN_SHIFT)
|
||||
|
||||
#define SM5502_REG_INTM1_ATTACH_SHIFT 0
|
||||
#define SM5502_REG_INTM1_DETACH_SHIFT 1
|
||||
#define SM5502_REG_INTM1_KP_SHIFT 2
|
||||
#define SM5502_REG_INTM1_LKP_SHIFT 3
|
||||
#define SM5502_REG_INTM1_LKR_SHIFT 4
|
||||
#define SM5502_REG_INTM1_OVP_EVENT_SHIFT 5
|
||||
#define SM5502_REG_INTM1_OCP_EVENT_SHIFT 6
|
||||
#define SM5502_REG_INTM1_OVP_OCP_DIS_SHIFT 7
|
||||
#define SM5502_REG_INTM1_ATTACH_MASK (0x1 << SM5502_REG_INTM1_ATTACH_SHIFT)
|
||||
#define SM5502_REG_INTM1_DETACH_MASK (0x1 << SM5502_REG_INTM1_DETACH_SHIFT)
|
||||
#define SM5502_REG_INTM1_KP_MASK (0x1 << SM5502_REG_INTM1_KP_SHIFT)
|
||||
#define SM5502_REG_INTM1_LKP_MASK (0x1 << SM5502_REG_INTM1_LKP_SHIFT)
|
||||
#define SM5502_REG_INTM1_LKR_MASK (0x1 << SM5502_REG_INTM1_LKR_SHIFT)
|
||||
#define SM5502_REG_INTM1_OVP_EVENT_MASK (0x1 << SM5502_REG_INTM1_OVP_EVENT_SHIFT)
|
||||
#define SM5502_REG_INTM1_OCP_EVENT_MASK (0x1 << SM5502_REG_INTM1_OCP_EVENT_SHIFT)
|
||||
#define SM5502_REG_INTM1_OVP_OCP_DIS_MASK (0x1 << SM5502_REG_INTM1_OVP_OCP_DIS_SHIFT)
|
||||
|
||||
#define SM5502_REG_INTM2_VBUS_DET_SHIFT 0
|
||||
#define SM5502_REG_INTM2_REV_ACCE_SHIFT 1
|
||||
#define SM5502_REG_INTM2_ADC_CHG_SHIFT 2
|
||||
#define SM5502_REG_INTM2_STUCK_KEY_SHIFT 3
|
||||
#define SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT 4
|
||||
#define SM5502_REG_INTM2_MHL_SHIFT 5
|
||||
#define SM5502_REG_INTM2_VBUS_DET_MASK (0x1 << SM5502_REG_INTM2_VBUS_DET_SHIFT)
|
||||
#define SM5502_REG_INTM2_REV_ACCE_MASK (0x1 << SM5502_REG_INTM2_REV_ACCE_SHIFT)
|
||||
#define SM5502_REG_INTM2_ADC_CHG_MASK (0x1 << SM5502_REG_INTM2_ADC_CHG_SHIFT)
|
||||
#define SM5502_REG_INTM2_STUCK_KEY_MASK (0x1 << SM5502_REG_INTM2_STUCK_KEY_SHIFT)
|
||||
#define SM5502_REG_INTM2_STUCK_KEY_RCV_MASK (0x1 << SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT)
|
||||
#define SM5502_REG_INTM2_MHL_MASK (0x1 << SM5502_REG_INTM2_MHL_SHIFT)
|
||||
|
||||
#define SM5502_REG_ADC_SHIFT 0
|
||||
#define SM5502_REG_ADC_MASK (0x1f << SM5502_REG_ADC_SHIFT)
|
||||
|
||||
#define SM5502_REG_TIMING_SET1_KEY_PRESS_SHIFT 4
|
||||
#define SM5502_REG_TIMING_SET1_KEY_PRESS_MASK (0xf << SM5502_REG_TIMING_SET1_KEY_PRESS_SHIFT)
|
||||
#define TIMING_KEY_PRESS_100MS 0x0
|
||||
#define TIMING_KEY_PRESS_200MS 0x1
|
||||
#define TIMING_KEY_PRESS_300MS 0x2
|
||||
#define TIMING_KEY_PRESS_400MS 0x3
|
||||
#define TIMING_KEY_PRESS_500MS 0x4
|
||||
#define TIMING_KEY_PRESS_600MS 0x5
|
||||
#define TIMING_KEY_PRESS_700MS 0x6
|
||||
#define TIMING_KEY_PRESS_800MS 0x7
|
||||
#define TIMING_KEY_PRESS_900MS 0x8
|
||||
#define TIMING_KEY_PRESS_1000MS 0x9
|
||||
#define SM5502_REG_TIMING_SET1_ADC_DET_SHIFT 0
|
||||
#define SM5502_REG_TIMING_SET1_ADC_DET_MASK (0xf << SM5502_REG_TIMING_SET1_ADC_DET_SHIFT)
|
||||
#define TIMING_ADC_DET_50MS 0x0
|
||||
#define TIMING_ADC_DET_100MS 0x1
|
||||
#define TIMING_ADC_DET_150MS 0x2
|
||||
#define TIMING_ADC_DET_200MS 0x3
|
||||
#define TIMING_ADC_DET_300MS 0x4
|
||||
#define TIMING_ADC_DET_400MS 0x5
|
||||
#define TIMING_ADC_DET_500MS 0x6
|
||||
#define TIMING_ADC_DET_600MS 0x7
|
||||
#define TIMING_ADC_DET_700MS 0x8
|
||||
#define TIMING_ADC_DET_800MS 0x9
|
||||
#define TIMING_ADC_DET_900MS 0xA
|
||||
#define TIMING_ADC_DET_1000MS 0xB
|
||||
|
||||
#define SM5502_REG_TIMING_SET2_SW_WAIT_SHIFT 4
|
||||
#define SM5502_REG_TIMING_SET2_SW_WAIT_MASK (0xf << SM5502_REG_TIMING_SET2_SW_WAIT_SHIFT)
|
||||
#define TIMING_SW_WAIT_10MS 0x0
|
||||
#define TIMING_SW_WAIT_30MS 0x1
|
||||
#define TIMING_SW_WAIT_50MS 0x2
|
||||
#define TIMING_SW_WAIT_70MS 0x3
|
||||
#define TIMING_SW_WAIT_90MS 0x4
|
||||
#define TIMING_SW_WAIT_110MS 0x5
|
||||
#define TIMING_SW_WAIT_130MS 0x6
|
||||
#define TIMING_SW_WAIT_150MS 0x7
|
||||
#define TIMING_SW_WAIT_170MS 0x8
|
||||
#define TIMING_SW_WAIT_190MS 0x9
|
||||
#define TIMING_SW_WAIT_210MS 0xA
|
||||
#define SM5502_REG_TIMING_SET2_LONG_KEY_SHIFT 0
|
||||
#define SM5502_REG_TIMING_SET2_LONG_KEY_MASK (0xf << SM5502_REG_TIMING_SET2_LONG_KEY_SHIFT)
|
||||
#define TIMING_LONG_KEY_300MS 0x0
|
||||
#define TIMING_LONG_KEY_400MS 0x1
|
||||
#define TIMING_LONG_KEY_500MS 0x2
|
||||
#define TIMING_LONG_KEY_600MS 0x3
|
||||
#define TIMING_LONG_KEY_700MS 0x4
|
||||
#define TIMING_LONG_KEY_800MS 0x5
|
||||
#define TIMING_LONG_KEY_900MS 0x6
|
||||
#define TIMING_LONG_KEY_1000MS 0x7
|
||||
#define TIMING_LONG_KEY_1100MS 0x8
|
||||
#define TIMING_LONG_KEY_1200MS 0x9
|
||||
#define TIMING_LONG_KEY_1300MS 0xA
|
||||
#define TIMING_LONG_KEY_1400MS 0xB
|
||||
#define TIMING_LONG_KEY_1500MS 0xC
|
||||
|
||||
#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_SHIFT 0
|
||||
#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_SHIFT 1
|
||||
#define SM5502_REG_DEV_TYPE1_USB_SDP_SHIFT 2
|
||||
#define SM5502_REG_DEV_TYPE1_UART_SHIFT 3
|
||||
#define SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_SHIFT 4
|
||||
#define SM5502_REG_DEV_TYPE1_USB_CHG_SHIFT 5
|
||||
#define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT 6
|
||||
#define SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT 7
|
||||
#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_MASK (0x1 << SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1__MASK (0x1 << SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE1_USB_SDP_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_SDP_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE1_UART_MASK (0x1 << SM5502_REG_DEV_TYPE1_UART_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_MASK (0x1 << SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE1_USB_CHG_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_CHG_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_MASK (0x1 << SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE1_USB_OTG_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT)
|
||||
|
||||
#define SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT 0
|
||||
#define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT 1
|
||||
#define SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT 2
|
||||
#define SM5502_REG_DEV_TYPE2_JIG_UART_OFF_SHIFT 3
|
||||
#define SM5502_REG_DEV_TYPE2_PPD_SHIFT 4
|
||||
#define SM5502_REG_DEV_TYPE2_TTY_SHIFT 5
|
||||
#define SM5502_REG_DEV_TYPE2_AV_CABLE_SHIFT 6
|
||||
#define SM5502_REG_DEV_TYPE2_JIG_USB_ON_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE2_JIG_UART_ON_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE2_JIG_UART_OFF_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_UART_OFF_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE2_PPD_MASK (0x1 << SM5502_REG_DEV_TYPE2_PPD_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE2_TTY_MASK (0x1 << SM5502_REG_DEV_TYPE2_TTY_SHIFT)
|
||||
#define SM5502_REG_DEV_TYPE2_AV_CABLE_MASK (0x1 << SM5502_REG_DEV_TYPE2_AV_CABLE_SHIFT)
|
||||
|
||||
#define SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT 0
|
||||
#define SM5502_REG_MANUAL_SW1_DP_SHIFT 2
|
||||
#define SM5502_REG_MANUAL_SW1_DM_SHIFT 5
|
||||
#define SM5502_REG_MANUAL_SW1_VBUSIN_MASK (0x3 << SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT)
|
||||
#define SM5502_REG_MANUAL_SW1_DP_MASK (0x7 << SM5502_REG_MANUAL_SW1_DP_SHIFT)
|
||||
#define SM5502_REG_MANUAL_SW1_DM_MASK (0x7 << SM5502_REG_MANUAL_SW1_DM_SHIFT)
|
||||
#define VBUSIN_SWITCH_OPEN 0x0
|
||||
#define VBUSIN_SWITCH_VBUSOUT 0x1
|
||||
#define VBUSIN_SWITCH_MIC 0x2
|
||||
#define VBUSIN_SWITCH_VBUSOUT_WITH_USB 0x3
|
||||
#define DM_DP_CON_SWITCH_OPEN 0x0
|
||||
#define DM_DP_CON_SWITCH_USB 0x1
|
||||
#define DM_DP_CON_SWITCH_AUDIO 0x2
|
||||
#define DM_DP_CON_SWITCH_UART 0x3
|
||||
#define DM_DP_SWITCH_OPEN ((DM_DP_CON_SWITCH_OPEN <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
|
||||
| (DM_DP_CON_SWITCH_OPEN <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
|
||||
#define DM_DP_SWITCH_USB ((DM_DP_CON_SWITCH_USB <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
|
||||
| (DM_DP_CON_SWITCH_USB <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
|
||||
#define DM_DP_SWITCH_AUDIO ((DM_DP_CON_SWITCH_AUDIO <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
|
||||
| (DM_DP_CON_SWITCH_AUDIO <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
|
||||
#define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
|
||||
| (DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
|
||||
|
||||
/* SM5502 Interrupts */
|
||||
enum sm5502_irq {
|
||||
/* INT1 */
|
||||
SM5502_IRQ_INT1_ATTACH,
|
||||
SM5502_IRQ_INT1_DETACH,
|
||||
SM5502_IRQ_INT1_KP,
|
||||
SM5502_IRQ_INT1_LKP,
|
||||
SM5502_IRQ_INT1_LKR,
|
||||
SM5502_IRQ_INT1_OVP_EVENT,
|
||||
SM5502_IRQ_INT1_OCP_EVENT,
|
||||
SM5502_IRQ_INT1_OVP_OCP_DIS,
|
||||
|
||||
/* INT2 */
|
||||
SM5502_IRQ_INT2_VBUS_DET,
|
||||
SM5502_IRQ_INT2_REV_ACCE,
|
||||
SM5502_IRQ_INT2_ADC_CHG,
|
||||
SM5502_IRQ_INT2_STUCK_KEY,
|
||||
SM5502_IRQ_INT2_STUCK_KEY_RCV,
|
||||
SM5502_IRQ_INT2_MHL,
|
||||
|
||||
SM5502_IRQ_NUM,
|
||||
};
|
||||
|
||||
#define SM5502_IRQ_INT1_ATTACH_MASK BIT(0)
|
||||
#define SM5502_IRQ_INT1_DETACH_MASK BIT(1)
|
||||
#define SM5502_IRQ_INT1_KP_MASK BIT(2)
|
||||
#define SM5502_IRQ_INT1_LKP_MASK BIT(3)
|
||||
#define SM5502_IRQ_INT1_LKR_MASK BIT(4)
|
||||
#define SM5502_IRQ_INT1_OVP_EVENT_MASK BIT(5)
|
||||
#define SM5502_IRQ_INT1_OCP_EVENT_MASK BIT(6)
|
||||
#define SM5502_IRQ_INT1_OVP_OCP_DIS_MASK BIT(7)
|
||||
#define SM5502_IRQ_INT2_VBUS_DET_MASK BIT(0)
|
||||
#define SM5502_IRQ_INT2_REV_ACCE_MASK BIT(1)
|
||||
#define SM5502_IRQ_INT2_ADC_CHG_MASK BIT(2)
|
||||
#define SM5502_IRQ_INT2_STUCK_KEY_MASK BIT(3)
|
||||
#define SM5502_IRQ_INT2_STUCK_KEY_RCV_MASK BIT(4)
|
||||
#define SM5502_IRQ_INT2_MHL_MASK BIT(5)
|
||||
|
||||
#endif /* __LINUX_EXTCON_SM5502_H */
|
@@ -15,8 +15,9 @@
|
||||
#include <linux/types.h>
|
||||
|
||||
#define F2FS_SUPER_OFFSET 1024 /* byte-size offset */
|
||||
#define F2FS_LOG_SECTOR_SIZE 9 /* 9 bits for 512 byte */
|
||||
#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* 4KB: F2FS_BLKSIZE */
|
||||
#define F2FS_MIN_LOG_SECTOR_SIZE 9 /* 9 bits for 512 bytes */
|
||||
#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */
|
||||
#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */
|
||||
#define F2FS_BLKSIZE 4096 /* support only 4KB block */
|
||||
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
|
||||
#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE)
|
||||
@@ -24,6 +25,9 @@
|
||||
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
|
||||
#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
|
||||
|
||||
/* 0, 1(node nid), 2(meta nid) are reserved node id */
|
||||
#define F2FS_RESERVED_NODE_NUM 3
|
||||
|
||||
#define F2FS_ROOT_INO(sbi) (sbi->root_ino_num)
|
||||
#define F2FS_NODE_INO(sbi) (sbi->node_ino_num)
|
||||
#define F2FS_META_INO(sbi) (sbi->meta_ino_num)
|
||||
@@ -82,11 +86,14 @@ struct f2fs_super_block {
|
||||
/*
|
||||
* For checkpoint
|
||||
*/
|
||||
#define CP_FSCK_FLAG 0x00000010
|
||||
#define CP_ERROR_FLAG 0x00000008
|
||||
#define CP_COMPACT_SUM_FLAG 0x00000004
|
||||
#define CP_ORPHAN_PRESENT_FLAG 0x00000002
|
||||
#define CP_UMOUNT_FLAG 0x00000001
|
||||
|
||||
#define F2FS_CP_PACKS 2 /* # of checkpoint packs */
|
||||
|
||||
struct f2fs_checkpoint {
|
||||
__le64 checkpoint_ver; /* checkpoint block version number */
|
||||
__le64 user_block_count; /* # of user blocks */
|
||||
@@ -123,6 +130,9 @@ struct f2fs_checkpoint {
|
||||
*/
|
||||
#define F2FS_ORPHANS_PER_BLOCK 1020
|
||||
|
||||
#define GET_ORPHAN_BLOCKS(n) ((n + F2FS_ORPHANS_PER_BLOCK - 1) / \
|
||||
F2FS_ORPHANS_PER_BLOCK)
|
||||
|
||||
struct f2fs_orphan_block {
|
||||
__le32 ino[F2FS_ORPHANS_PER_BLOCK]; /* inode numbers */
|
||||
__le32 reserved; /* reserved */
|
||||
@@ -144,6 +154,7 @@ struct f2fs_extent {
|
||||
#define F2FS_NAME_LEN 255
|
||||
#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */
|
||||
#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
|
||||
#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
|
||||
#define ADDRS_PER_INODE(fi) addrs_per_inode(fi)
|
||||
#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
|
||||
#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
|
||||
@@ -163,8 +174,9 @@ struct f2fs_extent {
|
||||
#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \
|
||||
F2FS_INLINE_XATTR_ADDRS - 1))
|
||||
|
||||
#define INLINE_DATA_OFFSET (PAGE_CACHE_SIZE - sizeof(struct node_footer) \
|
||||
- sizeof(__le32) * (DEF_ADDRS_PER_INODE + 5 - 1))
|
||||
#define INLINE_DATA_OFFSET (PAGE_CACHE_SIZE - sizeof(struct node_footer) -\
|
||||
sizeof(__le32) * (DEF_ADDRS_PER_INODE + \
|
||||
DEF_NIDS_PER_INODE - 1))
|
||||
|
||||
struct f2fs_inode {
|
||||
__le16 i_mode; /* file mode */
|
||||
@@ -194,7 +206,7 @@ struct f2fs_inode {
|
||||
|
||||
__le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
|
||||
|
||||
__le32 i_nid[5]; /* direct(2), indirect(2),
|
||||
__le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2),
|
||||
double_indirect(1) node id */
|
||||
} __packed;
|
||||
|
||||
|
@@ -553,7 +553,7 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
|
||||
#define fb_memcpy_fromfb sbus_memcpy_fromio
|
||||
#define fb_memcpy_tofb sbus_memcpy_toio
|
||||
|
||||
#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__)
|
||||
#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__) || defined(__arm__)
|
||||
|
||||
#define fb_readb __raw_readb
|
||||
#define fb_readw __raw_readw
|
||||
|
@@ -4,58 +4,24 @@
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__
#include <stdarg.h>
#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <asm/cacheflush.h>
#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>
/* Internally used and optimized filter representation with extended
* instruction set based on top of classic BPF.
*/
/* instruction classes */
#define BPF_ALU64 0x07 /* alu mode in double word width */
/* ld/ldx fields */
#define BPF_DW 0x18 /* double word */
#define BPF_XADD 0xc0 /* exclusive add */
/* alu/jmp fields */
#define BPF_MOV 0xb0 /* mov reg to reg */
#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */
/* change endianness of a register */
#define BPF_END 0xd0 /* flags for endianness conversion: */
#define BPF_TO_LE 0x00 /* convert to little-endian */
#define BPF_TO_BE 0x08 /* convert to big-endian */
#define BPF_FROM_LE BPF_TO_LE
#define BPF_FROM_BE BPF_TO_BE
#define BPF_JNE 0x50 /* jump != */
#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
#define BPF_CALL 0x80 /* function call */
#define BPF_EXIT 0x90 /* function return */
/* Register numbers */
enum {
BPF_REG_0 = 0,
BPF_REG_1,
BPF_REG_2,
BPF_REG_3,
BPF_REG_4,
BPF_REG_5,
BPF_REG_6,
BPF_REG_7,
BPF_REG_8,
BPF_REG_9,
BPF_REG_10,
__MAX_BPF_REG,
};
/* BPF has 10 general purpose 64-bit registers and stack frame. */
#define MAX_BPF_REG __MAX_BPF_REG
struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;
/* ArgX, context and stack frame pointer register positions. Note,
* Arg1, Arg2, Arg3, etc are used as argument mappings of function
@@ -161,6 +127,30 @@ enum {
.off = 0, \
.imm = IMM })
/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM) \
BPF_LD_IMM64_RAW(DST, 0, IMM)
#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
((struct bpf_insn) { \
.code = BPF_LD | BPF_DW | BPF_IMM, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = 0, \
.imm = (__u32) (IMM) }), \
((struct bpf_insn) { \
.code = 0, /* zero is reserved opcode */ \
.dst_reg = 0, \
.src_reg = 0, \
.off = 0, \
.imm = ((__u64) (IMM)) >> 32 })
#define BPF_PSEUDO_MAP_FD 1
/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
@@ -299,14 +289,6 @@ enum {
#define SK_RUN_FILTER(filter, ctx) \
(*filter->prog->bpf_func)(ctx, filter->prog->insnsi)
struct bpf_insn {
__u8 code; /* opcode */
__u8 dst_reg:4; /* dest register */
__u8 src_reg:4; /* source register */
__s16 off; /* signed offset */
__s32 imm; /* signed immediate constant */
};
#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
@@ -320,20 +302,23 @@ struct sock_fprog_kern {
struct sock_filter *filter;
};
struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_binary_header {
unsigned int pages;
u8 image[];
};
struct bpf_prog {
u32 jited:1, /* Is our filter JIT'ed? */
len:31; /* Number of filter blocks */
u16 pages; /* Number of allocated pages */
bool jited; /* Is our filter JIT'ed? */
u32 len; /* Number of filter blocks */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
struct bpf_prog_aux *aux; /* Auxiliary fields */
unsigned int (*bpf_func)(const struct sk_buff *skb,
const struct bpf_insn *filter);
/* Instructions for interpreter */
union {
struct sock_filter insns[0];
struct bpf_insn insnsi[0];
struct work_struct work;
};
};
@@ -353,6 +338,26 @@ static inline unsigned int bpf_prog_size(unsigned int proglen)
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
set_memory_ro((unsigned long)fp, fp->pages);
}
static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
set_memory_rw((unsigned long)fp, fp->pages);
}
#else
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
}
static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
}
#endif /* CONFIG_DEBUG_SET_MODULE_RONX */
int sk_filter(struct sock *sk, struct sk_buff *skb);
void bpf_prog_select_runtime(struct bpf_prog *fp);
@@ -361,6 +366,17 @@ void bpf_prog_free(struct bpf_prog *fp);
int bpf_convert_filter(struct sock_filter *prog, int len,
struct bpf_insn *new_prog, int *new_len);
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);
static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
bpf_prog_unlock_ro(fp);
__bpf_prog_free(fp);
}
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
void bpf_prog_destroy(struct bpf_prog *fp);
@@ -377,6 +393,38 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
void bpf_int_jit_compile(struct bpf_prog *fp);
#ifdef CONFIG_BPF_JIT
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
unsigned int alignment,
bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);
void bpf_jit_compile(struct bpf_prog *fp);
void bpf_jit_free(struct bpf_prog *fp);
static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
u32 pass, void *image)
{
pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
flen, proglen, pass, image);
if (image)
print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
16, 1, image, proglen, false);
}
#else
static inline void bpf_jit_compile(struct bpf_prog *fp)
{
}
static inline void bpf_jit_free(struct bpf_prog *fp)
{
bpf_prog_unlock_free(fp);
}
#endif /* CONFIG_BPF_JIT */
#define BPF_ANC BIT(15)
static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
@@ -424,36 +472,6 @@ static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
return bpf_internal_load_pointer_neg_helper(skb, k, size);
}
#ifdef CONFIG_BPF_JIT
#include <stdarg.h>
#include <linux/linkage.h>
#include <linux/printk.h>
void bpf_jit_compile(struct bpf_prog *fp);
void bpf_jit_free(struct bpf_prog *fp);
static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
u32 pass, void *image)
{
pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
flen, proglen, pass, image);
if (image)
print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
16, 1, image, proglen, false);
}
#else
#include <linux/slab.h>
static inline void bpf_jit_compile(struct bpf_prog *fp)
{
}
static inline void bpf_jit_free(struct bpf_prog *fp)
{
kfree(fp);
}
#endif /* CONFIG_BPF_JIT */
static inline int bpf_tell_extensions(void)
{
return SKF_AD_MAX;
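The BPF_LD_IMM64()/BPF_LD_MAP_FD() macros above expand to two struct bpf_insn slots: the first carries the low 32 bits of the immediate, the second (with the reserved zero opcode) carries the high 32 bits. A minimal sketch of a program built from them, assuming the classic BPF_JMP instruction class for the hand-rolled exit instruction (the exit insn is an illustration, not part of this hunk):

/* Sketch only: load a 64-bit immediate into R0 and return it.
 * BPF_LD_IMM64() itself occupies two instruction slots.
 */
static const struct bpf_insn demo_prog[] = {
	BPF_LD_IMM64(BPF_REG_0, 0x123456789abcdef0ULL),
	((struct bpf_insn) {
		.code    = BPF_JMP | BPF_EXIT,	/* assumed classic JMP class */
		.dst_reg = 0,
		.src_reg = 0,
		.off     = 0,
		.imm     = 0 }),
};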
@@ -10,6 +10,7 @@
#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/seqlock.h>
#include <linux/gfp.h>
/*
* When maximum proportion of some event type is specified, this is the
@@ -32,7 +33,7 @@ struct fprop_global {
seqcount_t sequence;
};
int fprop_global_init(struct fprop_global *p);
int fprop_global_init(struct fprop_global *p, gfp_t gfp);
void fprop_global_destroy(struct fprop_global *p);
bool fprop_new_period(struct fprop_global *p, int periods);
@@ -79,7 +80,7 @@ struct fprop_local_percpu {
raw_spinlock_t lock; /* Protect period and numerator */
};
int fprop_local_init_percpu(struct fprop_local_percpu *pl);
int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
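Both init helpers now take an explicit gfp_t, so the caller chooses the allocation context for the underlying percpu counters. A sketch of an updated caller (illustrative only; the object and function names here are made up):

#include <linux/flex_proportions.h>
#include <linux/init.h>

static struct fprop_global demo_completions;	/* hypothetical example object */

static int __init demo_init(void)
{
	/* sleeping context here; an atomic caller would pass GFP_ATOMIC instead */
	return fprop_global_init(&demo_completions, GFP_KERNEL);
}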
@@ -31,6 +31,7 @@ struct font_desc {
#define SUN12x22_IDX 7
#define ACORN8x8_IDX 8
#define MINI4x6_IDX 9
#define FONT6x10_IDX 10
extern const struct font_desc font_vga_8x8,
font_vga_8x16,
@@ -41,7 +42,8 @@ extern const struct font_desc font_vga_8x8,
font_sun_8x16,
font_sun_12x22,
font_acorn_8x8,
font_mini_4x6;
font_mini_4x6,
font_6x10;
/* Find a font with a specific name */
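With font_6x10 exported, a console driver can also reach it through the by-name lookup that the comment above introduces. A sketch, assuming the usual find_font() declaration that follows this comment in the header and assuming the new font registers under the name "6x10" (both are assumptions, not shown in this hunk):

#include <linux/font.h>

static const struct font_desc *demo_pick_font(void)
{
	const struct font_desc *font = find_font("6x10");	/* name string is assumed */

	return font ? font : &font_vga_8x16;			/* fall back to a built-in */
}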
@@ -192,8 +192,6 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define READ 0
#define WRITE RW_MASK
#define READA RWA_MASK
#define KERNEL_READ (READ|REQ_KERNEL)
#define KERNEL_WRITE (WRITE|REQ_KERNEL)
#define READ_SYNC (READ | REQ_SYNC)
#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE)
@@ -224,6 +222,13 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */
#define ATTR_TIMES_SET (1 << 16)
/*
* Whiteout is represented by a char device. The following constants define the
* mode and device number to use.
*/
#define WHITEOUT_MODE 0
#define WHITEOUT_DEV 0
/*
* This is the Inode Attributes structure, used for notify_change(). It
* uses the above definitions as flags, to know which values have changed.
@@ -256,6 +261,12 @@ struct iattr {
*/
#include <linux/quota.h>
/*
* Maximum number of layers of fs stack. Needs to be limited to
* prevent kernel stack overflow
*/
#define FILESYSTEM_MAX_STACK_DEPTH 2
/**
* enum positive_aop_returns - aop return codes with specific semantics
*
@@ -387,7 +398,7 @@ struct address_space {
struct inode *host; /* owner: inode, block_device */
struct radix_tree_root page_tree; /* radix tree of all pages */
spinlock_t tree_lock; /* and lock protecting it */
unsigned int i_mmap_writable;/* count VM_SHARED mappings */
atomic_t i_mmap_writable;/* count VM_SHARED mappings */
struct rb_root i_mmap; /* tree of private and shared mappings */
struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
struct mutex i_mmap_mutex; /* protect tree, count, list */
@@ -470,10 +481,35 @@ static inline int mapping_mapped(struct address_space *mapping)
* Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
* marks vma as VM_SHARED if it is shared, and the file was opened for
* writing i.e. vma may be mprotected writable even if now readonly.
*
* If i_mmap_writable is negative, no new writable mappings are allowed. You
* can only deny writable mappings, if none exists right now.
*/
static inline int mapping_writably_mapped(struct address_space *mapping)
{
return mapping->i_mmap_writable != 0;
return atomic_read(&mapping->i_mmap_writable) > 0;
}
static inline int mapping_map_writable(struct address_space *mapping)
{
return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
0 : -EPERM;
}
static inline void mapping_unmap_writable(struct address_space *mapping)
{
atomic_dec(&mapping->i_mmap_writable);
}
static inline int mapping_deny_writable(struct address_space *mapping)
{
return atomic_dec_unless_positive(&mapping->i_mmap_writable) ?
0 : -EBUSY;
}
static inline void mapping_allow_writable(struct address_space *mapping)
{
atomic_inc(&mapping->i_mmap_writable);
}
/*
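The atomic i_mmap_writable helpers above form grab/release pairs: mapping_map_writable()/mapping_unmap_writable() bracket a writable shared mapping, while mapping_deny_writable()/mapping_allow_writable() bracket a region during which such mappings must not appear. A sketch of the deny side, built only from the inlines shown in this hunk (illustrative, not taken from the patch):

/* Fail with -EBUSY if a writable shared mapping already exists,
 * otherwise block new ones until we allow them again.
 */
static int demo_seal_mapping(struct address_space *mapping)
{
	int err = mapping_deny_writable(mapping);

	if (err)
		return err;
	/* ... work that must not race with writable mmaps ... */
	mapping_allow_writable(mapping);
	return 0;
}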
@@ -603,11 +639,13 @@ static inline int inode_unhashed(struct inode *inode)
* 2: child/target
* 3: xattr
* 4: second non-directory
* The last is for certain operations (such as rename) which lock two
* 5: second parent (when locking independent directories in rename)
*
* I_MUTEX_NONDIR2 is for certain operations (such as rename) which lock two
* non-directories at once.
*
* The locking order between these classes is
* parent -> child -> normal -> xattr -> second non-directory
* parent[2] -> child -> grandchild -> normal -> xattr -> second non-directory
*/
enum inode_i_mutex_lock_class
{
@@ -615,7 +653,8 @@ enum inode_i_mutex_lock_class
I_MUTEX_PARENT,
I_MUTEX_CHILD,
I_MUTEX_XATTR,
I_MUTEX_NONDIR2
I_MUTEX_NONDIR2,
I_MUTEX_PARENT2,
};
void lock_two_nondirectories(struct inode *, struct inode*);
@@ -826,13 +865,7 @@ static inline struct file *get_file(struct file *f)
*/
#define FILE_LOCK_DEFERRED 1
/*
* The POSIX file lock owner is determined by
* the "struct files_struct" in the thread group
* (or NULL for no owner - BSD locks).
*
* Lockd stuffs a "host" pointer into this.
*/
/* legacy typedef, should eventually be removed */
typedef void *fl_owner_t;
struct file_lock_operations {
@@ -843,10 +876,13 @@ struct file_lock_operations {
struct lock_manager_operations {
int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
unsigned long (*lm_owner_key)(struct file_lock *);
void (*lm_get_owner)(struct file_lock *, struct file_lock *);
void (*lm_put_owner)(struct file_lock *);
void (*lm_notify)(struct file_lock *); /* unblock callback */
int (*lm_grant)(struct file_lock *, struct file_lock *, int);
void (*lm_break)(struct file_lock *);
int (*lm_change)(struct file_lock **, int);
int (*lm_grant)(struct file_lock *, int);
bool (*lm_break)(struct file_lock *);
int (*lm_change)(struct file_lock **, int, struct list_head *);
void (*lm_setup)(struct file_lock *, void **);
};
struct lock_manager {
@@ -941,7 +977,7 @@ void locks_free_lock(struct file_lock *fl);
extern void locks_init_lock(struct file_lock *);
extern struct file_lock * locks_alloc_lock(void);
extern void locks_copy_lock(struct file_lock *, struct file_lock *);
extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
extern void locks_copy_conflock(struct file_lock *, struct file_lock *);
extern void locks_remove_posix(struct file *, fl_owner_t);
extern void locks_remove_file(struct file *);
extern void locks_release_private(struct file_lock *);
@@ -955,11 +991,9 @@ extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
extern void lease_get_mtime(struct inode *, struct timespec *time);
extern int generic_setlease(struct file *, long, struct file_lock **);
extern int vfs_setlease(struct file *, long, struct file_lock **);
extern int lease_modify(struct file_lock **, int);
extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
extern int lease_modify(struct file_lock **, int, struct list_head *);
#else /* !CONFIG_FILE_LOCKING */
static inline int fcntl_getlk(struct file *file, unsigned int cmd,
struct flock __user *user)
@@ -988,12 +1022,12 @@ static inline int fcntl_setlk64(unsigned int fd, struct file *file,
#endif
static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
return 0;
return -EINVAL;
}
static inline int fcntl_getlease(struct file *filp)
{
return 0;
return F_UNLCK;
}
static inline void locks_init_lock(struct file_lock *fl)
@@ -1001,7 +1035,7 @@ static inline void locks_init_lock(struct file_lock *fl)
return;
}
static inline void __locks_copy_lock(struct file_lock *new, struct file_lock *fl)
static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
return;
}
@@ -1075,33 +1109,22 @@ static inline void lease_get_mtime(struct inode *inode, struct timespec *time)
}
static inline int generic_setlease(struct file *filp, long arg,
struct file_lock **flp)
struct file_lock **flp, void **priv)
{
return -EINVAL;
}
static inline int vfs_setlease(struct file *filp, long arg,
struct file_lock **lease)
struct file_lock **lease, void **priv)
{
return -EINVAL;
}
static inline int lease_modify(struct file_lock **before, int arg)
static inline int lease_modify(struct file_lock **before, int arg,
struct list_head *dispose)
{
return -EINVAL;
}
static inline int lock_may_read(struct inode *inode, loff_t start,
unsigned long len)
{
return 1;
}
static inline int lock_may_write(struct inode *inode, loff_t start,
unsigned long len)
{
return 1;
}
#endif /* !CONFIG_FILE_LOCKING */
@@ -1126,8 +1149,8 @@ extern void fasync_free(struct fasync_struct *);
/* can be called from interrupts */
extern void kill_fasync(struct fasync_struct **, int, int);
extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
extern int f_setown(struct file *filp, unsigned long arg, int force);
extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
extern void f_setown(struct file *filp, unsigned long arg, int force);
extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
extern int send_sigurg(struct fown_struct *fown);
@@ -1250,6 +1273,7 @@ struct super_block {
/* AIO completions deferred from interrupt context */
struct workqueue_struct *s_dio_done_wq;
struct hlist_head s_pins;
/*
* Keep the lru lists last in the structure so they always sit on their
@@ -1258,6 +1282,11 @@ struct super_block {
struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
struct rcu_head rcu;
/*
* Indicates how deep in a filesystem stack this SB is
*/
int s_stack_depth;
};
extern struct timespec current_fs_time(struct super_block *sb);
@@ -1390,6 +1419,7 @@ extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct ino
extern int vfs_rmdir(struct inode *, struct dentry *);
extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int);
extern int vfs_whiteout(struct inode *, struct dentry *);
/*
* VFS dentry helper functions.
@@ -1480,7 +1510,7 @@ struct file_operations {
int (*flock) (struct file *, int, struct file_lock *);
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
int (*setlease)(struct file *, long, struct file_lock **);
int (*setlease)(struct file *, long, struct file_lock **, void **);
long (*fallocate)(struct file *file, int mode, loff_t offset,
loff_t len);
int (*show_fdinfo)(struct seq_file *m, struct file *f);
@@ -1520,6 +1550,9 @@ struct inode_operations {
umode_t create_mode, int *opened);
int (*tmpfile) (struct inode *, struct dentry *, umode_t);
int (*set_acl)(struct inode *, struct posix_acl *, int);
/* WARNING: probably going away soon, do not use! */
int (*dentry_open)(struct dentry *, struct file *, const struct cred *);
} ____cacheline_aligned;
ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
@@ -1617,6 +1650,9 @@ struct super_operations {
#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
(inode)->i_rdev == WHITEOUT_DEV)
/*
* Inode state bits. Protected by inode->i_lock
*
@@ -1829,7 +1865,8 @@ extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
extern void kern_unmount(struct vfsmount *mnt);
extern int may_umount_tree(struct vfsmount *);
extern int may_umount(struct vfsmount *);
extern long do_mount(const char *, const char *, const char *, unsigned long, void *);
extern long do_mount(const char *, const char __user *,
const char *, unsigned long, void *);
extern struct vfsmount *collect_mounts(struct path *);
extern void drop_collected_mounts(struct vfsmount *);
extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
@@ -1848,7 +1885,7 @@ extern int current_umask(void);
extern void ihold(struct inode * inode);
extern void iput(struct inode *);
static inline struct inode *file_inode(struct file *f)
static inline struct inode *file_inode(const struct file *f)
{
return f->f_inode;
}
@@ -2031,6 +2068,7 @@ extern struct file *file_open_name(struct filename *, int, umode_t);
extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
const char *, int);
extern int vfs_open(const struct path *, struct file *, const struct cred *);
extern struct file * dentry_open(const struct path *, int, const struct cred *);
extern int filp_close(struct file *, fl_owner_t id);
@@ -2244,7 +2282,9 @@ extern sector_t bmap(struct inode *, sector_t);
#endif
extern int notify_change(struct dentry *, struct iattr *, struct inode **);
extern int inode_permission(struct inode *, int);
extern int __inode_permission(struct inode *, int);
extern int generic_permission(struct inode *, int);
extern int __check_sticky(struct inode *dir, struct inode *inode);
static inline bool execute_ok(struct inode *inode)
{
@@ -2335,6 +2375,7 @@ extern int do_pipe_flags(int *, int);
extern int kernel_read(struct file *, loff_t, char *, unsigned long);
extern ssize_t kernel_write(struct file *, const char *, size_t, loff_t);
extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
extern struct file * open_exec(const char *);
/* fs/dcache.c -- generic fs support functions */
@@ -2428,6 +2469,7 @@ extern ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, lo
extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
/* fs/block_dev.c */
extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from);
extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
int datasync);
@@ -2442,6 +2484,9 @@ extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
struct file *, loff_t *, size_t, unsigned int);
extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
struct file *out, loff_t *, size_t len, unsigned int flags);
extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
loff_t *opos, size_t len, unsigned int flags);
extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
@@ -2584,6 +2629,7 @@ extern int simple_write_end(struct file *file, struct address_space *mapping,
struct page *page, void *fsdata);
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
extern int simple_nosetlease(struct file *, long, struct file_lock **, void **);
extern const struct dentry_operations simple_dentry_operations;
extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
@@ -2688,7 +2734,7 @@ static const struct file_operations __fops = { \
.read = simple_attr_read, \
.write = simple_attr_write, \
.llseek = generic_file_llseek, \
};
}
static inline __printf(1, 2)
void __simple_attr_check_format(const char *fmt, ...)
@@ -2726,6 +2772,14 @@ static inline int is_sxid(umode_t mode)
return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
}
static inline int check_sticky(struct inode *dir, struct inode *inode)
{
if (!(dir->i_mode & S_ISVTX))
return 0;
return __check_sticky(dir, inode);
}
static inline void inode_has_no_xattr(struct inode *inode)
{
if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC))
@@ -139,7 +139,6 @@ struct fs_platform_info {
int rx_ring, tx_ring; /* number of buffers on rx */
__u8 macaddr[ETH_ALEN]; /* mac address */
int rx_copybreak; /* limit we copy small frames */
int use_napi; /* use NAPI */
int napi_weight; /* NAPI weight */
int use_rmii; /* use RMII mode */
include/linux/fs_pin.h (new file)
@@ -0,0 +1,17 @@
#include <linux/fs.h>
struct fs_pin {
atomic_long_t count;
union {
struct {
struct hlist_node s_list;
struct hlist_node m_list;
};
struct rcu_head rcu;
};
void (*kill)(struct fs_pin *);
};
void pin_put(struct fs_pin *);
void pin_remove(struct fs_pin *);
void pin_insert(struct fs_pin *, struct vfsmount *);
@@ -781,13 +781,13 @@ struct fsl_ifc_regs {
__be32 amask;
u32 res4[0x2];
} amask_cs[FSL_IFC_BANK_COUNT];
u32 res5[0x17];
u32 res5[0x18];
struct {
__be32 csor_ext;
__be32 csor;
__be32 csor_ext;
u32 res6;
} csor_cs[FSL_IFC_BANK_COUNT];
u32 res7[0x19];
u32 res7[0x18];
struct {
__be32 ftim[4];
u32 res8[0x8];
include/linux/fsldma.h (new file)
@@ -0,0 +1,13 @@
/*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef FSL_DMA_H
#define FSL_DMA_H
/* fsl dma API for external start */
int fsl_dma_external_start(struct dma_chan *dchan, int enable);
#endif
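The new header exports a single call. A client driver that has already obtained a Freescale DMA channel could toggle external start on it roughly like this (sketch, not from the patch; the enable/disable meaning of the int argument is an assumption):

#include <linux/dmaengine.h>
#include <linux/fsldma.h>

static int demo_enable_external_start(struct dma_chan *chan)
{
	return fsl_dma_external_start(chan, 1);	/* 1 = enable, 0 = disable (assumed) */
}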
@@ -322,16 +322,18 @@ extern int fsnotify_fasync(int fd, struct file *file, int on);
extern void fsnotify_destroy_event(struct fsnotify_group *group,
struct fsnotify_event *event);
/* attach the event to the group notification queue */
extern int fsnotify_add_notify_event(struct fsnotify_group *group,
struct fsnotify_event *event,
int (*merge)(struct list_head *,
struct fsnotify_event *));
extern int fsnotify_add_event(struct fsnotify_group *group,
struct fsnotify_event *event,
int (*merge)(struct list_head *,
struct fsnotify_event *));
/* Remove passed event from groups notification queue */
extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
/* true if the group notification queue is empty */
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */
extern struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group);
extern struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group);
/* return AND dequeue the first event on the notification queue */
extern struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group);
extern struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group);
/* functions used to manipulate the marks attached to inodes */
@@ -56,6 +56,8 @@ struct ftrace_ops;
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *regs);
ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
/*
* FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
* set in the flags member.
@@ -89,6 +91,9 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
* INITIALIZED - The ftrace_ops has already been initialized (first use time
*            register_ftrace_function() is called, it will initialized the ops)
* DELETED - The ops are being deleted, do not let them be registered again.
* ADDING  - The ops is in the process of being added.
* REMOVING - The ops is in the process of being removed.
* MODIFYING - The ops is in the process of changing its filter functions.
*/
enum {
FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -100,8 +105,20 @@ enum {
FTRACE_OPS_FL_STUB = 1 << 6,
FTRACE_OPS_FL_INITIALIZED = 1 << 7,
FTRACE_OPS_FL_DELETED = 1 << 8,
FTRACE_OPS_FL_ADDING = 1 << 9,
FTRACE_OPS_FL_REMOVING = 1 << 10,
FTRACE_OPS_FL_MODIFYING = 1 << 11,
};
#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
struct ftrace_hash *notrace_hash;
struct ftrace_hash *filter_hash;
struct mutex regex_lock;
};
#endif
/*
* Note, ftrace_ops can be referenced outside of RCU protection.
* (Although, for perf, the control ops prevent that). If ftrace_ops is
@@ -121,10 +138,9 @@ struct ftrace_ops {
int __percpu *disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
int nr_trampolines;
struct ftrace_hash *notrace_hash;
struct ftrace_hash *filter_hash;
struct ftrace_hash *tramp_hash;
struct mutex regex_lock;
struct ftrace_ops_hash local_hash;
struct ftrace_ops_hash *func_hash;
struct ftrace_ops_hash old_hash;
unsigned long trampoline;
#endif
};
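For context, the FTRACE_OPS_FL_* bits and the hashes now grouped in struct ftrace_ops_hash are managed by the ftrace core; a typical caller only provides the callback and registers the ops. A sketch, using the register_ftrace_function() call mentioned in the comment above (the .func member and the callback signature follow the ftrace_func_t typedef in this hunk; this is an illustration, not part of the patch):

static void demo_trace_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
{
	/* runs for every traced function; keep it minimal and reentrancy-safe */
}

static struct ftrace_ops demo_ops = {
	.func = demo_trace_callback,
};

/* register_ftrace_function(&demo_ops) would then set ENABLED/INITIALIZED internally. */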
@@ -571,40 +571,6 @@ do { \
__trace_printk(ip, fmt, ##args); \
} while (0)
/**
* tracepoint_string - register constant persistent string to trace system
* @str - a constant persistent string that will be referenced in tracepoints
*
* If constant strings are being used in tracepoints, it is faster and
* more efficient to just save the pointer to the string and reference
* that with a printf "%s" instead of saving the string in the ring buffer
* and wasting space and time.
*
* The problem with the above approach is that userspace tools that read
* the binary output of the trace buffers do not have access to the string.
* Instead they just show the address of the string which is not very
* useful to users.
*
* With tracepoint_string(), the string will be registered to the tracing
* system and exported to userspace via the debugfs/tracing/printk_formats
* file that maps the string address to the string text. This way userspace
* tools that read the binary buffers have a way to map the pointers to
* the ASCII strings they represent.
*
* The @str used must be a constant string and persistent as it would not
* make sense to show a string that no longer exists. But it is still fine
* to be used with modules, because when modules are unloaded, if they
* had tracepoints, the ring buffers are cleared too. As long as the string
* does not change during the life of the module, it is fine to use
* tracepoint_string() within a module.
*/
#define tracepoint_string(str) \
({ \
static const char *___tp_str __tracepoint_string = str; \
___tp_str; \
})
#define __tracepoint_string __attribute__((section("__tracepoint_str")))
#ifdef CONFIG_PERF_EVENTS
struct perf_event;
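A usage sketch of the pattern the comment above describes (illustrative; trace_demo_event() stands in for any tracepoint that records a const char * field and prints it with "%s" - it is hypothetical, not a real tracepoint):

/* The literal is registered once with the trace system; only its address
 * goes into the ring buffer, and printk_formats lets userspace map that
 * address back to the text.
 */
#define TPS(x) tracepoint_string(x)

static void demo_report_state(bool idle)
{
	trace_demo_event(idle ? TPS("idle") : TPS("busy"));	/* hypothetical tracepoint */
}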
@@ -110,6 +110,10 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data);
extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
unsigned long size, unsigned long start, unsigned int nr,
void *data);
extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data);
@@ -117,6 +121,9 @@ extern struct gen_pool *devm_gen_pool_create(struct device *dev,
int min_alloc_order, int nid);
extern struct gen_pool *dev_get_gen_pool(struct device *dev);
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
size_t size);
#ifdef CONFIG_OF
extern struct gen_pool *of_get_named_gen_pool(struct device_node *np,
const char *propname, int index);
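The new allocator callback plugs into the existing gen_pool_set_algo() hook shown at the top of this hunk. A sketch of selecting it for a pool (illustrative; the pool parameters are arbitrary example values):

#include <linux/genalloc.h>

static struct gen_pool *demo_pool_setup(void)
{
	struct gen_pool *pool = gen_pool_create(PAGE_SHIFT, -1);	/* page-sized chunks, any node */

	if (pool)
		gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);
	return pool;
}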
@@ -178,12 +178,12 @@ static int s_name ## _from_attrs_for_change(struct s_name *s, \
#define __assign(attr_nr, attr_flag, name, nla_type, type, assignment...) \
nla = ntb[attr_nr]; \
if (nla) { \
if (exclude_invariants && ((attr_flag) & DRBD_F_INVARIANT)) { \
if (exclude_invariants && !!((attr_flag) & DRBD_F_INVARIANT)) { \
pr_info("<< must not change invariant attr: %s\n", #name); \
return -EEXIST; \
} \
assignment; \
} else if (exclude_invariants && ((attr_flag) & DRBD_F_INVARIANT)) { \
} else if (exclude_invariants && !!((attr_flag) & DRBD_F_INVARIANT)) { \
/* attribute missing from payload, */ \
/* which was expected */ \
} else if ((attr_flag) & DRBD_F_REQUIRED) { \
@@ -156,7 +156,7 @@ struct vm_area_struct;
#define GFP_DMA32 __GFP_DMA32
/* Convert GFP flags to their corresponding migrate type */
static inline int allocflags_to_migratetype(gfp_t gfp_flags)
static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
{
WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
@@ -360,7 +360,7 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
/* This is different from alloc_pages_exact_node !!! */
void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
#define __get_free_page(gfp_mask) \
__get_free_pages((gfp_mask), 0)
include/linux/glob.h (new file)
@@ -0,0 +1,9 @@
#ifndef _LINUX_GLOB_H
#define _LINUX_GLOB_H
#include <linux/types.h>	/* For bool */
#include <linux/compiler.h>	/* For __pure */
bool __pure glob_match(char const *pat, char const *str);
#endif /* _LINUX_GLOB_H */
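A sketch of how a caller might use the new helper (illustrative; the pattern syntax is the usual shell-style *, ? and [] wildcards):

#include <linux/glob.h>

static bool demo_name_matches(const char *name)
{
	/* e.g. matches "card-0" ... "card-9" */
	return glob_match("card-[0-9]", name);
}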