Merge commit 'md/for-linus' into async-tx-next
Conflicts:
	drivers/md/raid5.c

@@ -598,8 +598,6 @@ typedef struct risc_timer_pram {
 #define CICR_IEN	((uint)0x00000080)	/* Int. enable */
 #define CICR_SPS	((uint)0x00000001)	/* SCC Spread */

-#define IMAP_ADDR	(get_immrbase())
-
 #define CPM_PIN_INPUT     0
 #define CPM_PIN_OUTPUT    1
 #define CPM_PIN_PRIMARY   0

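Note on the hunk above: the IMAP_ADDR shortcut is dropped from the CPM header. A minimal sketch of what callers do instead, assuming the usual get_immrbase()/ioremap interfaces (the map_immr helper name is hypothetical):

    /* Map the CPM internal-memory region at its runtime-discovered
     * physical base instead of going through the removed IMAP_ADDR. */
    #include <asm/io.h>

    extern phys_addr_t get_immrbase(void);	/* from sysdev/fsl_soc.h */

    static void __iomem *map_immr(unsigned long size)
    {
    	return ioremap(get_immrbase(), size);
    }
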
@@ -63,6 +63,8 @@ extern void udelay(unsigned long usecs);
 			udelay(delay);					\
 		else							\
 			cpu_relax();					\
+	if (!__ret)							\
+		__ret = (condition);					\
 	__ret;								\
 })

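The two added lines close a race in spin_event_timeout(): if the polling task is held off between its last sampling of the condition and the timebase check, the loop could report a timeout even though the event had fired. Re-sampling the condition once after the loop lets a late success still count. A usage sketch (regs and TX_IDLE_BIT are made-up names):

    u32 status;
    int ok;

    /* Poll for up to 1000us; delay=0 means cpu_relax() between reads.
     * Thanks to the final re-check, a success that lands just as the
     * timeout expires is still reported as success. */
    ok = spin_event_timeout(
    		(status = in_be32(&regs->status)) & TX_IDLE_BIT, 1000, 0);
    if (!ok)
    	printk(KERN_WARNING "device did not go idle\n");
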
@@ -309,7 +309,9 @@ static inline void dma_sync_single_for_cpu(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
+
+	if (dma_ops->sync_single_range_for_cpu)
+		dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
 			size, direction);
 }

@@ -320,7 +322,9 @@ static inline void dma_sync_single_for_device(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_device(dev, dma_handle,
+
+	if (dma_ops->sync_single_range_for_device)
+		dma_ops->sync_single_range_for_device(dev, dma_handle,
 			0, size, direction);
 }

@@ -331,7 +335,9 @@ static inline void dma_sync_sg_for_cpu(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

 	BUG_ON(!dma_ops);
-	dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
+
+	if (dma_ops->sync_sg_for_cpu)
+		dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
 }

 static inline void dma_sync_sg_for_device(struct device *dev,
@@ -341,7 +347,9 @@ static inline void dma_sync_sg_for_device(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

 	BUG_ON(!dma_ops);
-	dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
+
+	if (dma_ops->sync_sg_for_device)
+		dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
 }

 static inline void dma_sync_single_range_for_cpu(struct device *dev,
@@ -351,7 +359,9 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_cpu(dev, dma_handle,
+
+	if (dma_ops->sync_single_range_for_cpu)
+		dma_ops->sync_single_range_for_cpu(dev, dma_handle,
 			offset, size, direction);
 }

@@ -362,7 +372,9 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
+
+	if (dma_ops->sync_single_range_for_device)
+		dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
 			size, direction);
 }
 #else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */

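All six sync helpers above change the same way: the per-operation sync hook becomes optional. An ops table is still mandatory (the BUG_ON stays), but a cache-coherent platform may now leave the sync_* pointers NULL instead of installing empty stubs, and the corresponding dma_sync_*() calls quietly do nothing. A sketch of such a table, with hypothetical helper names:

    /* Hypothetical ops for a cache-coherent bus: no CPU/device cache
     * synchronization is needed, so every .sync_* hook is left NULL
     * and the dma_sync_*() wrappers become no-ops. */
    static struct dma_mapping_ops coherent_dma_ops = {
    	.alloc_coherent	= my_alloc_coherent,
    	.free_coherent	= my_free_coherent,
    	.map_sg		= my_map_sg,
    	.unmap_sg	= my_unmap_sg,
    };
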
@@ -22,9 +22,7 @@

 #ifdef __KERNEL__

-#include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/highmem.h>
 #include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>

@@ -62,6 +60,9 @@ extern pte_t *pkmap_page_table;

 extern void *kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
+extern void *kmap_atomic_prot(struct page *page, enum km_type type,
+			      pgprot_t prot);
+extern void kunmap_atomic(void *kvaddr, enum km_type type);

 static inline void *kmap(struct page *page)
 {

@@ -79,62 +80,11 @@ static inline void kunmap(struct page *page)
 	kunmap_high(page);
 }

-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface.  But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
-static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
-{
-	unsigned int idx;
-	unsigned long vaddr;
-
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
-	debug_kmap_atomic(type);
-	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(!pte_none(*(kmap_pte-idx)));
-#endif
-	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
-	local_flush_tlb_page(NULL, vaddr);
-
-	return (void*) vaddr;
-}
-
 static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
 	return kmap_atomic_prot(page, type, kmap_prot);
 }

-static inline void kunmap_atomic(void *kvaddr, enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
-	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
-		pagefault_enable();
-		return;
-	}
-
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	local_flush_tlb_page(NULL, vaddr);
-#endif
-	pagefault_enable();
-}
-
 static inline struct page *kmap_atomic_to_page(void *ptr)
 {
 	unsigned long idx, vaddr = (unsigned long) ptr;

@@ -148,6 +98,7 @@ static inline struct page *kmap_atomic_to_page(void *ptr)
 	return pte_page(*pte);
 }

+
 #define flush_cache_kmaps()	flush_cache_all()

 #endif /* __KERNEL__ */

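Net effect of the highmem.h hunks: the bodies of kmap_atomic_prot() and kunmap_atomic() move out of line (hence the deleted inline definitions, the new extern declarations, and the now-unneeded includes), while kmap_atomic() stays a one-line inline wrapper. Callers are unaffected and keep the two-argument km_type form of this era; a minimal sketch, where buf and len stand in for the caller's destination:

    /* Caller-side view, unchanged by the move out of line. */
    char *vaddr = kmap_atomic(page, KM_USER0);
    memcpy(buf, vaddr, len);
    kunmap_atomic(vaddr, KM_USER0);
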
@@ -68,13 +68,13 @@ static inline int irqs_disabled_flags(unsigned long flags)

 #if defined(CONFIG_BOOKE)
 #define SET_MSR_EE(x)	mtmsr(x)
-#define local_irq_restore(flags)	__asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
+#define raw_local_irq_restore(flags)	__asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
 #else
 #define SET_MSR_EE(x)	mtmsr(x)
-#define local_irq_restore(flags)	mtmsr(flags)
+#define raw_local_irq_restore(flags)	mtmsr(flags)
 #endif

-static inline void local_irq_disable(void)
+static inline void raw_local_irq_disable(void)
 {
 #ifdef CONFIG_BOOKE
 	__asm__ __volatile__("wrteei 0": : :"memory");

@@ -86,7 +86,7 @@ static inline void local_irq_disable(void)
 #endif
 }

-static inline void local_irq_enable(void)
+static inline void raw_local_irq_enable(void)
 {
 #ifdef CONFIG_BOOKE
 	__asm__ __volatile__("wrteei 1": : :"memory");

@@ -98,7 +98,7 @@ static inline void local_irq_enable(void)
 #endif
 }

-static inline void local_irq_save_ptr(unsigned long *flags)
+static inline void raw_local_irq_save_ptr(unsigned long *flags)
 {
 	unsigned long msr;
 	msr = mfmsr();

@@ -110,12 +110,12 @@ static inline void local_irq_save_ptr(unsigned long *flags)
 #endif
 }

-#define local_save_flags(flags)	((flags) = mfmsr())
-#define local_irq_save(flags)	local_irq_save_ptr(&flags)
-#define irqs_disabled()		((mfmsr() & MSR_EE) == 0)
-
+#define raw_local_save_flags(flags)	((flags) = mfmsr())
+#define raw_local_irq_save(flags)	raw_local_irq_save_ptr(&flags)
+#define raw_irqs_disabled()		((mfmsr() & MSR_EE) == 0)
+#define raw_irqs_disabled_flags(flags)	(((flags) & MSR_EE) == 0)
+
 #define hard_irq_enable()	local_irq_enable()
-#define hard_irq_disable()	local_irq_disable()
+#define hard_irq_disable()	raw_local_irq_disable()

 static inline int irqs_disabled_flags(unsigned long flags)
 {

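These hunks rename the 32-bit primitives from local_irq_* to raw_local_irq_* so the generic include/linux/irqflags.h layer can define the unprefixed names on top of them and weave in irq-state tracing for lockdep; hard_irq_disable() is retargeted at the raw variant so it stays trace-free. Roughly, the generic wrappers this enables look like the following (simplified, assuming CONFIG_TRACE_IRQFLAGS is on):

    #define local_irq_disable()			\
    	do {					\
    		raw_local_irq_disable();	\
    		trace_hardirqs_off();		\
    	} while (0)

    #define local_irq_enable()			\
    	do {					\
    		trace_hardirqs_on();		\
    		raw_local_irq_enable();		\
    	} while (0)
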
@@ -61,6 +61,8 @@ struct pt_regs;
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);

+#define PERF_COUNTER_INDEX_OFFSET	1
+
 /*
  * Only override the default definitions in include/linux/perf_counter.h
  * if we have hardware PMU support.

@@ -16,7 +16,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
  */
 /* #define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); }) */
 #define pmd_free(mm, x)                 do { } while (0)
-#define __pmd_free_tlb(tlb,x)           do { } while (0)
+#define __pmd_free_tlb(tlb,x,a)         do { } while (0)
 /* #define pgd_populate(mm, pmd, pte)      BUG() */

 #ifndef CONFIG_BOOKE

|
@@ -118,11 +118,11 @@ static inline void pgtable_free(pgtable_free_t pgf)
|
||||
kmem_cache_free(pgtable_cache[cachenum], p);
|
||||
}
|
||||
|
||||
#define __pmd_free_tlb(tlb, pmd) \
|
||||
#define __pmd_free_tlb(tlb, pmd,addr) \
|
||||
pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
|
||||
PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
|
||||
#ifndef CONFIG_PPC_64K_PAGES
|
||||
#define __pud_free_tlb(tlb, pud) \
|
||||
#define __pud_free_tlb(tlb, pud, addr) \
|
||||
pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
|
||||
PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
|
||||
#endif /* CONFIG_PPC_64K_PAGES */
|
||||
|
@@ -38,14 +38,14 @@ static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
 extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);

 #ifdef CONFIG_SMP
-#define __pte_free_tlb(tlb,ptepage)	\
+#define __pte_free_tlb(tlb,ptepage,address)	\
 do { \
 	pgtable_page_dtor(ptepage); \
 	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
-		PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
+		PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
 } while (0)
 #else
-#define __pte_free_tlb(tlb, pte)	pte_free((tlb)->mm, (pte))
+#define __pte_free_tlb(tlb, pte, address)	pte_free((tlb)->mm, (pte))
 #endif

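The three pgalloc hunks are powerpc's side of a tree-wide interface change: __pte_free_tlb(), __pmd_free_tlb() and __pud_free_tlb() now receive the virtual address of the mapping being torn down, for the benefit of architectures whose TLB invalidation is address-ranged; powerpc itself ignores the extra argument. The generic call sites change shape accordingly: a simplified sketch of mm/memory.c's free_pte_range() after the change:

    static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
    			   unsigned long addr)
    {
    	pgtable_t token = pmd_pgtable(*pmd);

    	pmd_clear(pmd);
    	pte_free_tlb(tlb, token, addr);	/* address now passed through */
    	tlb->mm->nr_ptes--;
    }
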
@@ -47,7 +47,8 @@
  * generic accessors and iterators here
  */
 #define __real_pte(e,p) 	((real_pte_t) { \
-	(e), pte_val(*((p) + PTRS_PER_PTE)) })
+	(e), ((e) & _PAGE_COMBO) ? \
+		(pte_val(*((p) + PTRS_PER_PTE))) : 0 })
 #define __rpte_to_hidx(r,index)	((pte_val((r).pte) & _PAGE_COMBO) ? \
 	(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
 #define __rpte_to_pte(r)	((r).pte)

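The new __real_pte() only reads the second half of the PTE page (at p + PTRS_PER_PTE, where the per-subpage hash slot indices live) when the PTE is marked _PAGE_COMBO, matching the test __rpte_to_hidx() already performs, and yields a well-defined hidx of 0 for non-combo PTEs instead of a stale read. Written out as a function, the macro is equivalent to this sketch (real_pte_t pairs the pte with its hidx word; the function name is hypothetical):

    static inline real_pte_t real_pte_sketch(pte_t e, pte_t *p)
    {
    	real_pte_t rpte;

    	rpte.pte  = e;
    	/* hash-slot bits only exist for 64K "combo" pages */
    	rpte.hidx = (pte_val(e) & _PAGE_COMBO) ?
    			pte_val(*(p + PTRS_PER_PTE)) : 0;
    	return rpte;
    }
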
@@ -58,7 +58,7 @@ struct rtas_t {
 	unsigned long entry;		/* physical address pointer */
 	unsigned long base;		/* physical address pointer */
 	unsigned long size;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	struct rtas_args args;
 	struct device_node *dev;	/* virtual address pointer */
 };

@@ -245,5 +245,8 @@ static inline u32 rtas_config_addr(int busno, int devfn, int reg)
 		(devfn << 8) | (reg & 0xff);
 }

+extern void __cpuinit rtas_give_timebase(void);
+extern void __cpuinit rtas_take_timebase(void);
+
 #endif /* __KERNEL__ */
 #endif /* _POWERPC_RTAS_H */

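Two independent rtas.h changes meet here: the RTAS lock becomes a raw_spinlock_t, since RTAS calls must run truly atomically even on configurations where a plain spinlock_t may sleep, and the timebase give/take helpers are exposed for platform SMP code. A hypothetical hookup for the latter, using the give/take fields of powerpc's struct smp_ops_t:

    static struct smp_ops_t my_smp_ops = {
    	/* secondary-CPU bringup: freeze and hand over the timebase
    	 * through RTAS so all CPUs tick coherently */
    	.give_timebase	= rtas_give_timebase,
    	.take_timebase	= rtas_take_timebase,
    	/* .probe, .kick_cpu, .message_pass and friends omitted */
    };
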
|
@@ -46,15 +46,13 @@ struct thread_info {
|
||||
|
||||
/*
|
||||
* macros/functions for gaining access to the thread information structure
|
||||
*
|
||||
* preempt_count needs to be 1 initially, until the scheduler is functional.
|
||||
*/
|
||||
#define INIT_THREAD_INFO(tsk) \
|
||||
{ \
|
||||
.task = &tsk, \
|
||||
.exec_domain = &default_exec_domain, \
|
||||
.cpu = 0, \
|
||||
.preempt_count = 1, \
|
||||
.preempt_count = INIT_PREEMPT_COUNT, \
|
||||
.restart_block = { \
|
||||
.fn = do_no_restart_syscall, \
|
||||
}, \
|
||||
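The hand-coded initial preempt_count of 1 gives way to the shared INIT_PREEMPT_COUNT constant, and the deleted comment's rationale (keep preemption effectively disabled until the scheduler is functional) moves with it to the generic definition, which this merge window defines in include/linux/sched.h as:

    /* initial preempt_count: preemption disabled until the scheduler
     * is up; see the accompanying comment in sched.h */
    #define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)
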