Merge branch 'core-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull unified TLB flushing from Ingo Molnar:
 "This contains the generic mmu_gather feature from Peter Zijlstra,
  which is an all-arch unification of TLB flushing APIs, via the
  following (broad) steps:

   - enhance the <asm-generic/tlb.h> APIs to cover more arch details

   - convert most TLB flushing arch implementations to the generic
     <asm-generic/tlb.h> APIs.

   - remove leftovers of per arch implementations

  After this series every single architecture makes use of the unified
  TLB flushing APIs"

* 'core-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  mm/resource: Use resource_overlaps() to simplify region_intersects()
  ia64/tlb: Eradicate tlb_migrate_finish() callback
  asm-generic/tlb: Remove tlb_table_flush()
  asm-generic/tlb: Remove tlb_flush_mmu_free()
  asm-generic/tlb: Remove CONFIG_HAVE_GENERIC_MMU_GATHER
  asm-generic/tlb: Remove arch_tlb*_mmu()
  s390/tlb: Convert to generic mmu_gather
  asm-generic/tlb: Introduce CONFIG_HAVE_MMU_GATHER_NO_GATHER=y
  arch/tlb: Clean up simple architectures
  um/tlb: Convert to generic mmu_gather
  sh/tlb: Convert SH to generic mmu_gather
  ia64/tlb: Convert to generic mmu_gather
  arm/tlb: Convert to generic mmu_gather
  asm-generic/tlb, arch: Invert CONFIG_HAVE_RCU_TABLE_INVALIDATE
  asm-generic/tlb, ia64: Conditionally provide tlb_migrate_finish()
  asm-generic/tlb: Provide generic tlb_flush() based on flush_tlb_mm()
  asm-generic/tlb, arch: Provide generic tlb_flush() based on flush_tlb_range()
  asm-generic/tlb, arch: Provide generic VIPT cache flush
  asm-generic/tlb, arch: Provide CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
  asm-generic/tlb: Provide a comment
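The two "Provide generic tlb_flush()" commits in the shortlog capture the
flavor of the unification: an architecture that can flush by mm or by range
no longer needs to hand-roll its own mmu_gather flush. As a rough, simplified
sketch of the range-based fallback (the real code in <asm-generic/tlb.h>
additionally reconstructs the VM_EXEC/VM_HUGETLB bits of a throwaway vma
before calling flush_tlb_range()):

	static inline void tlb_flush(struct mmu_gather *tlb)
	{
		if (tlb->fullmm || tlb->need_flush_all) {
			/* The whole address space is going away. */
			flush_tlb_mm(tlb->mm);
		} else if (tlb->end) {
			/* Only the gathered start..end range was unmapped. */
			struct vm_area_struct vma = { .vm_mm = tlb->mm };

			flush_tlb_range(&vma, tlb->start, tlb->end);
		}
	}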
@@ -55,7 +55,7 @@ static void *try_ram_remap(resource_size_t offset, size_t size,
  *
  * MEMREMAP_WB - matches the default mapping for System RAM on
  * the architecture. This is usually a read-allocate write-back cache.
- * Morever, if MEMREMAP_WB is specified and the requested remap region is RAM
+ * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM
  * memremap() will bypass establishing a new mapping and instead return
  * a pointer into the direct map.
  *
@@ -86,7 +86,7 @@ void *memremap(resource_size_t offset, size_t size, unsigned long flags)
 	/* Try all mapping types requested until one returns non-NULL */
 	if (flags & MEMREMAP_WB) {
 		/*
-		 * MEMREMAP_WB is special in that it can be satisifed
+		 * MEMREMAP_WB is special in that it can be satisfied
 		 * from the direct map. Some archs depend on the
 		 * capability of memremap() to autodetect cases where
 		 * the requested range is potentially in System RAM.
@@ -520,21 +520,20 @@ EXPORT_SYMBOL_GPL(page_is_ram);
 int region_intersects(resource_size_t start, size_t size, unsigned long flags,
 		      unsigned long desc)
 {
-	resource_size_t end = start + size - 1;
+	struct resource res;
 	int type = 0; int other = 0;
 	struct resource *p;
 
+	res.start = start;
+	res.end = start + size - 1;
+
 	read_lock(&resource_lock);
 	for (p = iomem_resource.child; p ; p = p->sibling) {
 		bool is_type = (((p->flags & flags) == flags) &&
 				((desc == IORES_DESC_NONE) ||
 				 (desc == p->desc)));
 
-		if (start >= p->start && start <= p->end)
-			is_type ? type++ : other++;
-		if (end >= p->start && end <= p->end)
-			is_type ? type++ : other++;
-		if (p->start >= start && p->end <= end)
+		if (resource_overlaps(p, &res))
 			is_type ? type++ : other++;
 	}
 	read_unlock(&resource_lock);
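This hunk is behavior-preserving for region_intersects()'s callers: the old
code could bump a counter more than once for a single resource (once per
endpoint check plus the containment check), but the function's return logic
only compares type and other against zero to pick between REGION_DISJOINT,
REGION_INTERSECTS and REGION_MIXED, so counting each overlapping resource
once is enough. The helper itself is a one-line closed-interval test; its
definition in include/linux/ioport.h is roughly:

	/* True when [r1->start, r1->end] and [r2->start, r2->end]
	 * share at least one address. */
	static inline bool resource_overlaps(struct resource *r1,
					     struct resource *r2)
	{
		return r1->start <= r2->end && r1->end >= r2->start;
	}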
@@ -1151,7 +1151,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 		/* Need help from migration thread: drop lock and wait. */
 		task_rq_unlock(rq, p, &rf);
 		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
-		tlb_migrate_finish(p->mm);
 		return 0;
 	} else if (task_on_rq_queued(p)) {
 		/*
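For context on this last hunk: tlb_migrate_finish() had been a no-op on
every architecture except ia64, and the "ia64/tlb: Eradicate
tlb_migrate_finish() callback" commit drops this final call site along with
the hook itself. The generic stub being retired was, roughly:

	/* Pre-removal stub in <asm-generic/tlb.h>; only ia64 ever
	 * overrode it with a real flush. */
	#define tlb_migrate_finish(mm) do { } while (0)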