Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:

 - a few misc things and hotfixes

 - ocfs2

 - almost all of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (139 commits)
  kernel/memremap.c: remove the unused device_private_entry_fault() export
  mm: delete find_get_entries_tag
  mm/huge_memory.c: make __thp_get_unmapped_area static
  mm/mprotect.c: fix compilation warning because of unused 'mm' variable
  mm/page-writeback: introduce tracepoint for wait_on_page_writeback()
  mm/vmscan: simplify trace_reclaim_flags and trace_shrink_flags
  mm/Kconfig: update "Memory Model" help text
  mm/vmscan.c: don't disable irq again when count pgrefill for memcg
  mm: memblock: make keeping memblock memory opt-in rather than opt-out
  hugetlbfs: always use address space in inode for resv_map pointer
  mm/z3fold.c: support page migration
  mm/z3fold.c: add structure for buddy handles
  mm/z3fold.c: improve compression by extending search
  mm/z3fold.c: introduce helper functions
  mm/page_alloc.c: remove unnecessary parameter in rmqueue_pcplist
  mm/hmm: add ARCH_HAS_HMM_MIRROR ARCH_HAS_HMM_DEVICE Kconfig
  mm/vmscan.c: simplify shrink_inactive_list()
  fs/sync.c: sync_file_range(2) may use WB_SYNC_ALL writeback
  xen/privcmd-buf.c: convert to use vm_map_pages_zero()
  xen/gntdev.c: convert to use vm_map_pages()
  ...
@@ -61,6 +61,7 @@ Currently, these files are in /proc/sys/vm:
 - stat_refresh
 - numa_stat
 - swappiness
+- unprivileged_userfaultfd
 - user_reserve_kbytes
 - vfs_cache_pressure
 - watermark_boost_factor
@@ -818,6 +819,17 @@ The default value is 60.
 
 ==============================================================
 
+unprivileged_userfaultfd
+
+This flag controls whether unprivileged users can use the userfaultfd
+system calls. Set this to 1 to allow unprivileged users to use the
+userfaultfd system calls, or set this to 0 to restrict userfaultfd to only
+privileged users (with SYS_CAP_PTRACE capability).
+
+The default value is 1.
+
+==============================================================
+
 user_reserve_kbytes
 
 When overcommit_memory is set to 2, "never overcommit" mode, reserve
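
The knob itself is boolean and takes effect at syscall entry. As a minimal
user-space sketch (illustrative only, not part of this series; the syscall
number and flags are the standard userfaultfd(2) ones), an unprivileged
process can probe the documented EPERM behaviour when the sysctl is 0:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Probe: with vm.unprivileged_userfaultfd = 0, an unprivileged
     * caller is expected to see this call fail with EPERM. */
    int main(void)
    {
            long fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

            if (fd < 0)
                    perror("userfaultfd");  /* EPERM when the knob is 0 */
            else
                    close(fd);
            return 0;
    }
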
@@ -113,7 +113,7 @@ my $regex_kswapd_wake_default = 'nid=([0-9]*) order=([0-9]*)';
 my $regex_kswapd_sleep_default = 'nid=([0-9]*)';
 my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*) gfp_flags=([A-Z_|]*)';
 my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) classzone_idx=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned=([0-9]*) nr_skipped=([0-9]*) nr_taken=([0-9]*) lru=([a-z_]*)';
-my $regex_lru_shrink_inactive_default = 'nid=([0-9]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) nr_dirty=([0-9]*) nr_writeback=([0-9]*) nr_congested=([0-9]*) nr_immediate=([0-9]*) nr_activate=([0-9]*) nr_ref_keep=([0-9]*) nr_unmap_fail=([0-9]*) priority=([0-9]*) flags=([A-Z_|]*)';
+my $regex_lru_shrink_inactive_default = 'nid=([0-9]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) nr_dirty=([0-9]*) nr_writeback=([0-9]*) nr_congested=([0-9]*) nr_immediate=([0-9]*) nr_activate_anon=([0-9]*) nr_activate_file=([0-9]*) nr_ref_keep=([0-9]*) nr_unmap_fail=([0-9]*) priority=([0-9]*) flags=([A-Z_|]*)';
 my $regex_lru_shrink_active_default = 'lru=([A-Z_]*) nr_scanned=([0-9]*) nr_rotated=([0-9]*) priority=([0-9]*)';
 my $regex_writepage_default = 'page=([0-9a-f]*) pfn=([0-9]*) flags=([A-Z_|]*)';
 
@@ -212,7 +212,8 @@ $regex_lru_shrink_inactive = generate_traceevent_regex(
 			"vmscan/mm_vmscan_lru_shrink_inactive",
 			$regex_lru_shrink_inactive_default,
 			"nid", "nr_scanned", "nr_reclaimed", "nr_dirty", "nr_writeback",
-			"nr_congested", "nr_immediate", "nr_activate", "nr_ref_keep",
+			"nr_congested", "nr_immediate", "nr_activate_anon",
+			"nr_activate_file", "nr_ref_keep",
 			"nr_unmap_fail", "priority", "flags");
 $regex_lru_shrink_active = generate_traceevent_regex(
 			"vmscan/mm_vmscan_lru_shrink_active",
@@ -407,7 +408,7 @@ EVENT_PROCESS:
 			}
 
 			my $nr_reclaimed = $3;
-			my $flags = $12;
+			my $flags = $13;
 			my $file = 0;
 			if ($flags =~ /RECLAIM_WB_FILE/) {
 				$file = 1;
@@ -189,20 +189,10 @@ the driver callback returns.
 When the device driver wants to populate a range of virtual addresses, it can
 use either::
 
-  int hmm_vma_get_pfns(struct vm_area_struct *vma,
-                       struct hmm_range *range,
-                       unsigned long start,
-                       unsigned long end,
-                       hmm_pfn_t *pfns);
-  int hmm_vma_fault(struct vm_area_struct *vma,
-                    struct hmm_range *range,
-                    unsigned long start,
-                    unsigned long end,
-                    hmm_pfn_t *pfns,
-                    bool write,
-                    bool block);
+  long hmm_range_snapshot(struct hmm_range *range);
+  long hmm_range_fault(struct hmm_range *range, bool block);
 
-The first one (hmm_vma_get_pfns()) will only fetch present CPU page table
+The first one (hmm_range_snapshot()) will only fetch present CPU page table
 entries and will not trigger a page fault on missing or non-present entries.
 The second one does trigger a page fault on missing or read-only entry if the
 write parameter is true. Page faults use the generic mm page fault code path
@@ -220,25 +210,56 @@ respect in order to keep things properly synchronized. The usage pattern is::
 {
      struct hmm_range range;
     ...
+
+     range.start = ...;
+     range.end = ...;
+     range.pfns = ...;
+     range.flags = ...;
+     range.values = ...;
+     range.pfn_shift = ...;
+     hmm_range_register(&range);
+
+     /*
+      * Just wait for range to be valid, safe to ignore return value as we
+      * will use the return value of hmm_range_snapshot() below under the
+      * mmap_sem to ascertain the validity of the range.
+      */
+     hmm_range_wait_until_valid(&range, TIMEOUT_IN_MSEC);
+
 again:
-     ret = hmm_vma_get_pfns(vma, &range, start, end, pfns);
-     if (ret)
+     down_read(&mm->mmap_sem);
+     ret = hmm_range_snapshot(&range);
+     if (ret) {
+         up_read(&mm->mmap_sem);
+         if (ret == -EAGAIN) {
+             /*
+              * No need to check hmm_range_wait_until_valid() return value
+              * on retry we will get proper error with hmm_range_snapshot()
+              */
+             hmm_range_wait_until_valid(&range, TIMEOUT_IN_MSEC);
+             goto again;
+         }
+         hmm_mirror_unregister(&range);
          return ret;
+     }
      take_lock(driver->update);
-     if (!hmm_vma_range_done(vma, &range)) {
+     if (!range.valid) {
          release_lock(driver->update);
+         up_read(&mm->mmap_sem);
          goto again;
      }
 
      // Use pfns array content to update device page table
 
+     hmm_mirror_unregister(&range);
      release_lock(driver->update);
+     up_read(&mm->mmap_sem);
      return 0;
 }
 
 The driver->update lock is the same lock that the driver takes inside its
-update() callback. That lock must be held before hmm_vma_range_done() to avoid
-any race with a concurrent CPU page table update.
+update() callback. That lock must be held before checking the range.valid
+field to avoid any race with a concurrent CPU page table update.
 
 HMM implements all this on top of the mmu_notifier API because we wanted a
 simpler API and also to be able to perform optimizations latter on like doing
@@ -255,6 +276,41 @@ report commands as executed is serialized (there is no point in doing this
 concurrently).
 
 
+Leverage default_flags and pfn_flags_mask
+=========================================
+
+The hmm_range struct has 2 fields default_flags and pfn_flags_mask that allows
+to set fault or snapshot policy for a whole range instead of having to set them
+for each entries in the range.
+
+For instance if the device flags for device entries are:
+    VALID (1 << 63)
+    WRITE (1 << 62)
+
+Now let say that device driver wants to fault with at least read a range then
+it does set:
+    range->default_flags = (1 << 63)
+    range->pfn_flags_mask = 0;
+
+and calls hmm_range_fault() as described above. This will fill fault all page
+in the range with at least read permission.
+
+Now let say driver wants to do the same except for one page in the range for
+which its want to have write. Now driver set:
+    range->default_flags = (1 << 63);
+    range->pfn_flags_mask = (1 << 62);
+    range->pfns[index_of_write] = (1 << 62);
+
+With this HMM will fault in all page with at least read (ie valid) and for the
+address == range->start + (index_of_write << PAGE_SHIFT) it will fault with
+write permission ie if the CPU pte does not have write permission set then HMM
+will call handle_mm_fault().
+
+Note that HMM will populate the pfns array with write permission for any entry
+that have write permission within the CPU pte no matter what are the values set
+in default_flags or pfn_flags_mask.
+
+
 Represent and manage device memory from core kernel point of view
 =================================================================
 
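
The default_flags/pfn_flags_mask combination described above can be modelled
in a few lines. This is a sketch of the documented semantics only; the DEV_*
values are the example's device-defined bits, not kernel symbols. The
per-entry fault request is the defaults plus whatever the mask lets through
from the pfns array:

    #include <stdint.h>
    #include <stdio.h>

    #define DEV_PFN_VALID (1ULL << 63)   /* example VALID bit */
    #define DEV_PFN_WRITE (1ULL << 62)   /* example WRITE bit */

    int main(void)
    {
            uint64_t default_flags = DEV_PFN_VALID;  /* read everywhere */
            uint64_t pfn_flags_mask = DEV_PFN_WRITE; /* honor per-entry WRITE */
            uint64_t pfn_entry = DEV_PFN_WRITE;      /* the one write page */

            /* effective request = defaults | (entry & mask) */
            uint64_t want = default_flags | (pfn_entry & pfn_flags_mask);

            printf("valid=%d write=%d\n",
                   !!(want & DEV_PFN_VALID), !!(want & DEV_PFN_WRITE));
            return 0;
    }
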
@@ -11746,6 +11746,7 @@ F:	include/linux/oprofile.h
 ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
 M:	Mark Fasheh <mark@fasheh.com>
 M:	Joel Becker <jlbec@evilplan.org>
+M:	Joseph Qi <joseph.qi@linux.alibaba.com>
 L:	ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
 W:	http://ocfs2.wiki.kernel.org
 S:	Supported
@@ -245,6 +245,13 @@ config ARCH_HAS_FORTIFY_SOURCE
 	  An architecture should select this when it can successfully
 	  build and run with CONFIG_FORTIFY_SOURCE.
 
+#
+# Select if the arch provides a historic keepinit alias for the retain_initrd
+# command line option
+#
+config ARCH_HAS_KEEPINITRD
+	bool
+
 # Select if arch has all set_memory_ro/rw/x/nx() functions in asm/cacheflush.h
 config ARCH_HAS_SET_MEMORY
 	bool
@@ -285,17 +285,3 @@ mem_init(void)
 	memblock_free_all();
 	mem_init_print_info(NULL);
 }
-
-void
-free_initmem(void)
-{
-	free_initmem_default(-1);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void
-free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
@@ -206,18 +206,3 @@ void __init mem_init(void)
 	memblock_free_all();
 	mem_init_print_info(NULL);
 }
-
-/*
- * free_initmem: Free all the __init memory.
- */
-void __ref free_initmem(void)
-{
-	free_initmem_default(-1);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
@@ -4,11 +4,11 @@ config ARM
 	default y
 	select ARCH_32BIT_OFF_T
 	select ARCH_CLOCKSOURCE_DATA
-	select ARCH_DISCARD_MEMBLOCK if !HAVE_ARCH_PFN_VALID && !KEXEC
 	select ARCH_HAS_DEBUG_VIRTUAL if MMU
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE
+	select ARCH_HAS_KEEPINITRD
 	select ARCH_HAS_KCOV
 	select ARCH_HAS_MEMBARRIER_SYNC_CORE
 	select ARCH_HAS_PTE_SPECIAL if ARM_LPAE
@@ -21,6 +21,7 @@ config ARM
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_CUSTOM_GPIO_H
 	select ARCH_HAS_GCOV_PROFILE_ALL
+	select ARCH_KEEP_MEMBLOCK if HAVE_ARCH_PFN_VALID || KEXEC
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_NO_SG_CHAIN if !ARM_HAS_SG_CHAIN
 	select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
@@ -1577,31 +1577,21 @@ static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma
 		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		    unsigned long attrs)
 {
-	unsigned long uaddr = vma->vm_start;
-	unsigned long usize = vma->vm_end - vma->vm_start;
 	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
 	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long off = vma->vm_pgoff;
+	int err;
 
 	if (!pages)
 		return -ENXIO;
 
-	if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
+	if (vma->vm_pgoff >= nr_pages)
 		return -ENXIO;
 
-	pages += off;
+	err = vm_map_pages(vma, pages, nr_pages);
+	if (err)
+		pr_err("Remapping memory failed: %d\n", err);
 
-	do {
-		int ret = vm_insert_page(vma, uaddr, *pages++);
-		if (ret) {
-			pr_err("Remapping memory failed: %d\n", ret);
-			return ret;
-		}
-		uaddr += PAGE_SIZE;
-		usize -= PAGE_SIZE;
-	} while (usize > 0);
-
-	return 0;
+	return err;
 }
 static int arm_iommu_mmap_attrs(struct device *dev,
 		struct vm_area_struct *vma, void *cpu_addr,
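
The conversion above is the pattern used throughout this series:
vm_map_pages() subsumes the vm_pgoff bounds check and the per-page
vm_insert_page() loop. A condensed sketch of a driver mmap handler on the
new helper (the demo_* names are hypothetical, not from this patch):

    #include <linux/mm.h>

    struct demo_buf {                   /* hypothetical driver state */
            struct page **pages;
            unsigned long nr_pages;
    };

    static int demo_mmap(struct demo_buf *buf, struct vm_area_struct *vma)
    {
            /* vm_map_pages() checks vma->vm_pgoff and the VMA size
             * against nr_pages, then inserts each page in order. */
            return vm_map_pages(vma, buf->pages, buf->nr_pages);
    }
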
@@ -695,27 +695,14 @@ void free_initmem(void)
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
-
-static int keep_initrd;
-
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (!keep_initrd) {
-		if (start == initrd_start)
-			start = round_down(start, PAGE_SIZE);
-		if (end == initrd_end)
-			end = round_up(end, PAGE_SIZE);
+	if (start == initrd_start)
+		start = round_down(start, PAGE_SIZE);
+	if (end == initrd_end)
+		end = round_up(end, PAGE_SIZE);
 
 	poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
 	free_reserved_area((void *)start, (void *)end, -1, "initrd");
-	}
 }
-
-static int __init keepinitrd_setup(char *__unused)
-{
-	keep_initrd = 1;
-	return 1;
-}
-
-__setup("keepinitrd", keepinitrd_setup);
 #endif
@@ -19,8 +19,9 @@ config ARM64
 	select ARCH_HAS_FAST_MULTIPLIER
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
-	select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
+	select ARCH_HAS_GIGANTIC_PAGE
 	select ARCH_HAS_KCOV
+	select ARCH_HAS_KEEPINITRD
 	select ARCH_HAS_MEMBARRIER_SYNC_CORE
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_SETUP_DMA_OPS
@@ -59,6 +60,7 @@ config ARM64
 	select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT
 	select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT
 	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT
+	select ARCH_KEEP_MEMBLOCK
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
@@ -70,8 +70,4 @@ extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #include <asm-generic/hugetlb.h>
 
-#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static inline bool gigantic_page_supported(void) { return true; }
-#endif
-
 #endif /* __ASM_HUGETLB_H */
@@ -578,24 +578,11 @@ void free_initmem(void)
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
-
-static int keep_initrd __initdata;
-
 void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (!keep_initrd) {
-		free_reserved_area((void *)start, (void *)end, 0, "initrd");
-		memblock_free(__virt_to_phys(start), end - start);
-	}
+	free_reserved_area((void *)start, (void *)end, 0, "initrd");
+	memblock_free(__virt_to_phys(start), end - start);
 }
-
-static int __init keepinitrd_setup(char *__unused)
-{
-	keep_initrd = 1;
-	return 1;
-}
-
-__setup("keepinitrd", keepinitrd_setup);
 #endif
 
 /*
@@ -1065,8 +1065,8 @@ int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-		    bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+		    struct mhp_restrictions *restrictions)
 {
 	int flags = 0;
 
@@ -1077,6 +1077,6 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
 			     size, PAGE_KERNEL, __pgd_pgtable_alloc, flags);
 
 	return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
-			   altmap, want_memblock);
+			   restrictions);
 }
 #endif
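
Every arch_add_memory() in this series changes the same way: the altmap
pointer and the want_memblock boolean are folded into a single struct
mhp_restrictions argument. A caller-side sketch of the new shape (based on
the generic memory-hotplug rework in this merge; treat the MHP_MEMBLOCK_API
flag name and field layout as assumptions of that patchset):

    #include <linux/memory_hotplug.h>

    static int demo_add_memory(int nid, u64 start, u64 size,
                               struct vmem_altmap *altmap)
    {
            struct mhp_restrictions restrictions = {
                    .flags  = MHP_MEMBLOCK_API, /* was want_memblock == true */
                    .altmap = altmap,
            };

            return arch_add_memory(nid, start, size, &restrictions);
    }
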
@@ -68,15 +68,3 @@ void __init mem_init(void)
 
 	mem_init_print_info(NULL);
 }
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
-void __init free_initmem(void)
-{
-	free_initmem_default(-1);
-}
@@ -102,17 +102,3 @@ void __init mem_init(void)
 
 	mem_init_print_info(NULL);
 }
-
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
-void
-free_initmem(void)
-{
-	free_initmem_default(-1);
-}
@@ -22,7 +22,6 @@ config HEXAGON
 	select GENERIC_IRQ_SHOW
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_TRACEHOOK
-	select ARCH_DISCARD_MEMBLOCK
 	select NEED_SG_DMA_LENGTH
 	select NO_IOPORT_MAP
 	select GENERIC_IOMAP
@@ -84,16 +84,6 @@ void __init mem_init(void)
 	init_mm.context.ptbase = __pa(init_mm.pgd);
 }
 
-/*
- * free_initmem - frees memory used by stuff declared with __init
- *
- * Todo: free pages between __init_begin and __init_end; possibly
- * some devtree related stuff as well.
- */
-void __ref free_initmem(void)
-{
-}
-
 /*
  * free_initrd_mem - frees... initrd memory.
  * @start - start of init memory
@@ -33,7 +33,6 @@ config IA64
 	select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
 	select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB
 	select VIRT_TO_BUS
-	select ARCH_DISCARD_MEMBLOCK
 	select GENERIC_IRQ_PROBE
 	select GENERIC_PENDING_IRQ if SMP
 	select GENERIC_IRQ_SHOW
@@ -666,14 +666,14 @@ mem_init (void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-		bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+		struct mhp_restrictions *restrictions)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
-	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+	ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
 	if (ret)
 		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
 		       __func__, ret);
@@ -682,20 +682,15 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-int arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap)
+void arch_remove_memory(int nid, u64 start, u64 size,
+			struct vmem_altmap *altmap)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	struct zone *zone;
-	int ret;
 
 	zone = page_zone(pfn_to_page(start_pfn));
-	ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
-	if (ret)
-		pr_warn("%s: Problem encountered in __remove_pages() as"
-			" ret=%d\n", __func__, ret);
-
-	return ret;
+	__remove_pages(zone, start_pfn, nr_pages, altmap);
 }
 #endif
 #endif
@@ -26,7 +26,6 @@ config M68K
 	select MODULES_USE_ELF_RELA
 	select OLD_SIGSUSPEND3
 	select OLD_SIGACTION
-	select ARCH_DISCARD_MEMBLOCK
 	select MMU_GATHER_NO_RANGE if MMU
 
 config CPU_BIG_ENDIAN
@@ -147,10 +147,3 @@ void __init mem_init(void)
 	init_pointer_tables();
 	mem_init_print_info(NULL);
 }
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
@@ -186,18 +186,6 @@ void __init setup_memory(void)
 	paging_init();
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
-void free_initmem(void)
-{
-	free_initmem_default(-1);
-}
-
 void __init mem_init(void)
 {
 	high_memory = (void *)__va(memory_start + lowmem_size - 1);
@@ -5,7 +5,6 @@ config MIPS
 	select ARCH_32BIT_OFF_T if !64BIT
 	select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
 	select ARCH_CLOCKSOURCE_DATA
-	select ARCH_DISCARD_MEMBLOCK
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
@@ -235,7 +235,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  * get_user_pages_fast() - pin user pages in memory
  * @start:	starting user address
  * @nr_pages:	number of pages from start to pin
- * @write:	whether pages will be written to
+ * @gup_flags:	flags modifying pin behaviour
  * @pages:	array that receives pointers to the pages pinned.
  *		Should be at least nr_pages long.
  *
@@ -247,8 +247,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  * requested. If nr_pages is 0 or negative, returns 0. If no pages
  * were pinned, returns -errno.
  */
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			struct page **pages)
+int get_user_pages_fast(unsigned long start, int nr_pages,
+			unsigned int gup_flags, struct page **pages)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long addr, len, end;
@@ -273,7 +273,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
 			goto slow;
-		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+		if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
+				   pages, &nr))
 			goto slow;
 	} while (pgdp++, addr = next, addr != end);
 	local_irq_enable();
@@ -289,7 +290,7 @@ slow_irqon:
 	pages += nr;
 
 	ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT,
-				      pages, write ? FOLL_WRITE : 0);
+				      pages, gup_flags);
 
 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
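
For get_user_pages_fast() callers the conversion is mechanical: the old
write boolean becomes FOLL_WRITE inside gup_flags, exactly as the kvmppc
and kvm-s390 hunks below do. A minimal sketch (illustrative; demo_* is a
made-up name):

    #include <linux/mm.h>

    static int demo_pin_one(unsigned long addr, bool write,
                            struct page **pages)
    {
            /* Old convention: get_user_pages_fast(addr, 1, write, pages);
             * New convention: pass FOLL_* flags, not a bare boolean. */
            return get_user_pages_fast(addr, 1, write ? FOLL_WRITE : 0,
                                       pages);
    }
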
@@ -504,14 +504,6 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
 	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
-			   "initrd");
-}
-#endif
-
 void (*free_init_pages_eva)(void *begin, void *end) = NULL;
 
 void __ref free_initmem(void)
@@ -252,18 +252,6 @@ void __init mem_init(void)
 	return;
 }
 
-void free_initmem(void)
-{
-	free_initmem_default(-1);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
 void __set_fixmap(enum fixed_addresses idx,
 			       phys_addr_t phys, pgprot_t flags)
 {
@@ -23,7 +23,6 @@ config NIOS2
 	select SPARSE_IRQ
 	select USB_ARCH_HAS_HCD if USB_SUPPORT
 	select CPU_NO_EFFICIENT_FFS
-	select ARCH_DISCARD_MEMBLOCK
 	select MMU_GATHER_NO_RANGE if MMU
 
 config GENERIC_CSUM
@@ -82,18 +82,6 @@ void __init mmu_init(void)
 	flush_tlb_all();
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
-void __ref free_initmem(void)
-{
-	free_initmem_default(-1);
-}
-
 #define __page_aligned(order) __aligned(PAGE_SIZE << (order))
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
 pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
@@ -223,15 +223,3 @@ void __init mem_init(void)
 	mem_init_done = 1;
 	return;
 }
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
-void free_initmem(void)
-{
-	free_initmem_default(-1);
-}
@@ -917,10 +917,3 @@ void flush_tlb_all(void)
 	spin_unlock(&sid_lock);
 }
 #endif
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
@@ -137,6 +137,7 @@ config PPC
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
 	select ARCH_HAS_ZONE_DEVICE		if PPC_BOOK3S_64
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
+	select ARCH_KEEP_MEMBLOCK
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
 	select ARCH_OPTIONAL_KERNEL_RWX		if ARCH_HAS_STRICT_KERNEL_RWX
@@ -36,8 +36,8 @@ static inline int hstate_get_psize(struct hstate *hstate)
 	}
 }
 
-#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static inline bool gigantic_page_supported(void)
+#define __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED
+static inline bool gigantic_page_runtime_supported(void)
 {
 	/*
 	 * We used gigantic page reservation with hypervisor assist in some case.
@@ -49,7 +49,6 @@ static inline bool gigantic_page_supported(void)
 
 	return true;
 }
-#endif
 
 /* hugepd entry valid bit */
 #define HUGEPD_VAL_BITS		(0x8000000000000000UL)
@@ -600,7 +600,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	/* If writing != 0, then the HPTE must allow writing, if we get here */
 	write_ok = writing;
 	hva = gfn_to_hva_memslot(memslot, gfn);
-	npages = get_user_pages_fast(hva, 1, writing, pages);
+	npages = get_user_pages_fast(hva, 1, writing ? FOLL_WRITE : 0, pages);
 	if (npages < 1) {
 		/* Check if it's an I/O mapping */
 		down_read(&current->mm->mmap_sem);
@@ -1193,7 +1193,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
 		goto err;
 	hva = gfn_to_hva_memslot(memslot, gfn);
-	npages = get_user_pages_fast(hva, 1, 1, pages);
+	npages = get_user_pages_fast(hva, 1, FOLL_WRITE, pages);
 	if (npages < 1)
 		goto err;
 	page = pages[0];
@@ -783,7 +783,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
 	if (!pages)
 		return -ENOMEM;
 
-	ret = get_user_pages_fast(cfg->array, num_pages, 1, pages);
+	ret = get_user_pages_fast(cfg->array, num_pages, FOLL_WRITE, pages);
 	if (ret < 0)
 		goto free_pages;
 
@@ -141,8 +141,9 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 	for (entry = 0; entry < entries; entry += chunk) {
 		unsigned long n = min(entries - entry, chunk);
 
-		ret = get_user_pages_longterm(ua + (entry << PAGE_SHIFT), n,
-				FOLL_WRITE, mem->hpages + entry, NULL);
+		ret = get_user_pages(ua + (entry << PAGE_SHIFT), n,
+				FOLL_WRITE | FOLL_LONGTERM,
+				mem->hpages + entry, NULL);
 		if (ret == n) {
 			pinned += n;
 			continue;
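
The same flag-folding retires get_user_pages_longterm(): long-lived pins
now pass FOLL_LONGTERM to plain get_user_pages(), as the hunk above shows.
Sketch (illustrative; as with any get_user_pages() call, the caller is
expected to hold mmap_sem):

    #include <linux/mm.h>

    static long demo_pin_longterm(unsigned long ua, unsigned long n,
                                  struct page **pages)
    {
            /* Was: get_user_pages_longterm(ua, n, FOLL_WRITE, pages, NULL); */
            return get_user_pages(ua, n, FOLL_WRITE | FOLL_LONGTERM,
                                  pages, NULL);
    }
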
@@ -109,8 +109,8 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end)
 	return -ENODEV;
 }
 
-int __ref arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-		bool want_memblock)
+int __ref arch_add_memory(int nid, u64 start, u64 size,
+			  struct mhp_restrictions *restrictions)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -127,11 +127,11 @@ int __ref arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altm
 	}
 	flush_inval_dcache_range(start, start + size);
 
-	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+	return __add_pages(nid, start_pfn, nr_pages, restrictions);
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-int __ref arch_remove_memory(int nid, u64 start, u64 size,
+void __ref arch_remove_memory(int nid, u64 start, u64 size,
 			     struct vmem_altmap *altmap)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
@@ -147,14 +147,13 @@ int __ref arch_remove_memory(int nid, u64 start, u64 size,
 	if (altmap)
 		page += vmem_altmap_offset(altmap);
 
-	ret = __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
-	if (ret)
-		return ret;
+	__remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
 
 	/* Remove htab bolted mappings for this section of memory */
 	start = (unsigned long)__va(start);
 	flush_inval_dcache_range(start, start + size);
 	ret = remove_section_mapping(start, start + size);
+	WARN_ON_ONCE(ret);
 
 	/* Ensure all vmalloc mappings are flushed in case they also
 	 * hit that section of memory
@@ -163,8 +162,6 @@ int __ref arch_remove_memory(int nid, u64 start, u64 size,
 
 	if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
 		pr_warn("Hash collision while resizing HPT\n");
-
-	return ret;
 }
 #endif
 #endif /* CONFIG_MEMORY_HOTPLUG */
@@ -338,13 +335,6 @@ void free_initmem(void)
 	free_initmem_default(POISON_FREE_INITMEM);
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
 /*
  * This is called when a page has been modified by the kernel.
  * It just marks the page as not i-cache clean. We do the i-cache
@@ -331,7 +331,7 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
 config PPC_RADIX_MMU
 	bool "Radix MMU Support"
 	depends on PPC_BOOK3S_64 && HUGETLB_PAGE
-	select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
+	select ARCH_HAS_GIGANTIC_PAGE
 	select PPC_HAVE_KUEP
 	select PPC_HAVE_KUAP
 	default y
@@ -66,11 +66,6 @@ void __init mem_init(void)
 	mem_init_print_info(NULL);
 }
 
-void free_initmem(void)
-{
-	free_initmem_default(0);
-}
-
 #ifdef CONFIG_BLK_DEV_INITRD
 static void __init setup_initrd(void)
 {
@@ -63,7 +63,7 @@ config S390
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
-	select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
+	select ARCH_HAS_GIGANTIC_PAGE
 	select ARCH_HAS_KCOV
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_SET_MEMORY
@@ -100,6 +100,7 @@ config S390
 	select ARCH_INLINE_WRITE_UNLOCK_BH
 	select ARCH_INLINE_WRITE_UNLOCK_IRQ
 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+	select ARCH_KEEP_MEMBLOCK
 	select ARCH_SAVE_PAGE_KEYS if HIBERNATION
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_NUMA_BALANCING
@@ -116,7 +116,9 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
 	return pte_modify(pte, newprot);
 }
 
-#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static inline bool gigantic_page_supported(void) { return true; }
-#endif
+static inline bool gigantic_page_runtime_supported(void)
+{
+	return true;
+}
+
 #endif /* _ASM_S390_HUGETLB_H */
@@ -2376,7 +2376,7 @@ static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
 		ret = -EFAULT;
 		goto out;
 	}
-	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
+	ret = get_user_pages_fast(map->addr, 1, FOLL_WRITE, &map->page);
 	if (ret < 0)
 		goto out;
 	BUG_ON(ret != 1);
@@ -157,14 +157,6 @@ void free_initmem(void)
 	free_initmem_default(POISON_FREE_INITMEM);
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
-			   "initrd");
-}
-#endif
-
 unsigned long memory_block_size_bytes(void)
 {
 	/*
@@ -227,8 +219,8 @@ device_initcall(s390_cma_mem_init);
 
 #endif /* CONFIG_CMA */
 
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-		bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+		struct mhp_restrictions *restrictions)
 {
 	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long size_pages = PFN_DOWN(size);
@@ -238,21 +230,22 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
 	if (rc)
 		return rc;
 
-	rc = __add_pages(nid, start_pfn, size_pages, altmap, want_memblock);
+	rc = __add_pages(nid, start_pfn, size_pages, restrictions);
 	if (rc)
 		vmem_remove_mapping(start, size);
 	return rc;
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-int arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap)
+void arch_remove_memory(int nid, u64 start, u64 size,
+			struct vmem_altmap *altmap)
 {
 	/*
 	 * There is no hardware or firmware interface which could trigger a
 	 * hot memory remove on s390. So there is nothing that needs to be
 	 * implemented.
 	 */
-	return -EBUSY;
+	BUG();
 }
 #endif
 #endif /* CONFIG_MEMORY_HOTPLUG */
@@ -10,7 +10,6 @@ config SUPERH
 	select DMA_DECLARE_COHERENT
 	select HAVE_IDE if HAS_IOPORT_MAP
 	select HAVE_MEMBLOCK_NODE_MAP
-	select ARCH_DISCARD_MEMBLOCK
 	select HAVE_OPROFILE
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_PERF_EVENTS
@@ -53,6 +52,7 @@ config SUPERH
 	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_NMI
 	select NEED_SG_DMA_LENGTH
+	select ARCH_HAS_GIGANTIC_PAGE
 
 	help
 	  The SuperH is a RISC processor targeted for use in embedded systems
@@ -10,7 +10,6 @@
  */
 #include <linux/irq.h>
 #include <linux/io.h>
-#include <linux/irq.h>
 #include <linux/export.h>
 #include <linux/err.h>
 #include <mach/sysasic.h>
@@ -204,7 +204,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
-* @write:	whether pages will be written to
+* @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
@@ -216,8 +216,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			struct page **pages)
+int get_user_pages_fast(unsigned long start, int nr_pages,
+			unsigned int gup_flags, struct page **pages)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long addr, len, end;
@@ -241,7 +241,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
 			goto slow;
-		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+		if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
+				   pages, &nr))
 			goto slow;
 	} while (pgdp++, addr = next, addr != end);
 	local_irq_enable();
@@ -261,7 +262,7 @@ slow_irqon:
 
 	ret = get_user_pages_unlocked(start,
 				      (end - start) >> PAGE_SHIFT, pages,
-				      write ? FOLL_WRITE : 0);
+				      gup_flags);
 
 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
@@ -403,28 +403,16 @@ void __init mem_init(void)
 	mem_init_done = 1;
 }
 
-void free_initmem(void)
-{
-	free_initmem_default(-1);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-		bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+		struct mhp_restrictions *restrictions)
 {
 	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
 	/* We only have ZONE_NORMAL, so this is easy.. */
-	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+	ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
 	if (unlikely(ret))
 		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
 
@@ -441,20 +429,15 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 #endif
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-int arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap)
+void arch_remove_memory(int nid, u64 start, u64 size,
+			struct vmem_altmap *altmap)
 {
 	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	struct zone *zone;
-	int ret;
 
 	zone = page_zone(pfn_to_page(start_pfn));
-	ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
-	if (unlikely(ret))
-		pr_warn("%s: Failed, __remove_pages() == %d\n", __func__,
-			ret);
-
-	return ret;
+	__remove_pages(zone, start_pfn, nr_pages, altmap);
 }
 #endif
 #endif /* CONFIG_MEMORY_HOTPLUG */
@@ -92,6 +92,7 @@ config SPARC64
 	select ARCH_CLOCKSOURCE_DATA
 	select ARCH_HAS_PTE_SPECIAL
 	select PCI_DOMAINS if PCI
+	select ARCH_HAS_GIGANTIC_PAGE
 
 config ARCH_DEFCONFIG
 	string
@@ -231,36 +231,6 @@ extern unsigned long _PAGE_ALL_SZ_BITS;
 extern struct page *mem_map_zero;
 #define ZERO_PAGE(vaddr)	(mem_map_zero)
 
-/* This macro must be updated when the size of struct page grows above 80
- * or reduces below 64.
- * The idea that compiler optimizes out switch() statement, and only
- * leaves clrx instructions
- */
-#define	mm_zero_struct_page(pp) do {					\
-	unsigned long *_pp = (void *)(pp);				\
-									\
-	 /* Check that struct page is either 64, 72, or 80 bytes */	\
-	BUILD_BUG_ON(sizeof(struct page) & 7);				\
-	BUILD_BUG_ON(sizeof(struct page) < 64);				\
-	BUILD_BUG_ON(sizeof(struct page) > 80);				\
-									\
-	switch (sizeof(struct page)) {					\
-	case 80:							\
-		_pp[9] = 0;	/* fallthrough */			\
-	case 72:							\
-		_pp[8] = 0;	/* fallthrough */			\
-	default:							\
-		_pp[7] = 0;						\
-		_pp[6] = 0;						\
-		_pp[5] = 0;						\
-		_pp[4] = 0;						\
-		_pp[3] = 0;						\
-		_pp[2] = 0;						\
-		_pp[1] = 0;						\
-		_pp[0] = 0;						\
-	}								\
-} while (0)
-
 /* PFNs are real physical page numbers. However, mem_map only begins to record
  * per-page information starting at pfn_base. This is to handle systems where
  * the first physical page in the machine is at some huge physical address,
@@ -245,8 +245,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
|
|||||||
return nr;
|
return nr;
|
||||||
}
|
}
|
||||||
|
|
||||||
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
|
int get_user_pages_fast(unsigned long start, int nr_pages,
|
||||||
struct page **pages)
|
unsigned int gup_flags, struct page **pages)
|
||||||
{
|
{
|
||||||
struct mm_struct *mm = current->mm;
|
struct mm_struct *mm = current->mm;
|
||||||
unsigned long addr, len, end;
|
unsigned long addr, len, end;
|
||||||
@@ -303,7 +303,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
|
|||||||
next = pgd_addr_end(addr, end);
|
next = pgd_addr_end(addr, end);
|
||||||
if (pgd_none(pgd))
|
if (pgd_none(pgd))
|
||||||
goto slow;
|
goto slow;
|
||||||
if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
|
if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
|
||||||
|
pages, &nr))
|
||||||
goto slow;
|
goto slow;
|
||||||
} while (pgdp++, addr = next, addr != end);
|
} while (pgdp++, addr = next, addr != end);
|
||||||
|
|
||||||
@@ -324,7 +325,7 @@ slow:
|
|||||||
|
|
||||||
ret = get_user_pages_unlocked(start,
|
ret = get_user_pages_unlocked(start,
|
||||||
(end - start) >> PAGE_SHIFT, pages,
|
(end - start) >> PAGE_SHIFT, pages,
|
||||||
write ? FOLL_WRITE : 0);
|
gup_flags);
|
||||||
|
|
||||||
/* Have to be a bit careful with return values */
|
/* Have to be a bit careful with return values */
|
||||||
if (nr > 0) {
|
if (nr > 0) {
|
||||||
|
|||||||
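
This hunk is the first of many below that convert get_user_pages_fast() from an "int write" argument to an "unsigned int gup_flags" bitmask, with FOLL_WRITE standing in for the old boolean. A hedged caller-side sketch, with an illustrative address and page count:

#include <linux/mm.h>
#include <linux/errno.h>

static int pin_one_writable_page(unsigned long uaddr, struct page **page)
{
	/* Old call: get_user_pages_fast(uaddr, 1, 1, page);
	 * New call: pass FOLL_WRITE, or 0 for read-only pins. */
	int ret = get_user_pages_fast(uaddr, 1, FOLL_WRITE, page);

	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* The pinned page is still released with put_page() as before. */
	return 0;
}
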
@@ -294,19 +294,6 @@ void __init mem_init(void)
 	mem_init_print_info(NULL);
 }

-void free_initmem (void)
-{
-	free_initmem_default(POISON_FREE_INITMEM);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
-			   "initrd");
-}
-#endif
-
 void sparc_flush_page_to_ram(struct page *page)
 {
 	unsigned long vaddr = (unsigned long)page_address(page);
@@ -2572,14 +2572,6 @@ void free_initmem(void)
 	}
 }

-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
-			   "initrd");
-}
-#endif
-
 pgprot_t PAGE_KERNEL __read_mostly;
 EXPORT_SYMBOL(PAGE_KERNEL);

@@ -188,13 +188,6 @@ void free_initmem(void)
 {
 }

-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
 /* Allocate and free page tables. */

 pgd_t *pgd_alloc(struct mm_struct *mm)
@@ -3,6 +3,7 @@ config UNICORE32
 	def_bool y
 	select ARCH_32BIT_OFF_T
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
+	select ARCH_HAS_KEEPINITRD
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
 	select HAVE_KERNEL_GZIP
@@ -287,27 +287,3 @@ void __init mem_init(void)
 		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
 	}
 }
-
-void free_initmem(void)
-{
-	free_initmem_default(-1);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-
-static int keep_initrd;
-
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	if (!keep_initrd)
-		free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-
-static int __init keepinitrd_setup(char *__unused)
-{
-	keep_initrd = 1;
-	return 1;
-}
-
-__setup("keepinitrd", keepinitrd_setup);
-#endif
@@ -22,7 +22,7 @@ config X86_64
 	def_bool y
 	depends on 64BIT
 	# Options that are inherently 64-bit kernel only:
-	select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
+	select ARCH_HAS_GIGANTIC_PAGE
 	select ARCH_SUPPORTS_INT128
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select HAVE_ARCH_SOFT_DIRTY
@@ -47,7 +47,6 @@ config X86
 	select ARCH_32BIT_OFF_T if X86_32
 	select ARCH_CLOCKSOURCE_DATA
 	select ARCH_CLOCKSOURCE_INIT
-	select ARCH_DISCARD_MEMBLOCK
 	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
 	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
@@ -17,8 +17,4 @@ static inline void arch_clear_hugepage_flags(struct page *page)
 {
 }

-#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static inline bool gigantic_page_supported(void) { return true; }
-#endif
-
 #endif /* _ASM_X86_HUGETLB_H */
@@ -140,7 +140,7 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	pt_element_t *table;
 	struct page *page;

-	npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
+	npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page);
 	/* Check if the user is doing something meaningless. */
 	if (unlikely(npages != 1))
 		return -EFAULT;
@@ -1805,7 +1805,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
 		return NULL;

 	/* Pin the user virtual address. */
-	npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
+	npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
 	if (npinned != npages) {
 		pr_err("SEV: Failure locking %lu pages.\n", npages);
 		goto err;
@@ -203,7 +203,7 @@ static __init int setup_hugepagesz(char *opt)
 }
 __setup("hugepagesz=", setup_hugepagesz);

-#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
+#ifdef CONFIG_CONTIG_ALLOC
 static __init int gigantic_pages_init(void)
 {
 	/* With compaction or CMA we can allocate gigantic pages at runtime */
@@ -850,24 +850,25 @@ void __init mem_init(void)
 }

 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-		bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+		struct mhp_restrictions *restrictions)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;

-	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+	return __add_pages(nid, start_pfn, nr_pages, restrictions);
 }

 #ifdef CONFIG_MEMORY_HOTREMOVE
-int arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap)
+void arch_remove_memory(int nid, u64 start, u64 size,
+			struct vmem_altmap *altmap)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	struct zone *zone;

 	zone = page_zone(pfn_to_page(start_pfn));
-	return __remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(zone, start_pfn, nr_pages, altmap);
 }
 #endif
 #endif
@@ -777,11 +777,11 @@ static void update_end_of_memory_vars(u64 start, u64 size)
 }

 int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
-		struct vmem_altmap *altmap, bool want_memblock)
+		struct mhp_restrictions *restrictions)
 {
 	int ret;

-	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+	ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
 	WARN_ON_ONCE(ret);

 	/* update max_pfn, max_low_pfn and high_memory */
@@ -791,15 +791,15 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
 	return ret;
 }

-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-		bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+		struct mhp_restrictions *restrictions)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;

 	init_memory_mapping(start, start + size);

-	return add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+	return add_pages(nid, start_pfn, nr_pages, restrictions);
 }

 #define PAGE_INUSE 0xFD
@@ -1141,24 +1141,20 @@ kernel_physical_mapping_remove(unsigned long start, unsigned long end)
 	remove_pagetable(start, end, true, NULL);
 }

-int __ref arch_remove_memory(int nid, u64 start, u64 size,
+void __ref arch_remove_memory(int nid, u64 start, u64 size,
 			      struct vmem_altmap *altmap)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	struct page *page = pfn_to_page(start_pfn);
 	struct zone *zone;
-	int ret;

 	/* With altmap the first mapped page is offset from @start */
 	if (altmap)
 		page += vmem_altmap_offset(altmap);
 	zone = page_zone(page);
-	ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
-	WARN_ON_ONCE(ret);
+	__remove_pages(zone, start_pfn, nr_pages, altmap);
 	kernel_physical_mapping_remove(start, start + size);
-
-	return ret;
 }
 #endif
 #endif /* CONFIG_MEMORY_HOTPLUG */
@@ -216,11 +216,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif

-void free_initmem(void)
-{
-	free_initmem_default(-1);
-}
-
 static void __init parse_memmap_one(char *p)
 {
 	char *oldp;
@@ -231,13 +231,14 @@ static bool pages_correctly_probed(unsigned long start_pfn)
  * OK to have direct references to sparsemem variables in here.
  */
 static int
-memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
+memory_block_action(unsigned long start_section_nr, unsigned long action,
+		    int online_type)
 {
 	unsigned long start_pfn;
 	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
 	int ret;

-	start_pfn = section_nr_to_pfn(phys_index);
+	start_pfn = section_nr_to_pfn(start_section_nr);

 	switch (action) {
 	case MEM_ONLINE:
@@ -251,7 +252,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
 		break;
 	default:
 		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
-		     "%ld\n", __func__, phys_index, action, action);
+		     "%ld\n", __func__, start_section_nr, action, action);
 		ret = -EINVAL;
 	}

@@ -733,16 +734,18 @@ unregister_memory(struct memory_block *memory)
 {
 	BUG_ON(memory->dev.bus != &memory_subsys);

-	/* drop the ref. we got in remove_memory_section() */
+	/* drop the ref. we got via find_memory_block() */
 	put_device(&memory->dev);
 	device_unregister(&memory->dev);
 }

-static int remove_memory_section(unsigned long node_id,
-			       struct mem_section *section, int phys_device)
+void unregister_memory_section(struct mem_section *section)
 {
 	struct memory_block *mem;

+	if (WARN_ON_ONCE(!present_section(section)))
+		return;
+
 	mutex_lock(&mem_sysfs_mutex);

 	/*
@@ -763,15 +766,6 @@ static int remove_memory_section(unsigned long node_id,

 out_unlock:
 	mutex_unlock(&mem_sysfs_mutex);
-	return 0;
-}
-
-int unregister_memory_section(struct mem_section *section)
-{
-	if (!present_section(section))
-		return -EINVAL;
-
-	return remove_memory_section(0, section, 0);
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */

@@ -184,8 +184,7 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,

 	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

-	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, *pfn,
-			vmf->flags & FAULT_FLAG_WRITE);
+	return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
 }

 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
@@ -235,8 +234,7 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,

 	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

-	return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, *pfn,
-			vmf->flags & FAULT_FLAG_WRITE);
+	return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
 }
 #else
 static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
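
The device-dax hunks above show vmf_insert_pfn_pmd() and vmf_insert_pfn_pud() collapsing their vma/address/pmd arguments into the struct vm_fault the handler already receives. A sketch of a huge-page fault handler on the new signature; my_dev_phys_for() is a hypothetical lookup helper and the pfn flags are illustrative:

#include <linux/huge_mm.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>

static vm_fault_t my_huge_fault(struct vm_fault *vmf)
{
	/* Hypothetical lookup of the device physical address backing
	 * the faulting virtual address. */
	phys_addr_t phys = my_dev_phys_for(vmf->vma, vmf->address);
	pfn_t pfn = phys_to_pfn_t(phys, PFN_DEV | PFN_MAP);

	/* The vma, faulting address and pmd slot all travel inside
	 * *vmf now, so only the pfn and write intent remain. */
	return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
}
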
@@ -107,19 +107,8 @@ EXPORT_SYMBOL(fw_iso_buffer_init);
 int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
 			  struct vm_area_struct *vma)
 {
-	unsigned long uaddr;
-	int i, err;
-
-	uaddr = vma->vm_start;
-	for (i = 0; i < buffer->page_count; i++) {
-		err = vm_insert_page(vma, uaddr, buffer->pages[i]);
-		if (err)
-			return err;
-
-		uaddr += PAGE_SIZE;
-	}
-
-	return 0;
+	return vm_map_pages_zero(vma, buffer->pages,
+				 buffer->page_count);
 }

 void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
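
The firewire conversion above is the pattern for the new vm_map_pages_zero() helper: an mmap handler that used to walk its page array with vm_insert_page() hands the whole array to the helper, which also ignores vma->vm_pgoff (drivers like this treat the offset as a selection cookie, not an in-buffer offset). A minimal sketch; struct my_buf is hypothetical:

#include <linux/mm.h>

struct my_buf {
	struct page **pages;
	unsigned long page_count;
};

static int my_buf_mmap(struct my_buf *buf, struct vm_area_struct *vma)
{
	/* Replaces the open-coded vm_insert_page() loop; the error
	 * handling for a partially mapped vma moves into the helper. */
	return vm_map_pages_zero(vma, buf->pages, buf->page_count);
}
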
@@ -102,7 +102,7 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
 		goto unlock_vm;
 	}

-	pinned = get_user_pages_fast(region->user_addr, npages, 1,
+	pinned = get_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
 				     region->pages);
 	if (pinned < 0) {
 		ret = pinned;
@@ -256,14 +256,14 @@ static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
 	/* TODO we should be able to split locking for interval tree and
 	 * amdgpu_mn_invalidate_node
 	 */
-	if (amdgpu_mn_read_lock(amn, range->blockable))
+	if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
 		return -EAGAIN;

 	it = interval_tree_iter_first(&amn->objects, range->start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;

-		if (!range->blockable) {
+		if (!mmu_notifier_range_blockable(range)) {
 			amdgpu_mn_read_unlock(amn);
 			return -EAGAIN;
 		}
@@ -299,7 +299,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 	/* notification is exclusive, but interval is inclusive */
 	end = range->end - 1;

-	if (amdgpu_mn_read_lock(amn, range->blockable))
+	if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
 		return -EAGAIN;

 	it = interval_tree_iter_first(&amn->objects, range->start, end);
@@ -307,7 +307,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 		struct amdgpu_mn_node *node;
 		struct amdgpu_bo *bo;

-		if (!range->blockable) {
+		if (!mmu_notifier_range_blockable(range)) {
 			amdgpu_mn_read_unlock(amn);
 			return -EAGAIN;
 		}
@@ -122,7 +122,7 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 	while (it) {
 		struct drm_i915_gem_object *obj;

-		if (!range->blockable) {
+		if (!mmu_notifier_range_blockable(range)) {
 			ret = -EAGAIN;
 			break;
 		}
@@ -133,7 +133,7 @@ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 	/* TODO we should be able to split locking for interval tree and
 	 * the tear down.
 	 */
-	if (range->blockable)
+	if (mmu_notifier_range_blockable(range))
 		mutex_lock(&rmn->lock);
 	else if (!mutex_trylock(&rmn->lock))
 		return -EAGAIN;
@@ -144,7 +144,7 @@ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 		struct radeon_bo *bo;
 		long r;

-		if (!range->blockable) {
+		if (!mmu_notifier_range_blockable(range)) {
 			ret = -EAGAIN;
 			goto out_unlock;
 		}
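
The GPU hunks above all make the same substitution: direct reads of range->blockable become mmu_notifier_range_blockable(range), so the flag's representation can change without touching every driver. A sketch of an invalidate_range_start callback in that style; struct my_mn and its lock are hypothetical driver state:

#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

struct my_mn {
	struct mmu_notifier mn;
	struct mutex lock;
};

static int my_invalidate_range_start(struct mmu_notifier *mn,
				     const struct mmu_notifier_range *range)
{
	struct my_mn *mmn = container_of(mn, struct my_mn, mn);

	if (mmu_notifier_range_blockable(range))
		mutex_lock(&mmn->lock);
	else if (!mutex_trylock(&mmn->lock))
		return -EAGAIN;	/* non-blockable context: back off */

	/* ... invalidate driver objects in [range->start, range->end) ... */

	mutex_unlock(&mmn->lock);
	return 0;
}
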
@@ -221,26 +221,13 @@ static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
 					      struct vm_area_struct *vma)
 {
 	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
-	unsigned int i, count = obj->size >> PAGE_SHIFT;
+	unsigned int count = obj->size >> PAGE_SHIFT;
 	unsigned long user_count = vma_pages(vma);
-	unsigned long uaddr = vma->vm_start;
-	unsigned long offset = vma->vm_pgoff;
-	unsigned long end = user_count + offset;
-	int ret;

 	if (user_count == 0)
 		return -ENXIO;
-	if (end > count)
-		return -ENXIO;

-	for (i = offset; i < end; i++) {
-		ret = vm_insert_page(vma, uaddr, rk_obj->pages[i]);
-		if (ret)
-			return ret;
-		uaddr += PAGE_SIZE;
-	}
-
-	return 0;
+	return vm_map_pages(vma, rk_obj->pages, count);
 }

 static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
@@ -243,7 +243,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
 	if (NULL == vsg->pages)
 		return -ENOMEM;
 	ret = get_user_pages_fast((unsigned long)xfer->mem_addr,
-			vsg->num_pages, vsg->direction == DMA_FROM_DEVICE,
+			vsg->num_pages,
+			vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
 			vsg->pages);
 	if (ret != vsg->num_pages) {
 		if (ret < 0)
@@ -224,8 +224,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
 static int gem_mmap_obj(struct xen_gem_object *xen_obj,
 			struct vm_area_struct *vma)
 {
-	unsigned long addr = vma->vm_start;
-	int i;
+	int ret;

 	/*
 	 * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
@@ -252,18 +251,11 @@ static int gem_mmap_obj(struct xen_gem_object *xen_obj,
 	 * FIXME: as we insert all the pages now then no .fault handler must
 	 * be called, so don't provide one
 	 */
-	for (i = 0; i < xen_obj->num_pages; i++) {
-		int ret;
-
-		ret = vm_insert_page(vma, addr, xen_obj->pages[i]);
-		if (ret < 0) {
-			DRM_ERROR("Failed to insert pages into vma: %d\n", ret);
-			return ret;
-		}
-
-		addr += PAGE_SIZE;
-	}
-	return 0;
+	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
+	if (ret < 0)
+		DRM_ERROR("Failed to map pages into vma: %d\n", ret);
+
+	return ret;
 }

 int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
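
Unlike vm_map_pages_zero(), the vm_map_pages() variant used in the rockchip and xen-drm hunks honours vma->vm_pgoff as an offset into the page array and performs the range check itself, which is why the explicit "end > count" tests disappear from the callers. A sketch of a GEM-style mmap path; struct my_gem_obj is hypothetical:

#include <linux/mm.h>

struct my_gem_obj {
	struct page **pages;
	size_t size;
};

static int my_gem_mmap(struct my_gem_obj *obj, struct vm_area_struct *vma)
{
	unsigned long count = obj->size >> PAGE_SHIFT;

	/* vm_pgoff and the vma size are validated inside the helper,
	 * so the caller no longer open-codes the bounds checks. */
	return vm_map_pages(vma, obj->pages, count);
}
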
@@ -295,10 +295,11 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,

 	while (npages) {
 		down_read(&mm->mmap_sem);
-		ret = get_user_pages_longterm(cur_base,
+		ret = get_user_pages(cur_base,
 				     min_t(unsigned long, npages,
 					   PAGE_SIZE / sizeof (struct page *)),
-				     gup_flags, page_list, NULL);
+				     gup_flags | FOLL_LONGTERM,
+				     page_list, NULL);
 		if (ret < 0) {
 			up_read(&mm->mmap_sem);
 			goto umem_release;
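
Here get_user_pages_longterm() is folded into plain get_user_pages() with the new FOLL_LONGTERM flag, a pattern repeated in the infiniband, media and vfio hunks that follow. A hedged sketch of the converted call; the address and count are illustrative, and as in the driver code the caller is assumed to hold mmap_sem for read:

#include <linux/mm.h>

static long pin_longterm(unsigned long addr, unsigned long nr,
			 struct page **pages)
{
	/* Old: get_user_pages_longterm(addr, nr, FOLL_WRITE, pages, NULL);
	 * New: long-term pinning intent is just another gup flag. */
	return get_user_pages(addr, nr, FOLL_WRITE | FOLL_LONGTERM,
			      pages, NULL);
}
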
@@ -152,7 +152,7 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	struct ib_ucontext_per_mm *per_mm =
 		container_of(mn, struct ib_ucontext_per_mm, mn);

-	if (range->blockable)
+	if (mmu_notifier_range_blockable(range))
 		down_read(&per_mm->umem_rwsem);
 	else if (!down_read_trylock(&per_mm->umem_rwsem))
 		return -EAGAIN;
@@ -170,7 +170,8 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
 					     range->end,
 					     invalidate_range_start_trampoline,
-					     range->blockable, NULL);
+					     mmu_notifier_range_blockable(range),
+					     NULL);
 }

 static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
@@ -104,8 +104,9 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
 			    bool writable, struct page **pages)
 {
 	int ret;
+	unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);

-	ret = get_user_pages_fast(vaddr, npages, writable, pages);
+	ret = get_user_pages_fast(vaddr, npages, gup_flags, pages);
 	if (ret < 0)
 		return ret;

@@ -472,7 +472,8 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 		goto out;
 	}

-	ret = get_user_pages_fast(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages);
+	ret = get_user_pages_fast(uaddr & PAGE_MASK, 1,
+				  FOLL_WRITE | FOLL_LONGTERM, pages);
 	if (ret < 0)
 		goto out;

@@ -114,10 +114,10 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,

 	down_read(&current->mm->mmap_sem);
 	for (got = 0; got < num_pages; got += ret) {
-		ret = get_user_pages_longterm(start_page + got * PAGE_SIZE,
+		ret = get_user_pages(start_page + got * PAGE_SIZE,
 				     num_pages - got,
-				     FOLL_WRITE | FOLL_FORCE,
+				     FOLL_LONGTERM | FOLL_WRITE | FOLL_FORCE,
 				     p + got, NULL);
 		if (ret < 0) {
 			up_read(&current->mm->mmap_sem);
 			goto bail_release;
@@ -670,7 +670,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
 	else
 		j = npages;

-	ret = get_user_pages_fast(addr, j, 0, pages);
+	ret = get_user_pages_fast(addr, j, FOLL_LONGTERM, pages);
 	if (ret != j) {
 		i = 0;
 		j = ret;
@@ -143,10 +143,11 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 	ret = 0;

 	while (npages) {
-		ret = get_user_pages_longterm(cur_base,
+		ret = get_user_pages(cur_base,
 				     min_t(unsigned long, npages,
 					   PAGE_SIZE / sizeof(struct page *)),
-				     gup_flags, page_list, NULL);
+				     gup_flags | FOLL_LONGTERM,
+				     page_list, NULL);

 		if (ret < 0)
 			goto out;
@@ -619,17 +619,7 @@ out_free_pages:

 int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
 {
-	unsigned long uaddr = vma->vm_start;
-	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	int ret = -ENXIO;
-
-	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
-		ret = vm_insert_page(vma, uaddr, pages[i]);
-		if (ret)
-			break;
-		uaddr += PAGE_SIZE;
-	}
-	return ret;
+	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
 }

 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
@@ -2201,6 +2201,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
 		goto unlock;
 	}

+	/*
+	 * vm_pgoff is treated in V4L2 API as a 'cookie' to select a buffer,
+	 * not as a in-buffer offset. We always want to mmap a whole buffer
+	 * from its beginning.
+	 */
+	vma->vm_pgoff = 0;
+
 	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);

 unlock:
@@ -186,12 +186,6 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
 		return -EINVAL;
 	}

-	/*
-	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
-	 * map whole buffer
-	 */
-	vma->vm_pgoff = 0;
-
 	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
 		buf->dma_addr, buf->size, buf->attrs);

@@ -328,28 +328,18 @@ static unsigned int vb2_dma_sg_num_users(void *buf_priv)
 static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
 {
 	struct vb2_dma_sg_buf *buf = buf_priv;
-	unsigned long uaddr = vma->vm_start;
-	unsigned long usize = vma->vm_end - vma->vm_start;
-	int i = 0;
+	int err;

 	if (!buf) {
 		printk(KERN_ERR "No memory to map\n");
 		return -EINVAL;
 	}

-	do {
-		int ret;
-
-		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
-		if (ret) {
-			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
-			return ret;
-		}
-
-		uaddr += PAGE_SIZE;
-		usize -= PAGE_SIZE;
-	} while (usize > 0);
-
+	err = vm_map_pages(vma, buf->pages, buf->num_pages);
+	if (err) {
+		printk(KERN_ERR "Remapping memory, error: %d\n", err);
+		return err;
+	}

 	/*
 	 * Use common vm_area operations to track buffer refcount.
@@ -186,12 +186,12 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
 	dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
 		data, size, dma->nr_pages);

-	err = get_user_pages_longterm(data & PAGE_MASK, dma->nr_pages,
-			     flags, dma->pages, NULL);
+	err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
+			     flags | FOLL_LONGTERM, dma->pages, NULL);

 	if (err != dma->nr_pages) {
 		dma->nr_pages = (err >= 0) ? err : 0;
-		dprintk(1, "get_user_pages_longterm: err=%d [%d]\n", err,
+		dprintk(1, "get_user_pages: err=%d [%d]\n", err,
 			dma->nr_pages);
 		return err < 0 ? err : -EINVAL;
 	}
@@ -603,7 +603,7 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
 	/* pin user pages in memory */
 	rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
 				 m->nr_pages,
-				 m->write,		/* readable/writable */
+				 m->write ? FOLL_WRITE : 0, /* readable/writable */
 				 m->page_list);	/* ptrs to pages */
 	if (rc < 0)
 		goto fail_get_user_pages;
@@ -242,7 +242,7 @@ static int vmci_host_setup_notify(struct vmci_ctx *context,
 	/*
 	 * Lock physical page backing a given user VA.
 	 */
-	retval = get_user_pages_fast(uva, 1, 1, &context->notify_page);
+	retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page);
 	if (retval != 1) {
 		context->notify_page = NULL;
 		return VMCI_ERROR_GENERIC;
@@ -659,7 +659,8 @@ static int qp_host_get_user_memory(u64 produce_uva,
 	int err = VMCI_SUCCESS;

 	retval = get_user_pages_fast((uintptr_t) produce_uva,
-				     produce_q->kernel_if->num_pages, 1,
+				     produce_q->kernel_if->num_pages,
+				     FOLL_WRITE,
 				     produce_q->kernel_if->u.h.header_page);
 	if (retval < (int)produce_q->kernel_if->num_pages) {
 		pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
@@ -671,7 +672,8 @@ static int qp_host_get_user_memory(u64 produce_uva,
 	}

 	retval = get_user_pages_fast((uintptr_t) consume_uva,
-				     consume_q->kernel_if->num_pages, 1,
+				     consume_q->kernel_if->num_pages,
+				     FOLL_WRITE,
 				     consume_q->kernel_if->u.h.header_page);
 	if (retval < (int)consume_q->kernel_if->num_pages) {
 		pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
@@ -274,7 +274,8 @@ static int pin_user_pages(unsigned long first_page,
 		*iter_last_page_size = last_page_size;
 	}

-	ret = get_user_pages_fast(first_page, requested_pages, !is_write,
+	ret = get_user_pages_fast(first_page, requested_pages,
+				  !is_write ? FOLL_WRITE : 0,
 				  pages);
 	if (ret <= 0)
 		return -EFAULT;
@@ -868,7 +868,9 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,

 		pinned = get_user_pages_fast(
 				(unsigned long)xfer->loc_addr & PAGE_MASK,
-				nr_pages, dir == DMA_FROM_DEVICE, page_list);
+				nr_pages,
+				dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
+				page_list);

 		if (pinned != nr_pages) {
 			if (pinned < 0) {
@@ -437,7 +437,7 @@ static int dax_lock_page(void *va, struct page **p)

 	dax_dbg("uva %p", va);

-	ret = get_user_pages_fast((unsigned long)va, 1, 1, p);
+	ret = get_user_pages_fast((unsigned long)va, 1, FOLL_WRITE, p);
 	if (ret == 1) {
 		dax_dbg("locked page %p, for VA %p", *p, va);
 		return 0;
@@ -4922,7 +4922,8 @@ static int sgl_map_user_pages(struct st_buffer *STbp,

 	/* Try to fault in all of the necessary pages */
 	/* rw==READ means read from drive, write into memory area */
-	res = get_user_pages_fast(uaddr, nr_pages, rw == READ, pages);
+	res = get_user_pages_fast(uaddr, nr_pages, rw == READ ? FOLL_WRITE : 0,
+				  pages);

 	/* Errors and no page mapped should return here */
 	if (res < nr_pages)
@@ -486,8 +486,8 @@ static int gasket_perform_mapping(struct gasket_page_table *pg_tbl,
 			ptes[i].dma_addr = pg_tbl->coherent_pages[0].paddr +
 					   off + i * PAGE_SIZE;
 		} else {
-			ret = get_user_pages_fast(page_addr - offset, 1, 1,
-						  &page);
+			ret = get_user_pages_fast(page_addr - offset, 1,
+						  FOLL_WRITE, &page);

 			if (ret <= 0) {
 				dev_err(pg_tbl->device,
@@ -273,7 +273,7 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
 		goto err;
 	}

-	rc = get_user_pages_fast(start, num_pages, 1, shm->pages);
+	rc = get_user_pages_fast(start, num_pages, FOLL_WRITE, shm->pages);
 	if (rc > 0)
 		shm->num_pages = rc;
 	if (rc != num_pages) {
@@ -532,7 +532,8 @@ static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
 	enum dma_data_direction direction = iommu_tce_direction(tce);

 	if (get_user_pages_fast(tce & PAGE_MASK, 1,
-			direction != DMA_TO_DEVICE, &page) != 1)
+			direction != DMA_TO_DEVICE ? FOLL_WRITE : 0,
+			&page) != 1)
 		return -EFAULT;

 	*hpa = __pa((unsigned long) page_address(page));
@@ -358,7 +358,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,

 	down_read(&mm->mmap_sem);
 	if (mm == current->mm) {
-		ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas);
+		ret = get_user_pages(vaddr, 1, flags | FOLL_LONGTERM, page,
+				     vmas);
 	} else {
 		ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
 					    vmas, NULL);
@@ -1704,7 +1704,7 @@ static int set_bit_to_user(int nr, void __user *addr)
 	int bit = nr + (log % PAGE_SIZE) * 8;
 	int r;

-	r = get_user_pages_fast(log, 1, 1, &page);
+	r = get_user_pages_fast(log, 1, FOLL_WRITE, &page);
 	if (r < 0)
 		return r;
 	BUG_ON(r != 1);
@@ -686,7 +686,7 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
 	if (!pages)
 		return -ENOMEM;

-	ret = get_user_pages_fast((unsigned long)buf, nr_pages, true, pages);
+	ret = get_user_pages_fast((unsigned long)buf, nr_pages, FOLL_WRITE, pages);
 	if (ret < nr_pages) {
 		nr_pages = ret;
 		ret = -EINVAL;
@@ -244,7 +244,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)

 	/* Get the physical addresses of the source buffer */
 	num_pinned = get_user_pages_fast(param.local_vaddr - lb_offset,
-		num_pages, param.source != -1, pages);
+		num_pages, param.source != -1 ? FOLL_WRITE : 0, pages);

 	if (num_pinned != num_pages) {
 		/* get_user_pages() failed */
@@ -526,20 +526,20 @@ static int mn_invl_range_start(struct mmu_notifier *mn,
 	struct gntdev_grant_map *map;
 	int ret = 0;

-	if (range->blockable)
+	if (mmu_notifier_range_blockable(range))
 		mutex_lock(&priv->lock);
 	else if (!mutex_trylock(&priv->lock))
 		return -EAGAIN;

 	list_for_each_entry(map, &priv->maps, next) {
 		ret = unmap_if_in_range(map, range->start, range->end,
-					range->blockable);
+					mmu_notifier_range_blockable(range));
 		if (ret)
 			goto out_unlock;
 	}
 	list_for_each_entry(map, &priv->freeable_maps, next) {
 		ret = unmap_if_in_range(map, range->start, range->end,
-					range->blockable);
+					mmu_notifier_range_blockable(range));
 		if (ret)
 			goto out_unlock;
 	}
@@ -852,7 +852,7 @@ static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
 	unsigned long xen_pfn;
 	int ret;

-	ret = get_user_pages_fast(addr, 1, writeable, &page);
+	ret = get_user_pages_fast(addr, 1, writeable ? FOLL_WRITE : 0, &page);
 	if (ret < 0)
 		return ret;

@@ -1084,7 +1084,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 	int index = vma->vm_pgoff;
 	int count = vma_pages(vma);
 	struct gntdev_grant_map *map;
-	int i, err = -EINVAL;
+	int err = -EINVAL;

 	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
@@ -1145,12 +1145,9 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 		goto out_put_map;

 	if (!use_ptemod) {
-		for (i = 0; i < count; i++) {
-			err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
-				map->pages[i]);
-			if (err)
-				goto out_put_map;
-		}
+		err = vm_map_pages(vma, map->pages, map->count);
+		if (err)
+			goto out_put_map;
 	} else {
 #ifdef CONFIG_X86
 		/*
Some files were not shown because too many files have changed in this diff.