Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:

 - large KASAN update to use arm's "software tag-based mode"
 - a few misc things
 - sh updates
 - ocfs2 updates
 - just about all of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (167 commits)
  kernel/fork.c: mark 'stack_vm_area' with __maybe_unused
  memcg, oom: notify on oom killer invocation from the charge path
  mm, swap: fix swapoff with KSM pages
  include/linux/gfp.h: fix typo
  mm/hmm: fix memremap.h, move dev_page_fault_t callback to hmm
  hugetlbfs: Use i_mmap_rwsem to fix page fault/truncate race
  hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization
  memory_hotplug: add missing newlines to debugging output
  mm: remove __hugepage_set_anon_rmap()
  include/linux/vmstat.h: remove unused page state adjustment macro
  mm/page_alloc.c: allow error injection
  mm: migrate: drop unused argument of migrate_page_move_mapping()
  blkdev: avoid migration stalls for blkdev pages
  mm: migrate: provide buffer_migrate_page_norefs()
  mm: migrate: move migrate_page_lock_buffers()
  mm: migrate: lock buffers before migrate_page_move_mapping()
  mm: migration: factor out code to compute expected number of page references
  mm, page_alloc: enable pcpu_drain with zone capability
  kmemleak: add config to select auto scan
  mm/page_alloc.c: don't call kasan_free_pages() at deferred mem init
  ...
@@ -238,44 +238,40 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
  * amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change
  *
  * @mn: our notifier
- * @mm: the mm this callback is about
- * @start: start of updated range
- * @end: end of updated range
+ * @range: mmu notifier context
  *
  * Block for operations on BOs to finish and mark pages as accessed and
  * potentially dirty.
  */
 static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
-                                                struct mm_struct *mm,
-                                                unsigned long start,
-                                                unsigned long end,
-                                                bool blockable)
+                        const struct mmu_notifier_range *range)
 {
         struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
         struct interval_tree_node *it;
+        unsigned long end;
 
         /* notification is exclusive, but interval is inclusive */
-        end -= 1;
+        end = range->end - 1;
 
         /* TODO we should be able to split locking for interval tree and
          * amdgpu_mn_invalidate_node
          */
-        if (amdgpu_mn_read_lock(amn, blockable))
+        if (amdgpu_mn_read_lock(amn, range->blockable))
                 return -EAGAIN;
 
-        it = interval_tree_iter_first(&amn->objects, start, end);
+        it = interval_tree_iter_first(&amn->objects, range->start, end);
         while (it) {
                 struct amdgpu_mn_node *node;
 
-                if (!blockable) {
+                if (!range->blockable) {
                         amdgpu_mn_read_unlock(amn);
                         return -EAGAIN;
                 }
 
                 node = container_of(it, struct amdgpu_mn_node, it);
-                it = interval_tree_iter_next(it, start, end);
+                it = interval_tree_iter_next(it, range->start, end);
 
-                amdgpu_mn_invalidate_node(node, start, end);
+                amdgpu_mn_invalidate_node(node, range->start, end);
         }
 
         return 0;
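
The same mechanical conversion recurs in the HSA, i915 userptr and radeon hunks below: the invalidate_range_start callbacks drop their separate mm/start/end/blockable parameters and take a single const struct mmu_notifier_range *, reading the same values as fields. Below is a minimal, compilable sketch of that shape, using hypothetical demo_* names in place of the kernel types; the real struct is part of the mmu notifier API and carries more state than shown here.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's struct mmu_notifier_range: it bundles
 * the mm, the [start, end) interval and the blockable flag that used to be
 * passed to the callbacks as four separate arguments. */
struct demo_range {
        void *mm;            /* owning address space (opaque in this sketch) */
        unsigned long start; /* first address of the invalidated range */
        unsigned long end;   /* end of the range, exclusive */
        bool blockable;      /* may the callback block/sleep? */
};

/* New-style callback shape: every parameter arrives through one struct. */
static int demo_invalidate_range_start(const struct demo_range *range)
{
        /* notification is exclusive, but interval lookups are inclusive */
        unsigned long end = range->end - 1;

        if (!range->blockable)
                return -1; /* the kernel callbacks return -EAGAIN here */

        printf("invalidate [0x%lx, 0x%lx], blockable=%d\n",
               range->start, end, range->blockable);
        return 0;
}

int main(void)
{
        struct demo_range r = {
                .mm = NULL, .start = 0x1000, .end = 0x2000, .blockable = true,
        };
        return demo_invalidate_range_start(&r);
}
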
@@ -294,39 +290,38 @@ static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
  * are restorted in amdgpu_mn_invalidate_range_end_hsa.
  */
 static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
-                                                struct mm_struct *mm,
-                                                unsigned long start,
-                                                unsigned long end,
-                                                bool blockable)
+                        const struct mmu_notifier_range *range)
 {
         struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
         struct interval_tree_node *it;
+        unsigned long end;
 
         /* notification is exclusive, but interval is inclusive */
-        end -= 1;
+        end = range->end - 1;
 
-        if (amdgpu_mn_read_lock(amn, blockable))
+        if (amdgpu_mn_read_lock(amn, range->blockable))
                 return -EAGAIN;
 
-        it = interval_tree_iter_first(&amn->objects, start, end);
+        it = interval_tree_iter_first(&amn->objects, range->start, end);
         while (it) {
                 struct amdgpu_mn_node *node;
                 struct amdgpu_bo *bo;
 
-                if (!blockable) {
+                if (!range->blockable) {
                         amdgpu_mn_read_unlock(amn);
                         return -EAGAIN;
                 }
 
                 node = container_of(it, struct amdgpu_mn_node, it);
-                it = interval_tree_iter_next(it, start, end);
+                it = interval_tree_iter_next(it, range->start, end);
 
                 list_for_each_entry(bo, &node->bos, mn_list) {
                         struct kgd_mem *mem = bo->kfd_bo;
 
                         if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
-                                                         start, end))
-                                amdgpu_amdkfd_evict_userptr(mem, mm);
+                                                         range->start,
+                                                         end))
+                                amdgpu_amdkfd_evict_userptr(mem, range->mm);
                 }
         }
 
@@ -344,9 +339,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
  * Release the lock again to allow new command submissions.
  */
 static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
-                                           struct mm_struct *mm,
-                                           unsigned long start,
-                                           unsigned long end)
+                        const struct mmu_notifier_range *range)
 {
         struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 
@@ -853,7 +853,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
          */
         pgdat = NODE_DATA(numa_node_id);
         for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
-                mem_in_bytes += pgdat->node_zones[zone_type].managed_pages;
+                mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);
         mem_in_bytes <<= PAGE_SHIFT;
 
         sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
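
The kfd hunk above reflects another API change in this merge: a zone's managed-page count is now read through the zone_managed_pages() accessor instead of the raw managed_pages field (the counter became atomic elsewhere in the series), and the summed page count is shifted by PAGE_SHIFT to get bytes. Below is a small userspace sketch of the same accessor-plus-conversion pattern; the demo_* names, the zone count and the 4 KiB page size are assumptions made for the example, not kernel definitions.

#include <stdatomic.h>
#include <stdio.h>

#define PAGE_SHIFT   12 /* assume 4 KiB pages for this sketch */
#define MAX_NR_ZONES 3  /* hypothetical zone count, just for the demo */

/* Hypothetical zone: the managed-page count is kept in an atomic so it can be
 * adjusted without extra locking, and readers go through an accessor instead
 * of touching the field directly. */
struct demo_zone {
        atomic_long managed_pages;
};

struct demo_pgdat {
        struct demo_zone node_zones[MAX_NR_ZONES];
};

/* Accessor mirroring the shape of zone_managed_pages(): hide the atomic read
 * behind a small inline helper. */
static inline unsigned long demo_zone_managed_pages(struct demo_zone *zone)
{
        return (unsigned long)atomic_load(&zone->managed_pages);
}

int main(void)
{
        struct demo_pgdat pgdat = { .node_zones = {
                { .managed_pages = 1024 }, { .managed_pages = 2048 }, { 0 },
        } };
        unsigned long long mem_in_bytes = 0;
        int zone_type;

        /* Same loop shape as the hunk above: sum pages per zone, then convert
         * the total to bytes. */
        for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
                mem_in_bytes += demo_zone_managed_pages(&pgdat.node_zones[zone_type]);
        mem_in_bytes <<= PAGE_SHIFT;

        printf("node memory: %llu bytes\n", mem_in_bytes);
        return 0;
}
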
@@ -2559,7 +2559,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
          * If there's no chance of allocating enough pages for the whole
          * object, bail early.
          */
-        if (page_count > totalram_pages)
+        if (page_count > totalram_pages())
                 return -ENOMEM;
 
         st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -113,27 +113,25 @@ static void del_object(struct i915_mmu_object *mo)
 }
 
 static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
-                                                      struct mm_struct *mm,
-                                                      unsigned long start,
-                                                      unsigned long end,
-                                                      bool blockable)
+                        const struct mmu_notifier_range *range)
 {
         struct i915_mmu_notifier *mn =
                 container_of(_mn, struct i915_mmu_notifier, mn);
         struct i915_mmu_object *mo;
         struct interval_tree_node *it;
         LIST_HEAD(cancelled);
+        unsigned long end;
 
         if (RB_EMPTY_ROOT(&mn->objects.rb_root))
                 return 0;
 
         /* interval ranges are inclusive, but invalidate range is exclusive */
-        end--;
+        end = range->end - 1;
 
         spin_lock(&mn->lock);
-        it = interval_tree_iter_first(&mn->objects, start, end);
+        it = interval_tree_iter_first(&mn->objects, range->start, end);
         while (it) {
-                if (!blockable) {
+                if (!range->blockable) {
                         spin_unlock(&mn->lock);
                         return -EAGAIN;
                 }
@@ -151,7 +149,7 @@ static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
                 queue_work(mn->wq, &mo->work);
 
                 list_add(&mo->link, &cancelled);
-                it = interval_tree_iter_next(it, start, end);
+                it = interval_tree_iter_next(it, range->start, end);
         }
         list_for_each_entry(mo, &cancelled, link)
                 del_object(mo);
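
The drivers above also keep their existing comments that the notifier's end address is exclusive while the interval tree lookups take an inclusive last address, which is why the converted code computes end = range->end - 1 before calling interval_tree_iter_first/next. The sketch below shows the off-by-one that the subtraction avoids; the interval/overlaps helpers are stand-ins invented for this example, not the kernel's interval tree API.

#include <stdbool.h>
#include <stdio.h>

/* Inclusive interval [first, last], mirroring how the drivers above query
 * their interval trees with an inclusive last address. */
struct interval {
        unsigned long first;
        unsigned long last;
};

/* Overlap test between an inclusive interval and an inclusive query. */
static bool overlaps(const struct interval *it,
                     unsigned long start, unsigned long last)
{
        return it->first <= last && start <= it->last;
}

int main(void)
{
        /* An object covering the page just after the invalidated range. */
        struct interval obj = { 0x3000, 0x3fff };

        /* The notifier describes [0x2000, 0x3000): the exclusive end does not
         * touch 0x3000.  Passing the raw end to an inclusive query reports a
         * spurious overlap; end - 1 does not. */
        unsigned long start = 0x2000, end = 0x3000;

        printf("query with raw end:  overlap=%d\n", overlaps(&obj, start, end));
        printf("query with end - 1:  overlap=%d\n", overlaps(&obj, start, end - 1));
        return 0;
}
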
@@ -170,7 +170,7 @@ static int igt_ppgtt_alloc(void *arg)
          * This should ensure that we do not run into the oomkiller during
          * the test and take down the machine wilfully.
          */
-        limit = totalram_pages << PAGE_SHIFT;
+        limit = totalram_pages() << PAGE_SHIFT;
         limit = min(ppgtt->vm.total, limit);
 
         /* Check we can allocate the entire range */
@@ -1244,7 +1244,7 @@ static int exercise_mock(struct drm_i915_private *i915,
                                   u64 hole_start, u64 hole_end,
                                   unsigned long end_time))
 {
-        const u64 limit = totalram_pages << PAGE_SHIFT;
+        const u64 limit = totalram_pages() << PAGE_SHIFT;
         struct i915_gem_context *ctx;
         struct i915_hw_ppgtt *ppgtt;
         IGT_TIMEOUT(end_time);
@@ -119,40 +119,38 @@ static void radeon_mn_release(struct mmu_notifier *mn,
  * unmap them by move them into system domain again.
  */
 static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
-                                            struct mm_struct *mm,
-                                            unsigned long start,
-                                            unsigned long end,
-                                            bool blockable)
+                        const struct mmu_notifier_range *range)
 {
         struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
         struct ttm_operation_ctx ctx = { false, false };
         struct interval_tree_node *it;
+        unsigned long end;
         int ret = 0;
 
         /* notification is exclusive, but interval is inclusive */
-        end -= 1;
+        end = range->end - 1;
 
         /* TODO we should be able to split locking for interval tree and
          * the tear down.
          */
-        if (blockable)
+        if (range->blockable)
                 mutex_lock(&rmn->lock);
         else if (!mutex_trylock(&rmn->lock))
                 return -EAGAIN;
 
-        it = interval_tree_iter_first(&rmn->objects, start, end);
+        it = interval_tree_iter_first(&rmn->objects, range->start, end);
         while (it) {
                 struct radeon_mn_node *node;
                 struct radeon_bo *bo;
                 long r;
 
-                if (!blockable) {
+                if (!range->blockable) {
                         ret = -EAGAIN;
                         goto out_unlock;
                 }
 
                 node = container_of(it, struct radeon_mn_node, it);
-                it = interval_tree_iter_next(it, start, end);
+                it = interval_tree_iter_next(it, range->start, end);
 
                 list_for_each_entry(bo, &node->bos, mn_list) {