Merge drm-fixes into drm-next.
Nouveau wanted this to avoid some worse conflicts when I merge that.
mm/backing-dev.c
@@ -328,7 +328,7 @@ static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
 	return 0;
 
 out_destroy_stat:
-	while (--i)
+	while (i--)
 		percpu_counter_destroy(&wb->stat[i]);
 	fprop_local_destroy_percpu(&wb->completions);
 out_put_cong:
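Reviewer note: the one-character change above fixes an off-by-one in the error-path unwind. With "while (--i)" the loop stops before index 0, so wb->stat[0] is never destroyed; "while (i--)" visits every successfully initialized counter. A minimal userspace sketch of the pattern (names invented for illustration, not kernel code):

#include <stdio.h>

/* Unwind `count` successfully initialized slots.
 * "while (--count)" would stop at 1 and leak slot 0;
 * "while (count--)" visits count-1 down to 0.
 */
static void unwind(int count)
{
	while (count--)
		printf("destroy slot %d\n", count);
}

int main(void)
{
	unwind(4);	/* prints slots 3, 2, 1, 0 */
	return 0;
}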
mm/filemap.c (38 changed lines)
@@ -195,6 +195,30 @@ void __delete_from_page_cache(struct page *page, void *shadow,
 	else
 		cleancache_invalidate_page(mapping, page);
 
+	VM_BUG_ON_PAGE(page_mapped(page), page);
+	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
+		int mapcount;
+
+		pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
+			 current->comm, page_to_pfn(page));
+		dump_page(page, "still mapped when deleted");
+		dump_stack();
+		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+
+		mapcount = page_mapcount(page);
+		if (mapping_exiting(mapping) &&
+		    page_count(page) >= mapcount + 2) {
+			/*
+			 * All vmas have already been torn down, so it's
+			 * a good bet that actually the page is unmapped,
+			 * and we'd prefer not to leak it: if we're wrong,
+			 * some other bad page check should catch it later.
+			 */
+			page_mapcount_reset(page);
+			atomic_sub(mapcount, &page->_count);
+		}
+	}
+
 	page_cache_tree_delete(mapping, page, shadow);
 
 	page->mapping = NULL;
@@ -205,7 +229,6 @@ void __delete_from_page_cache(struct page *page, void *shadow,
 		__dec_zone_page_state(page, NR_FILE_PAGES);
 	if (PageSwapBacked(page))
 		__dec_zone_page_state(page, NR_SHMEM);
-	VM_BUG_ON_PAGE(page_mapped(page), page);
 
 	/*
 	 * At this point page must be either written or cleaned by truncate.
@@ -446,7 +469,8 @@ int filemap_write_and_wait(struct address_space *mapping)
 {
 	int err = 0;
 
-	if (mapping->nrpages) {
+	if ((!dax_mapping(mapping) && mapping->nrpages) ||
+	    (dax_mapping(mapping) && mapping->nrexceptional)) {
 		err = filemap_fdatawrite(mapping);
 		/*
 		 * Even if the above returned error, the pages may be
@@ -482,13 +506,8 @@ int filemap_write_and_wait_range(struct address_space *mapping,
 {
 	int err = 0;
 
-	if (dax_mapping(mapping) && mapping->nrexceptional) {
-		err = dax_writeback_mapping_range(mapping, lstart, lend);
-		if (err)
-			return err;
-	}
-
-	if (mapping->nrpages) {
+	if ((!dax_mapping(mapping) && mapping->nrpages) ||
+	    (dax_mapping(mapping) && mapping->nrexceptional)) {
 		err = __filemap_fdatawrite_range(mapping, lstart, lend,
 						 WB_SYNC_ALL);
 		/* See comment of filemap_write_and_wait() */
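Reviewer note: after the two hunks above, both filemap_write_and_wait() and filemap_write_and_wait_range() start writeback when either a regular mapping has pages or a DAX mapping has exceptional radix-tree entries. A small userspace model of that predicate; the struct and field names below are simplified stand-ins for struct address_space, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

struct mapping_model {
	bool dax;			/* would be dax_mapping() in the kernel */
	unsigned long nrpages;
	unsigned long nrexceptional;
};

/* Writeback work exists when a non-DAX mapping has pages, or a DAX
 * mapping has exceptional entries to flush. */
static bool needs_writeback(const struct mapping_model *m)
{
	return (!m->dax && m->nrpages) || (m->dax && m->nrexceptional);
}

int main(void)
{
	struct mapping_model dax_map = { .dax = true, .nrexceptional = 3 };
	struct mapping_model empty_reg = { .dax = false, .nrpages = 0 };

	printf("dax: %d, empty regular: %d\n",
	       needs_writeback(&dax_map), needs_writeback(&empty_reg));
	return 0;
}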
@@ -1890,6 +1909,7 @@ EXPORT_SYMBOL(generic_file_read_iter);
  * page_cache_read - adds requested page to the page cache if not already there
  * @file:	file to read
  * @offset:	page index
+ * @gfp_mask:	memory allocation flags
  *
  * This adds the requested page to the page cache if it isn't already there,
  * and schedules an I/O to read in its contents from disk.
mm/huge_memory.c
@@ -1700,7 +1700,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
 		VM_BUG_ON(!pmd_none(*new_pmd));
 
-		if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
+		if (pmd_move_must_withdraw(new_ptl, old_ptl) &&
+		    vma_is_anonymous(vma)) {
 			pgtable_t pgtable;
 			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
 			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
@@ -2835,6 +2836,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	pgtable_t pgtable;
 	pmd_t _pmd;
 	bool young, write, dirty;
+	unsigned long addr;
 	int i;
 
 	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
@@ -2860,10 +2862,11 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	young = pmd_young(*pmd);
 	dirty = pmd_dirty(*pmd);
 
+	pmdp_huge_split_prepare(vma, haddr, pmd);
 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
 	pmd_populate(mm, &_pmd, pgtable);
 
-	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
 		pte_t entry, *pte;
 		/*
 		 * Note that NUMA hinting access restrictions are not
@@ -2884,9 +2887,9 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		}
 		if (dirty)
 			SetPageDirty(page + i);
-		pte = pte_offset_map(&_pmd, haddr);
+		pte = pte_offset_map(&_pmd, addr);
 		BUG_ON(!pte_none(*pte));
-		set_pte_at(mm, haddr, pte, entry);
+		set_pte_at(mm, addr, pte, entry);
 		atomic_inc(&page[i]._mapcount);
 		pte_unmap(pte);
 	}
@@ -2936,7 +2939,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	pmd_populate(mm, pmd, pgtable);
 
 	if (freeze) {
-		for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+		for (i = 0; i < HPAGE_PMD_NR; i++) {
 			page_remove_rmap(page + i, false);
 			put_page(page + i);
 		}
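Reviewer note: the __split_huge_pmd_locked() hunks above switch the PTE-population loop to a separate cursor (addr) so that haddr keeps pointing at the start of the huge page; previously the loop advanced haddr itself, so later users of haddr (such as the freeze loop) saw a value pushed past the end of the range. A standalone sketch of the pattern, with made-up constants:

#include <stdio.h>

#define NR_ENTRIES	4
#define STEP		0x1000UL

int main(void)
{
	unsigned long haddr = 0x200000UL;	/* base of the range */
	unsigned long addr;
	int i;

	/* Walk the range with a local cursor; haddr is left untouched. */
	for (i = 0, addr = haddr; i < NR_ENTRIES; i++, addr += STEP)
		printf("entry %d at %#lx\n", i, addr);

	printf("base still %#lx\n", haddr);
	return 0;
}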
mm/hugetlb.c (12 changed lines)
@@ -2630,8 +2630,10 @@ static int __init hugetlb_init(void)
 		hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
 	}
 	default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
-	if (default_hstate_max_huge_pages)
-		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
+	if (default_hstate_max_huge_pages) {
+		if (!default_hstate.max_huge_pages)
+			default_hstate.max_huge_pages = default_hstate_max_huge_pages;
+	}
 
 	hugetlb_init_hstates();
 	gather_bootmem_prealloc();
@@ -2749,7 +2751,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
 	int ret;
 
 	if (!hugepages_supported())
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	table->data = &tmp;
 	table->maxlen = sizeof(unsigned long);
@@ -2790,7 +2792,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
 	int ret;
 
 	if (!hugepages_supported())
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	tmp = h->nr_overcommit_huge_pages;
 
@@ -3500,7 +3502,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * COW. Warn that such a situation has occurred as it may not be obvious
 	 */
 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
-		pr_warning("PID %d killed due to inadequate hugepage pool\n",
+		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
 			   current->pid);
 		return ret;
 	}
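Reviewer note: the sysctl handlers above stop returning ENOTSUPP, a kernel-internal value (524 in include/linux/errno.h) with no entry in the userspace errno table, in favour of EOPNOTSUPP, which C libraries can translate. A quick userspace check, assuming glibc's strerror() behaviour for unknown values:

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* EOPNOTSUPP has a proper message ("Operation not supported"). */
	printf("EOPNOTSUPP: %s\n", strerror(EOPNOTSUPP));
	/* 524 is the kernel's ENOTSUPP; userspace only sees "Unknown error 524". */
	printf("524:        %s\n", strerror(524));
	return 0;
}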
mm/kasan/kasan.c
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/kmemleak.h>
+#include <linux/linkage.h>
 #include <linux/memblock.h>
 #include <linux/memory.h>
 #include <linux/mm.h>
@@ -60,6 +61,25 @@ void kasan_unpoison_shadow(const void *address, size_t size)
 	}
 }
 
+static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
+{
+	void *base = task_stack_page(task);
+	size_t size = sp - base;
+
+	kasan_unpoison_shadow(base, size);
+}
+
+/* Unpoison the entire stack for a task. */
+void kasan_unpoison_task_stack(struct task_struct *task)
+{
+	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
+}
+
+/* Unpoison the stack for the current task beyond a watermark sp value. */
+asmlinkage void kasan_unpoison_remaining_stack(void *sp)
+{
+	__kasan_unpoison_stack(current, sp);
+}
+
 /*
  * All functions below always inlined so compiler could
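Reviewer note: the new helpers unpoison the shadow for the range [task_stack_page(task), sp); since the stack grows down, that is the currently unused part of the stack, and passing base + THREAD_SIZE as the watermark clears the whole stack. A tiny sketch of the size arithmetic only (plain C, no KASAN involved, constants invented):

#include <stdio.h>

#define THREAD_SIZE 16384UL	/* illustrative; the real value is per-arch */

int main(void)
{
	unsigned long base = 0x100000UL;	/* pretend task_stack_page() */
	unsigned long sp   = base + 0x3000UL;	/* pretend watermark stack pointer */

	/* Matches "size = sp - base" in __kasan_unpoison_stack(). */
	printf("unpoison %lu of %lu stack bytes\n", sp - base, THREAD_SIZE);
	return 0;
}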
mm/memory.c (14 changed lines)
@@ -3404,8 +3404,18 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (unlikely(pmd_none(*pmd)) &&
 	    unlikely(__pte_alloc(mm, vma, pmd, address)))
 		return VM_FAULT_OOM;
-	/* if an huge pmd materialized from under us just retry later */
-	if (unlikely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
+	/*
+	 * If a huge pmd materialized under us just retry later.  Use
+	 * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
+	 * didn't become pmd_trans_huge under us and then back to pmd_none, as
+	 * a result of MADV_DONTNEED running immediately after a huge pmd fault
+	 * in a different thread of this mm, in turn leading to a misleading
+	 * pmd_trans_huge() retval.  All we have to ensure is that it is a
+	 * regular pmd that we can walk with pte_offset_map() and we can do that
+	 * through an atomic read in C, which is what pmd_trans_unstable()
+	 * provides.
+	 */
+	if (unlikely(pmd_trans_unstable(pmd) || pmd_devmap(*pmd)))
 		return 0;
 	/*
 	 * A regular pmd is established and it can't morph into a huge pmd
mm/mempolicy.c
@@ -532,7 +532,7 @@ retry:
 		nid = page_to_nid(page);
 		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
 			continue;
-		if (PageTail(page) && PageAnon(page)) {
+		if (PageTransCompound(page) && PageAnon(page)) {
 			get_page(page);
 			pte_unmap_unlock(pte, ptl);
 			lock_page(page);
mm/migrate.c
@@ -1582,7 +1582,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
 					 (GFP_HIGHUSER_MOVABLE |
 					  __GFP_THISNODE | __GFP_NOMEMALLOC |
 					  __GFP_NORETRY | __GFP_NOWARN) &
-					 ~(__GFP_IO | __GFP_FS), 0);
+					 ~__GFP_RECLAIM, 0);
 
 	return newpage;
 }
mm/mmap.c (34 changed lines)
@@ -2664,12 +2664,29 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	if (!vma || !(vma->vm_flags & VM_SHARED))
 		goto out;
 
-	if (start < vma->vm_start || start + size > vma->vm_end)
+	if (start < vma->vm_start)
 		goto out;
 
-	if (pgoff == linear_page_index(vma, start)) {
-		ret = 0;
-		goto out;
+	if (start + size > vma->vm_end) {
+		struct vm_area_struct *next;
+
+		for (next = vma->vm_next; next; next = next->vm_next) {
+			/* hole between vmas ? */
+			if (next->vm_start != next->vm_prev->vm_end)
+				goto out;
+
+			if (next->vm_file != vma->vm_file)
+				goto out;
+
+			if (next->vm_flags != vma->vm_flags)
+				goto out;
+
+			if (start + size <= next->vm_end)
+				break;
+		}
+
+		if (!next)
+			goto out;
 	}
 
 	prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
@@ -2679,9 +2696,16 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	flags &= MAP_NONBLOCK;
 	flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
 	if (vma->vm_flags & VM_LOCKED) {
+		struct vm_area_struct *tmp;
 		flags |= MAP_LOCKED;
+
 		/* drop PG_Mlocked flag for over-mapped range */
-		munlock_vma_pages_range(vma, start, start + size);
+		for (tmp = vma; tmp->vm_start >= start + size;
+				tmp = tmp->vm_next) {
+			munlock_vma_pages_range(tmp,
+					max(tmp->vm_start, start),
+					min(tmp->vm_end, start + size));
+		}
 	}
 
 	file = get_file(vma->vm_file);
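Reviewer note: instead of rejecting any remap_file_pages() request that extends past the first VMA, the emulation above now walks the following VMAs and only fails on a hole or on a VMA with a different file or flags. A userspace sketch of the same walk over a toy range list; the types and fields below are invented for illustration, not kernel structures:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a VMA: [start, end) plus a "file/flags" tag. */
struct range {
	unsigned long start, end;
	int tag;
	struct range *next;
};

/* Accept [start, start + size) only if it is covered by ranges that are
 * back-to-back (no hole) and share the first range's tag.
 */
static bool covered(const struct range *first, unsigned long start,
		    unsigned long size)
{
	const struct range *r;
	unsigned long prev_end = first->end;

	if (start < first->start)
		return false;
	if (start + size <= first->end)
		return true;

	for (r = first->next; r; r = r->next) {
		if (r->start != prev_end)	/* hole between ranges? */
			return false;
		if (r->tag != first->tag)	/* different file/flags? */
			return false;
		if (start + size <= r->end)
			return true;
		prev_end = r->end;
	}
	return false;				/* ran past the last range */
}

int main(void)
{
	struct range c = { 0x3000, 0x4000, 1, NULL };
	struct range b = { 0x2000, 0x3000, 1, &c };
	struct range a = { 0x1000, 0x2000, 1, &b };

	printf("%d %d\n", covered(&a, 0x1000, 0x2800),	/* 1: contiguous */
			  covered(&a, 0x1000, 0x4800));	/* 0: past the end */
	return 0;
}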
mm/mprotect.c
@@ -160,9 +160,11 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		}
 
 		if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
-			if (next - addr != HPAGE_PMD_SIZE)
+			if (next - addr != HPAGE_PMD_SIZE) {
 				split_huge_pmd(vma, pmd, addr);
-			else {
+				if (pmd_none(*pmd))
+					continue;
+			} else {
 				int nr_ptes = change_huge_pmd(vma, pmd, addr,
 						newprot, prot_numa);
 
mm/mremap.c
@@ -210,6 +210,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 				}
 			}
 			split_huge_pmd(vma, old_pmd, old_addr);
+			if (pmd_none(*old_pmd))
+				continue;
 			VM_BUG_ON(pmd_trans_huge(*old_pmd));
 		}
 		if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
mm/pgtable-generic.c
@@ -90,9 +90,9 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
  * ARCHes with special requirements for evicting THP backing TLB entries can
  * implement this. Otherwise also, it can help optimize normal TLB flush in
  * THP regime. stock flush_tlb_range() typically has optimization to nuke the
- * entire TLB TLB if flush span is greater than a threshhold, which will
+ * entire TLB if flush span is greater than a threshold, which will
  * likely be true for a single huge page. Thus a single thp flush will
- * invalidate the entire TLB which is not desitable.
+ * invalidate the entire TLB which is not desirable.
  * e.g. see arch/arc: flush_pmd_tlb_range
  */
 #define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
@@ -195,7 +195,9 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	VM_BUG_ON(pmd_trans_huge(*pmdp));
 	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
-	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+
+	/* collapse entails shooting down ptes not pmd */
+	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	return pmd;
 }
 #endif
mm/slab.c (12 changed lines)
@@ -2275,7 +2275,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 
 	err = setup_cpu_cache(cachep, gfp);
 	if (err) {
-		__kmem_cache_shutdown(cachep);
+		__kmem_cache_release(cachep);
 		return err;
 	}
 
@@ -2413,13 +2413,14 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
 }
 
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
+	return __kmem_cache_shrink(cachep, false);
+}
+
+void __kmem_cache_release(struct kmem_cache *cachep)
+{
 	int i;
 	struct kmem_cache_node *n;
-	int rc = __kmem_cache_shrink(cachep, false);
-
-	if (rc)
-		return rc;
 
 	free_percpu(cachep->cpu_cache);
 
@@ -2430,7 +2431,6 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
 		kfree(n);
 		cachep->node[i] = NULL;
 	}
-	return 0;
 }
 
 /*
mm/slab.h
@@ -140,6 +140,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
 #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
 
 int __kmem_cache_shutdown(struct kmem_cache *);
+void __kmem_cache_release(struct kmem_cache *);
 int __kmem_cache_shrink(struct kmem_cache *, bool);
 void slab_kmem_cache_release(struct kmem_cache *);
 
mm/slab_common.c
@@ -693,6 +693,7 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s,
 
 void slab_kmem_cache_release(struct kmem_cache *s)
 {
+	__kmem_cache_release(s);
 	destroy_memcg_params(s);
 	kfree_const(s->name);
 	kmem_cache_free(kmem_cache, s);
mm/slob.c
@@ -630,6 +630,10 @@ int __kmem_cache_shutdown(struct kmem_cache *c)
 	return 0;
 }
 
+void __kmem_cache_release(struct kmem_cache *c)
+{
+}
+
 int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
 {
 	return 0;
mm/slub.c (38 changed lines)
@@ -1592,18 +1592,12 @@ static inline void add_partial(struct kmem_cache_node *n,
 	__add_partial(n, page, tail);
 }
 
-static inline void
-__remove_partial(struct kmem_cache_node *n, struct page *page)
-{
-	list_del(&page->lru);
-	n->nr_partial--;
-}
-
 static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
 	lockdep_assert_held(&n->list_lock);
-	__remove_partial(n, page);
+	list_del(&page->lru);
+	n->nr_partial--;
 }
 
 /*
@@ -3184,6 +3178,12 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 	}
 }
 
+void __kmem_cache_release(struct kmem_cache *s)
+{
+	free_percpu(s->cpu_slab);
+	free_kmem_cache_nodes(s);
+}
+
 static int init_kmem_cache_nodes(struct kmem_cache *s)
 {
 	int node;
@@ -3443,28 +3443,31 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 
 /*
  * Attempt to free all partial slabs on a node.
- * This is called from kmem_cache_close(). We must be the last thread
- * using the cache and therefore we do not need to lock anymore.
+ * This is called from __kmem_cache_shutdown(). We must take list_lock
+ * because sysfs file might still access partial list after the shutdowning.
  */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
 	struct page *page, *h;
 
+	BUG_ON(irqs_disabled());
+	spin_lock_irq(&n->list_lock);
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
-			__remove_partial(n, page);
+			remove_partial(n, page);
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
-			"Objects remaining in %s on kmem_cache_close()");
+			"Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
+	spin_unlock_irq(&n->list_lock);
 }
 
 /*
  * Release all resources used by a slab cache.
  */
-static inline int kmem_cache_close(struct kmem_cache *s)
+int __kmem_cache_shutdown(struct kmem_cache *s)
 {
 	int node;
 	struct kmem_cache_node *n;
@@ -3476,16 +3479,9 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 		if (n->nr_partial || slabs_node(s, node))
 			return 1;
 	}
-	free_percpu(s->cpu_slab);
-	free_kmem_cache_nodes(s);
 	return 0;
 }
 
-int __kmem_cache_shutdown(struct kmem_cache *s)
-{
-	return kmem_cache_close(s);
-}
-
 /********************************************************************
  *		Kmalloc subsystem
  *******************************************************************/
@@ -3980,7 +3976,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
 	memcg_propagate_slab_attrs(s);
 	err = sysfs_slab_add(s);
 	if (err)
-		kmem_cache_close(s);
+		__kmem_cache_release(s);
 
 	return err;
 }