Merge 5.10.128 into android12-5.10-lts

Changes in 5.10.128
	MAINTAINERS: add Amir as xfs maintainer for 5.10.y
	drm: remove drm_fb_helper_modinit
	tick/nohz: unexport __init-annotated tick_nohz_full_setup()
	bcache: memset on stack variables in bch_btree_check() and bch_sectors_dirty_init()
	xfs: use kmem_cache_free() for kmem_cache objects
	xfs: punch out data fork delalloc blocks on COW writeback failure
	xfs: Fix the free logic of state in xfs_attr_node_hasname
	xfs: remove all COW fork extents when remounting readonly
	xfs: check sb_meta_uuid for dabuf buffer recovery
	powerpc/ftrace: Remove ftrace init tramp once kernel init is complete
	net: mscc: ocelot: allow unregistered IP multicast flooding
	Linux 5.10.128

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I2418909b20a3a6a087ca5f9b16a3a7c745e99014
Greg Kroah-Hartman committed on 2022-07-28 16:53:46 +02:00
17 changed files with 71 additions and 70 deletions

diff --git a/MAINTAINERS b/MAINTAINERS

@@ -19279,7 +19279,8 @@ F: arch/x86/xen/*swiotlb*
 F: drivers/xen/*swiotlb*
 
 XFS FILESYSTEM
-M: Darrick J. Wong <darrick.wong@oracle.com>
+M: Amir Goldstein <amir73il@gmail.com>
+M: Darrick J. Wong <djwong@kernel.org>
 M: linux-xfs@vger.kernel.org
 L: linux-xfs@vger.kernel.org
 S: Supported

diff --git a/Makefile b/Makefile

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 127
+SUBLEVEL = 128
 EXTRAVERSION =
 NAME = Dare mighty things

diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h

@@ -96,7 +96,7 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
 #endif /* PPC64_ELF_ABI_v1 */
 #endif /* CONFIG_FTRACE_SYSCALLS */
 
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC64) && defined(CONFIG_FUNCTION_TRACER)
 #include <asm/paca.h>
 
 static inline void this_cpu_disable_ftrace(void)
@@ -120,11 +120,13 @@ static inline u8 this_cpu_get_ftrace_enabled(void)
 	return get_paca()->ftrace_enabled;
 }
 
+void ftrace_free_init_tramp(void);
 #else /* CONFIG_PPC64 */
 static inline void this_cpu_disable_ftrace(void) { }
 static inline void this_cpu_enable_ftrace(void) { }
 static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled) { }
 static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; }
+static inline void ftrace_free_init_tramp(void) { }
 #endif /* CONFIG_PPC64 */
 
 #endif /* !__ASSEMBLY__ */

diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c

@@ -336,9 +336,7 @@ static int setup_mcount_compiler_tramp(unsigned long tramp)
 
 	/* Is this a known long jump tramp? */
 	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
-		if (!ftrace_tramps[i])
-			break;
-		else if (ftrace_tramps[i] == tramp)
+		if (ftrace_tramps[i] == tramp)
 			return 0;
 
 	/* Is this a known plt tramp? */
@@ -882,6 +880,17 @@
 
 extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
 
+void ftrace_free_init_tramp(void)
+{
+	int i;
+
+	for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++)
+		if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) {
+			ftrace_tramps[i] = 0;
+			return;
+		}
+}
+
 int __init ftrace_dyn_arch_init(void)
 {
 	int i;
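A note on the lookup change in setup_mcount_compiler_tramp() (my reading of it, the diff itself does not spell this out): the old loop stopped at the first empty ftrace_tramps[] slot, which was safe while entries were only ever appended. Once ftrace_free_init_tramp() can zero the slot that recorded the init-section trampoline, an empty slot may sit in front of a still-valid one, so the lookup has to scan the whole table. A small self-contained C sketch of the difference, using a made-up three-entry table:

#include <stdbool.h>
#include <stdio.h>

#define NUM_TRAMPS 3

/* slot 1 plays the role of an init trampoline cleared after boot */
static unsigned long tramps[NUM_TRAMPS] = { 0x1000, 0, 0x3000 };

static bool is_known_old(unsigned long tramp)	/* pre-patch: stop at empty slot */
{
	for (int i = 0; i < NUM_TRAMPS; i++)
		if (!tramps[i])
			break;
		else if (tramps[i] == tramp)
			return true;
	return false;
}

static bool is_known_new(unsigned long tramp)	/* post-patch: scan every slot */
{
	for (int i = 0; i < NUM_TRAMPS; i++)
		if (tramps[i] == tramp)
			return true;
	return false;
}

int main(void)
{
	/* the old variant misses 0x3000 because the cleared slot ends the scan */
	printf("old: %d  new: %d\n", is_known_old(0x3000), is_known_new(0x3000));
	return 0;
}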

diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c

@@ -51,6 +51,7 @@
 #include <asm/kasan.h>
 #include <asm/svm.h>
 #include <asm/mmzone.h>
+#include <asm/ftrace.h>
 
 #include <mm/mmu_decl.h>
@@ -347,6 +348,7 @@ void free_initmem(void)
 	mark_initmem_nx();
 	init_mem_is_free = true;
 	free_initmem_default(POISON_FREE_INITMEM);
+	ftrace_free_init_tramp();
 }
 
 /**

diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h

@@ -32,16 +32,6 @@
 #include <drm/drm_encoder.h>
 #include <drm/drm_modes.h>
 
-/* drm_fb_helper.c */
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-int drm_fb_helper_modinit(void);
-#else
-static inline int drm_fb_helper_modinit(void)
-{
-	return 0;
-}
-#endif
-
 /* drm_dp_aux_dev.c */
 #ifdef CONFIG_DRM_DP_AUX_CHARDEV
 int drm_dp_aux_dev_init(void);

diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c

@@ -2271,24 +2271,3 @@ void drm_fbdev_generic_setup(struct drm_device *dev,
 	drm_client_register(&fb_helper->client);
 }
 EXPORT_SYMBOL(drm_fbdev_generic_setup);
-
-/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
- * but the module doesn't depend on any fb console symbols. At least
- * attempt to load fbcon to avoid leaving the system without a usable console.
- */
-int __init drm_fb_helper_modinit(void)
-{
-#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
-	const char name[] = "fbcon";
-	struct module *fbcon;
-
-	mutex_lock(&module_mutex);
-	fbcon = find_module(name);
-	mutex_unlock(&module_mutex);
-
-	if (!fbcon)
-		request_module_nowait(name);
-#endif
-	return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_modinit);

diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c

@@ -64,19 +64,18 @@ MODULE_PARM_DESC(edid_firmware,
 
 static int __init drm_kms_helper_init(void)
 {
-	int ret;
-
-	/* Call init functions from specific kms helpers here */
-	ret = drm_fb_helper_modinit();
-	if (ret < 0)
-		goto out;
-
-	ret = drm_dp_aux_dev_init();
-	if (ret < 0)
-		goto out;
-
-out:
-	return ret;
+	/*
+	 * The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
+	 * but the module doesn't depend on any fb console symbols. At least
+	 * attempt to load fbcon to avoid leaving the system without a usable
+	 * console.
+	 */
+	if (IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) &&
+	    IS_MODULE(CONFIG_FRAMEBUFFER_CONSOLE) &&
+	    !IS_ENABLED(CONFIG_EXPERT))
+		request_module_nowait("fbcon");
+
+	return drm_dp_aux_dev_init();
 }
 
 static void __exit drm_kms_helper_exit(void)

diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c

@@ -2017,6 +2017,7 @@ int bch_btree_check(struct cache_set *c)
 	if (c->root->level == 0)
 		return 0;
 
+	memset(&check_state, 0, sizeof(struct btree_check_state));
 	check_state.c = c;
 	check_state.total_threads = bch_btree_chkthread_nr();
 	check_state.key_idx = 0;

diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c

@@ -901,6 +901,7 @@ void bch_sectors_dirty_init(struct bcache_device *d)
 		return;
 	}
 
+	memset(&state, 0, sizeof(struct bch_dirty_init_state));
 	state.c = c;
 	state.d = d;
 	state.total_threads = bch_btre_dirty_init_thread_nr();
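Both bcache hunks add the same kind of fix: check_state and state live on the stack, so any field the code never assigns keeps whatever bytes happened to be there until the new memset() zeroes the whole struct first. A tiny standalone C illustration of the pattern; demo_state is invented for the example:

#include <stdio.h>
#include <string.h>

struct demo_state {
	int total_threads;
	int started;		/* stands in for a field the code never assigns */
};

int main(void)
{
	struct demo_state s;	/* on-stack, contents indeterminate */

	memset(&s, 0, sizeof(struct demo_state));
	s.total_threads = 4;	/* only some fields are set explicitly */

	/* without the memset, s.started would be stack garbage */
	printf("total_threads=%d started=%d\n", s.total_threads, s.started);
	return 0;
}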

diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c

@@ -1593,8 +1593,12 @@ int ocelot_init(struct ocelot *ocelot)
 	ocelot_write_rix(ocelot,
 			 ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
 			 ANA_PGID_PGID, PGID_MC);
-	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4);
-	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6);
+	ocelot_write_rix(ocelot,
+			 ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
+			 ANA_PGID_PGID, PGID_MCIPV4);
+	ocelot_write_rix(ocelot,
+			 ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
+			 ANA_PGID_PGID, PGID_MCIPV6);
 
 	/* Allow manual injection via DEVCPU_QS registers, and byte swap these
 	 * registers endianness.
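For context on the ocelot hunk, as I read the driver: PGID_MCIPV4 and PGID_MCIPV6 are the flood masks used for IPv4/IPv6 multicast with no matching MDB entry, so programming them to 0 meant such traffic was dropped, and the fix gives them the same all-ports mask already used for PGID_MC. GENMASK(ocelot->num_phys_ports, 0) sets bits 0 through num_phys_ports, one per front port plus, if I read the port numbering right, the CPU port. A small sketch of the resulting mask, assuming a hypothetical 6-port switch and a simplified 64-bit GENMASK():

#include <stdio.h>

/* simplified 64-bit stand-in for the kernel's GENMASK() */
#define GENMASK(h, l) ((~0UL >> (63 - (h))) & (~0UL << (l)))

int main(void)
{
	unsigned long num_phys_ports = 6;	/* assumption for the example */
	unsigned long mask = GENMASK(num_phys_ports, 0);

	/* prints 0x7f: ports 0-5 plus the CPU port at index 6 */
	printf("PGID_MCIPV4/PGID_MCIPV6 flood mask: 0x%lx\n", mask);
	return 0;
}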

diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c

@@ -876,21 +876,18 @@ xfs_attr_node_hasname(
 
 	state = xfs_da_state_alloc(args);
 	if (statep != NULL)
-		*statep = NULL;
+		*statep = state;
 
 	/*
 	 * Search to see if name exists, and get back a pointer to it.
 	 */
 	error = xfs_da3_node_lookup_int(state, &retval);
-	if (error) {
-		xfs_da_state_free(state);
-		return error;
-	}
+	if (error)
+		retval = error;
 
-	if (statep != NULL)
-		*statep = state;
-	else
+	if (!statep)
 		xfs_da_state_free(state);
 
 	return retval;
 }

diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c

@@ -145,6 +145,7 @@ xfs_end_ioend(
 	struct iomap_ioend	*ioend)
 {
 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
+	struct xfs_mount	*mp = ip->i_mount;
 	xfs_off_t		offset = ioend->io_offset;
 	size_t			size = ioend->io_size;
 	unsigned int		nofs_flag;
@@ -160,18 +161,26 @@
 	/*
 	 * Just clean up the in-memory strutures if the fs has been shut down.
 	 */
-	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+	if (XFS_FORCED_SHUTDOWN(mp)) {
 		error = -EIO;
 		goto done;
 	}
 
 	/*
-	 * Clean up any COW blocks on an I/O error.
+	 * Clean up all COW blocks and underlying data fork delalloc blocks on
+	 * I/O error. The delalloc punch is required because this ioend was
+	 * mapped to blocks in the COW fork and the associated pages are no
+	 * longer dirty. If we don't remove delalloc blocks here, they become
+	 * stale and can corrupt free space accounting on unmount.
 	 */
 	error = blk_status_to_errno(ioend->io_bio->bi_status);
 	if (unlikely(error)) {
-		if (ioend->io_flags & IOMAP_F_SHARED)
+		if (ioend->io_flags & IOMAP_F_SHARED) {
 			xfs_reflink_cancel_cow_range(ip, offset, size, true);
+			xfs_bmap_punch_delalloc_range(ip,
+						      XFS_B_TO_FSBT(mp, offset),
+						      XFS_B_TO_FSB(mp, size));
+		}
 		goto done;
 	}
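One detail of the xfs_aops.c hunk worth spelling out, based on the long-standing XFS byte/block helpers rather than anything new in this patch: XFS_B_TO_FSBT() converts bytes to filesystem blocks rounding down, while XFS_B_TO_FSB() rounds up, which is how the byte-based offset and size of the failed ioend become a start block and block count for the delalloc punch. A minimal userspace sketch of that arithmetic, assuming 4096-byte blocks:

#include <stdio.h>

#define BLOCKLOG  12UL			/* assumed: 4096-byte filesystem blocks */
#define BLOCKSIZE (1UL << BLOCKLOG)

static unsigned long b_to_fsbt(unsigned long bytes)	/* like XFS_B_TO_FSBT: round down */
{
	return bytes >> BLOCKLOG;
}

static unsigned long b_to_fsb(unsigned long bytes)	/* like XFS_B_TO_FSB: round up */
{
	return (bytes + BLOCKSIZE - 1) >> BLOCKLOG;
}

int main(void)
{
	unsigned long offset = 6144, size = 10240;	/* 1.5 and 2.5 blocks */

	/* prints: start fsb 1, fsb count 3 */
	printf("start fsb %lu, fsb count %lu\n", b_to_fsbt(offset), b_to_fsb(size));
	return 0;
}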

diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c

@@ -805,7 +805,7 @@ xlog_recover_get_buf_lsn(
 	}
 
 	if (lsn != (xfs_lsn_t)-1) {
-		if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
+		if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
 			goto recover_immediately;
 		return lsn;
 	}

diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c

@@ -482,7 +482,7 @@ xfs_extent_free_finish_item(
 					      free->xefi_startblock,
 					      free->xefi_blockcount,
 					      &free->xefi_oinfo, free->xefi_skip_discard);
-	kmem_free(free);
+	kmem_cache_free(xfs_bmap_free_item_zone, free);
 	return error;
 }
 
@@ -502,7 +502,7 @@ xfs_extent_free_cancel_item(
 	struct xfs_extent_free_item	*free;
 
 	free = container_of(item, struct xfs_extent_free_item, xefi_list);
-	kmem_free(free);
+	kmem_cache_free(xfs_bmap_free_item_zone, free);
 }
 
 const struct xfs_defer_op_type xfs_extent_free_defer_type = {
@@ -564,7 +564,7 @@ xfs_agfl_free_finish_item(
 	extp->ext_len = free->xefi_blockcount;
 	efdp->efd_next_extent++;
 
-	kmem_free(free);
+	kmem_cache_free(xfs_bmap_free_item_zone, free);
 	return error;
 }

diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c

@@ -1695,7 +1695,10 @@ static int
 xfs_remount_ro(
 	struct xfs_mount	*mp)
 {
-	int			error;
+	struct xfs_eofblocks	eofb = {
+		.eof_flags	= XFS_EOF_FLAGS_SYNC,
+	};
+	int			error;
 
 	/*
 	 * Cancel background eofb scanning so it cannot race with the final
@@ -1703,8 +1706,13 @@
 	 */
 	xfs_stop_block_reaping(mp);
 
-	/* Get rid of any leftover CoW reservations... */
-	error = xfs_icache_free_cowblocks(mp, NULL);
+	/*
+	 * Clear out all remaining COW staging extents and speculative post-EOF
+	 * preallocations so that we don't leave inodes requiring inactivation
+	 * cleanups during reclaim on a read-only mount. We must process every
+	 * cached inode, so this requires a synchronous cache scan.
+	 */
+	error = xfs_icache_free_cowblocks(mp, &eofb);
 	if (error) {
 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 		return error;

diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c

@@ -428,7 +428,6 @@ void __init tick_nohz_full_setup(cpumask_var_t cpumask)
 	cpumask_copy(tick_nohz_full_mask, cpumask);
 	tick_nohz_full_running = true;
 }
-EXPORT_SYMBOL_GPL(tick_nohz_full_setup);
 
 static int tick_nohz_cpu_down(unsigned int cpu)
 {