Merge 5.10.194 into android12-5.10-lts
Changes in 5.10.194
	module: Expose module_init_layout_section()
	arm64: module-plts: inline linux/moduleloader.h
	arm64: module: Use module_init_layout_section() to spot init sections
	ARM: module: Use module_init_layout_section() to spot init sections
	mhi: pci_generic: Fix implicit conversion warning
	Revert "drm/amdgpu: install stub fence into potential unused fence pointers"
	Revert "MIPS: Alchemy: fix dbdma2"
	rcu: Prevent expedited GP from enabling tick on offline CPU
	rcu-tasks: Fix IPI failure handling in trc_wait_for_one_reader
	rcu-tasks: Wait for trc_read_check_handler() IPIs
	rcu-tasks: Add trc_inspect_reader() checks for exiting critical section
	Linux 5.10.194

Change-Id: I1e4230071f3ca3c9e811aeba31446875028d7b3d
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 193
+SUBLEVEL = 194
 EXTRAVERSION =
 NAME = Dare mighty things
 
@@ -256,7 +256,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 		/* sort by type and symbol index */
 		sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);
 
-		if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
+		if (!module_init_layout_section(secstrings + dstsec->sh_name))
 			core_plts += count_plts(syms, dstsec->sh_addr, rels,
 						numrels, s->sh_info);
 		else
@@ -7,6 +7,7 @@
 #include <linux/ftrace.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/moduleloader.h>
 #include <linux/slab.h>
 #include <linux/sort.h>
 
@@ -376,7 +377,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 		if (nents)
 			sort(rels, nents, sizeof(Elf64_Rela), cmp_rela, NULL);
 
-		if (!str_has_prefix(secstrings + dstsec->sh_name, ".init"))
+		if (!module_init_layout_section(secstrings + dstsec->sh_name))
 			core_plts += count_plts(syms, rels, numrels,
 						sechdrs[i].sh_info, dstsec);
 		else
@@ -30,7 +30,6 @@
  *
  */
 
-#include <linux/dma-map-ops.h> /* for dma_default_coherent */
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -624,18 +623,17 @@ u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
 		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
 
 	/*
-	 * There is an erratum on certain Au1200/Au1550 revisions that could
-	 * result in "stale" data being DMA'ed. It has to do with the snoop
-	 * logic on the cache eviction buffer. dma_default_coherent is set
-	 * to false on these parts.
+	 * There is an errata on the Au1200/Au1550 parts that could result
+	 * in "stale" data being DMA'ed. It has to do with the snoop logic on
+	 * the cache eviction buffer. DMA_NONCOHERENT is on by default for
+	 * these parts. If it is fixed in the future, these dma_cache_inv will
+	 * just be nothing more than empty macros. See io.h.
 	 */
-	if (!dma_default_coherent)
-		dma_cache_wback_inv(KSEG0ADDR(buf), nbytes);
+	dma_cache_wback_inv((unsigned long)buf, nbytes);
 	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
 	wmb(); /* drain writebuffer */
 	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
 	ctp->chan_ptr->ddma_dbell = 0;
-	wmb(); /* force doorbell write out to dma engine */
 
 	/* Get next descriptor pointer. */
 	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
@@ -687,18 +685,17 @@ u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
 		dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
 #endif
 	/*
-	 * There is an erratum on certain Au1200/Au1550 revisions that could
-	 * result in "stale" data being DMA'ed. It has to do with the snoop
-	 * logic on the cache eviction buffer. dma_default_coherent is set
-	 * to false on these parts.
+	 * There is an errata on the Au1200/Au1550 parts that could result in
+	 * "stale" data being DMA'ed. It has to do with the snoop logic on the
+	 * cache eviction buffer. DMA_NONCOHERENT is on by default for these
+	 * parts. If it is fixed in the future, these dma_cache_inv will just
+	 * be nothing more than empty macros. See io.h.
 	 */
-	if (!dma_default_coherent)
-		dma_cache_inv(KSEG0ADDR(buf), nbytes);
+	dma_cache_inv((unsigned long)buf, nbytes);
 	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
 	wmb(); /* drain writebuffer */
 	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
 	ctp->chan_ptr->ddma_dbell = 0;
-	wmb(); /* force doorbell write out to dma engine */
 
 	/* Get next descriptor pointer. */
 	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
@@ -2155,7 +2155,6 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
 
 	bo_va->ref_count = 1;
-	bo_va->last_pt_update = dma_fence_get_stub();
 	INIT_LIST_HEAD(&bo_va->valids);
 	INIT_LIST_HEAD(&bo_va->invalids);
 
@@ -2868,8 +2867,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
 	else
 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
-	vm->last_update = dma_fence_get_stub();
+	vm->last_update = NULL;
 	vm->last_unlocked = dma_fence_get_stub();
 
 	mutex_init(&vm->eviction_lock);
@@ -3044,7 +3042,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
 	}
 	dma_fence_put(vm->last_update);
-	vm->last_update = dma_fence_get_stub();
+	vm->last_update = NULL;
 	vm->is_compute_context = true;
 
 	if (vm->pasid) {
@@ -39,6 +39,11 @@ bool module_init_section(const char *name);
  */
 bool module_exit_section(const char *name);
 
+/* Describes whether within_module_init() will consider this an init section
+ * or not. This behaviour changes with CONFIG_MODULE_UNLOAD.
+ */
+bool module_init_layout_section(const char *sname);
+
 /*
  * Apply the given relocation to the (simplified) ELF. Return -error
  * or 0.
@@ -2297,7 +2297,7 @@ void *__symbol_get(const char *symbol)
 }
 EXPORT_SYMBOL_GPL(__symbol_get);
 
-static bool module_init_layout_section(const char *sname)
+bool module_init_layout_section(const char *sname)
 {
 #ifndef CONFIG_MODULE_UNLOAD
 	if (module_exit_section(sname))
@@ -874,7 +874,7 @@ reset_ipi:
 static bool trc_inspect_reader(struct task_struct *t, void *arg)
 {
 	int cpu = task_cpu(t);
-	bool in_qs = false;
+	int nesting;
 	bool ofl = cpu_is_offline(cpu);
 
 	if (task_curr(t)) {
@@ -894,18 +894,18 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
 		n_heavy_reader_updates++;
 		if (ofl)
 			n_heavy_reader_ofl_updates++;
-		in_qs = true;
+		nesting = 0;
 	} else {
 		// The task is not running, so C-language access is safe.
-		in_qs = likely(!t->trc_reader_nesting);
+		nesting = t->trc_reader_nesting;
 	}
 
-	// Mark as checked so that the grace-period kthread will
-	// remove it from the holdout list.
-	t->trc_reader_checked = true;
-
-	if (in_qs)
-		return true;  // Already in quiescent state, done!!!
+	// If not exiting a read-side critical section, mark as checked
+	// so that the grace-period kthread will remove it from the
+	// holdout list.
+	t->trc_reader_checked = nesting >= 0;
+	if (nesting <= 0)
+		return !nesting;  // If in QS, done, otherwise try again later.
 
 	// The task is in a read-side critical section, so set up its
 	// state so that it will awaken the grace-period kthread upon exit
@@ -958,9 +958,11 @@ static void trc_wait_for_one_reader(struct task_struct *t,
 		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
 			// Just in case there is some other reason for
 			// failure than the target CPU being offline.
+			WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
+				  __func__, cpu);
 			rcu_tasks_trace.n_ipis_fails++;
 			per_cpu(trc_ipi_to_cpu, cpu) = false;
-			t->trc_ipi_to_cpu = cpu;
+			t->trc_ipi_to_cpu = -1;
 		}
 	}
 }
||||||
@@ -1081,14 +1083,28 @@ static void check_all_holdout_tasks_trace(struct list_head *hop,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void rcu_tasks_trace_empty_fn(void *unused)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
/* Wait for grace period to complete and provide ordering. */
|
/* Wait for grace period to complete and provide ordering. */
|
||||||
static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
|
static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
|
||||||
{
|
{
|
||||||
|
int cpu;
|
||||||
bool firstreport;
|
bool firstreport;
|
||||||
struct task_struct *g, *t;
|
struct task_struct *g, *t;
|
||||||
LIST_HEAD(holdouts);
|
LIST_HEAD(holdouts);
|
||||||
long ret;
|
long ret;
|
||||||
|
|
||||||
|
// Wait for any lingering IPI handlers to complete. Note that
|
||||||
|
// if a CPU has gone offline or transitioned to userspace in the
|
||||||
|
// meantime, all IPI handlers should have been drained beforehand.
|
||||||
|
// Yes, this assumes that CPUs process IPIs in order. If that ever
|
||||||
|
// changes, there will need to be a recheck and/or timed wait.
|
||||||
|
for_each_online_cpu(cpu)
|
||||||
|
if (smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu)))
|
||||||
|
smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
|
||||||
|
|
||||||
// Remove the safety count.
|
// Remove the safety count.
|
||||||
smp_mb__before_atomic(); // Order vs. earlier atomics
|
smp_mb__before_atomic(); // Order vs. earlier atomics
|
||||||
atomic_dec(&trc_n_readers_need_end);
|
atomic_dec(&trc_n_readers_need_end);
|
||||||
|
@@ -507,7 +507,10 @@ static void synchronize_rcu_expedited_wait(void)
|
|||||||
if (rdp->rcu_forced_tick_exp)
|
if (rdp->rcu_forced_tick_exp)
|
||||||
continue;
|
continue;
|
||||||
rdp->rcu_forced_tick_exp = true;
|
rdp->rcu_forced_tick_exp = true;
|
||||||
|
preempt_disable();
|
||||||
|
if (cpu_online(cpu))
|
||||||
tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
|
tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
|
||||||
|
preempt_enable();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
j = READ_ONCE(jiffies_till_first_fqs);
|
j = READ_ONCE(jiffies_till_first_fqs);
|
||||||